xref: /openbmc/linux/drivers/net/ethernet/broadcom/tg3.c (revision 05cf4fe738242183f1237f1b3a28b4479348c0a1)
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2016 Broadcom Corporation.
8  * Copyright (C) 2016-2017 Broadcom Limited.
9  * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
10  * refers to Broadcom Inc. and/or its subsidiaries.
11  *
12  * Firmware is:
13  *	Derived from proprietary unpublished source code,
14  *	Copyright (C) 2000-2016 Broadcom Corporation.
15  *	Copyright (C) 2016-2017 Broadcom Ltd.
16  *	Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
17  *	refers to Broadcom Inc. and/or its subsidiaries.
18  *
19  *	Permission is hereby granted for the distribution of this firmware
20  *	data in hexadecimal or equivalent format, provided this copyright
21  *	notice is accompanying it.
22  */
23 
24 
25 #include <linux/module.h>
26 #include <linux/moduleparam.h>
27 #include <linux/stringify.h>
28 #include <linux/kernel.h>
29 #include <linux/sched/signal.h>
30 #include <linux/types.h>
31 #include <linux/compiler.h>
32 #include <linux/slab.h>
33 #include <linux/delay.h>
34 #include <linux/in.h>
35 #include <linux/interrupt.h>
36 #include <linux/ioport.h>
37 #include <linux/pci.h>
38 #include <linux/netdevice.h>
39 #include <linux/etherdevice.h>
40 #include <linux/skbuff.h>
41 #include <linux/ethtool.h>
42 #include <linux/mdio.h>
43 #include <linux/mii.h>
44 #include <linux/phy.h>
45 #include <linux/brcmphy.h>
46 #include <linux/if.h>
47 #include <linux/if_vlan.h>
48 #include <linux/ip.h>
49 #include <linux/tcp.h>
50 #include <linux/workqueue.h>
51 #include <linux/prefetch.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/firmware.h>
54 #include <linux/ssb/ssb_driver_gige.h>
55 #include <linux/hwmon.h>
56 #include <linux/hwmon-sysfs.h>
57 #include <linux/crc32poly.h>
58 
59 #include <net/checksum.h>
60 #include <net/ip.h>
61 
62 #include <linux/io.h>
63 #include <asm/byteorder.h>
64 #include <linux/uaccess.h>
65 
66 #include <uapi/linux/net_tstamp.h>
67 #include <linux/ptp_clock_kernel.h>
68 
69 #ifdef CONFIG_SPARC
70 #include <asm/idprom.h>
71 #include <asm/prom.h>
72 #endif
73 
74 #define BAR_0	0
75 #define BAR_2	2
76 
77 #include "tg3.h"
78 
79 /* Functions & macros to verify TG3_FLAGS types */
80 
81 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
82 {
83 	return test_bit(flag, bits);
84 }
85 
86 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
87 {
88 	set_bit(flag, bits);
89 }
90 
91 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
92 {
93 	clear_bit(flag, bits);
94 }
95 
96 #define tg3_flag(tp, flag)				\
97 	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
98 #define tg3_flag_set(tp, flag)				\
99 	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
100 #define tg3_flag_clear(tp, flag)			\
101 	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
102 
103 #define DRV_MODULE_NAME		"tg3"
104 #define TG3_MAJ_NUM			3
105 #define TG3_MIN_NUM			137
106 #define DRV_MODULE_VERSION	\
107 	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
108 #define DRV_MODULE_RELDATE	"May 11, 2014"
109 
110 #define RESET_KIND_SHUTDOWN	0
111 #define RESET_KIND_INIT		1
112 #define RESET_KIND_SUSPEND	2
113 
114 #define TG3_DEF_RX_MODE		0
115 #define TG3_DEF_TX_MODE		0
116 #define TG3_DEF_MSG_ENABLE	  \
117 	(NETIF_MSG_DRV		| \
118 	 NETIF_MSG_PROBE	| \
119 	 NETIF_MSG_LINK		| \
120 	 NETIF_MSG_TIMER	| \
121 	 NETIF_MSG_IFDOWN	| \
122 	 NETIF_MSG_IFUP		| \
123 	 NETIF_MSG_RX_ERR	| \
124 	 NETIF_MSG_TX_ERR)
125 
126 #define TG3_GRC_LCLCTL_PWRSW_DELAY	100
127 
128 /* length of time before we decide the hardware is borked,
129  * and dev->tx_timeout() should be called to fix the problem
130  */
131 
132 #define TG3_TX_TIMEOUT			(5 * HZ)
133 
134 /* hardware minimum and maximum for a single frame's data payload */
135 #define TG3_MIN_MTU			ETH_ZLEN
136 #define TG3_MAX_MTU(tp)	\
137 	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
138 
139 /* These numbers seem to be hard coded in the NIC firmware somehow.
140  * You can't change the ring sizes, but you can change where you place
141  * them in the NIC onboard memory.
142  */
143 #define TG3_RX_STD_RING_SIZE(tp) \
144 	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
145 	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
146 #define TG3_DEF_RX_RING_PENDING		200
147 #define TG3_RX_JMB_RING_SIZE(tp) \
148 	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
149 	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
150 #define TG3_DEF_RX_JUMBO_RING_PENDING	100
151 
152 /* Do not place this n-ring entries value into the tp struct itself,
153  * we really want to expose these constants to GCC so that modulo et
154  * al.  operations are done with shifts and masks instead of with
155  * hw multiply/modulo instructions.  Another solution would be to
156  * replace things like '% foo' with '& (foo - 1)'.
157  */
158 
159 #define TG3_TX_RING_SIZE		512
160 #define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)
161 
162 #define TG3_RX_STD_RING_BYTES(tp) \
163 	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
164 #define TG3_RX_JMB_RING_BYTES(tp) \
165 	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
166 #define TG3_RX_RCB_RING_BYTES(tp) \
167 	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
168 #define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
169 				 TG3_TX_RING_SIZE)
170 #define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
171 
172 #define TG3_DMA_BYTE_ENAB		64
173 
174 #define TG3_RX_STD_DMA_SZ		1536
175 #define TG3_RX_JMB_DMA_SZ		9046
176 
177 #define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)
178 
179 #define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
180 #define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
181 
182 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
183 	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
184 
185 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
186 	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
187 
188 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
189  * that are at least dword aligned when used in PCIX mode.  The driver
190  * works around this bug by double copying the packet.  This workaround
191  * is built into the normal double copy length check for efficiency.
192  *
193  * However, the double copy is only necessary on those architectures
194  * where unaligned memory accesses are inefficient.  For those architectures
195  * where unaligned memory accesses incur little penalty, we can reintegrate
196  * the 5701 in the normal rx path.  Doing so saves a device structure
197  * dereference by hardcoding the double copy threshold in place.
198  */
199 #define TG3_RX_COPY_THRESHOLD		256
200 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
201 	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
202 #else
203 	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
204 #endif
205 
206 #if (NET_IP_ALIGN != 0)
207 #define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
208 #else
209 #define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
210 #endif
211 
212 /* minimum number of free TX descriptors required to wake up TX process */
213 #define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
214 #define TG3_TX_BD_DMA_MAX_2K		2048
215 #define TG3_TX_BD_DMA_MAX_4K		4096
216 
217 #define TG3_RAW_IP_ALIGN 2
218 
219 #define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
220 #define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)
221 
222 #define TG3_FW_UPDATE_TIMEOUT_SEC	5
223 #define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)
224 
225 #define FIRMWARE_TG3		"tigon/tg3.bin"
226 #define FIRMWARE_TG357766	"tigon/tg357766.bin"
227 #define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
228 #define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
229 
230 static char version[] =
231 	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
232 
233 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
234 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
235 MODULE_LICENSE("GPL");
236 MODULE_VERSION(DRV_MODULE_VERSION);
237 MODULE_FIRMWARE(FIRMWARE_TG3);
238 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
239 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
240 
241 static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
242 module_param(tg3_debug, int, 0);
243 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
244 
245 #define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
246 #define TG3_DRV_DATA_FLAG_5705_10_100	0x0002
247 
248 static const struct pci_device_id tg3_pci_tbl[] = {
249 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
250 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
251 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
252 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
253 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
254 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
255 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
256 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
257 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
258 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
259 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
260 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
261 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
262 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
263 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
264 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
265 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
266 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
267 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
268 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
269 			TG3_DRV_DATA_FLAG_5705_10_100},
270 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
271 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
272 			TG3_DRV_DATA_FLAG_5705_10_100},
273 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
274 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
275 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
276 			TG3_DRV_DATA_FLAG_5705_10_100},
277 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
278 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
279 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
280 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
281 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
282 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
283 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
284 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
285 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
286 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
287 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
288 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
289 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
290 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
291 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
292 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
293 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
294 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
295 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
296 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
297 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
298 			PCI_VENDOR_ID_LENOVO,
299 			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
300 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
301 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
302 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
303 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
304 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
305 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
306 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
307 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
308 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
309 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
310 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
311 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
312 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
313 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
314 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
315 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
316 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
317 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
318 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
319 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
320 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
321 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
322 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
323 			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
324 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
325 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
326 			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
327 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
328 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
329 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
330 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
331 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
332 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
333 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
334 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
335 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
336 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
337 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
338 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
339 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
340 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
341 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
342 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
343 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
344 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
345 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
346 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
347 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
348 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
349 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
350 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
351 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
352 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
353 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
354 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
355 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
356 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
357 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
358 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
359 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
360 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
361 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
362 	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
363 	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
364 	{}
365 };
366 
367 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
368 
369 static const struct {
370 	const char string[ETH_GSTRING_LEN];
371 } ethtool_stats_keys[] = {
372 	{ "rx_octets" },
373 	{ "rx_fragments" },
374 	{ "rx_ucast_packets" },
375 	{ "rx_mcast_packets" },
376 	{ "rx_bcast_packets" },
377 	{ "rx_fcs_errors" },
378 	{ "rx_align_errors" },
379 	{ "rx_xon_pause_rcvd" },
380 	{ "rx_xoff_pause_rcvd" },
381 	{ "rx_mac_ctrl_rcvd" },
382 	{ "rx_xoff_entered" },
383 	{ "rx_frame_too_long_errors" },
384 	{ "rx_jabbers" },
385 	{ "rx_undersize_packets" },
386 	{ "rx_in_length_errors" },
387 	{ "rx_out_length_errors" },
388 	{ "rx_64_or_less_octet_packets" },
389 	{ "rx_65_to_127_octet_packets" },
390 	{ "rx_128_to_255_octet_packets" },
391 	{ "rx_256_to_511_octet_packets" },
392 	{ "rx_512_to_1023_octet_packets" },
393 	{ "rx_1024_to_1522_octet_packets" },
394 	{ "rx_1523_to_2047_octet_packets" },
395 	{ "rx_2048_to_4095_octet_packets" },
396 	{ "rx_4096_to_8191_octet_packets" },
397 	{ "rx_8192_to_9022_octet_packets" },
398 
399 	{ "tx_octets" },
400 	{ "tx_collisions" },
401 
402 	{ "tx_xon_sent" },
403 	{ "tx_xoff_sent" },
404 	{ "tx_flow_control" },
405 	{ "tx_mac_errors" },
406 	{ "tx_single_collisions" },
407 	{ "tx_mult_collisions" },
408 	{ "tx_deferred" },
409 	{ "tx_excessive_collisions" },
410 	{ "tx_late_collisions" },
411 	{ "tx_collide_2times" },
412 	{ "tx_collide_3times" },
413 	{ "tx_collide_4times" },
414 	{ "tx_collide_5times" },
415 	{ "tx_collide_6times" },
416 	{ "tx_collide_7times" },
417 	{ "tx_collide_8times" },
418 	{ "tx_collide_9times" },
419 	{ "tx_collide_10times" },
420 	{ "tx_collide_11times" },
421 	{ "tx_collide_12times" },
422 	{ "tx_collide_13times" },
423 	{ "tx_collide_14times" },
424 	{ "tx_collide_15times" },
425 	{ "tx_ucast_packets" },
426 	{ "tx_mcast_packets" },
427 	{ "tx_bcast_packets" },
428 	{ "tx_carrier_sense_errors" },
429 	{ "tx_discards" },
430 	{ "tx_errors" },
431 
432 	{ "dma_writeq_full" },
433 	{ "dma_write_prioq_full" },
434 	{ "rxbds_empty" },
435 	{ "rx_discards" },
436 	{ "rx_errors" },
437 	{ "rx_threshold_hit" },
438 
439 	{ "dma_readq_full" },
440 	{ "dma_read_prioq_full" },
441 	{ "tx_comp_queue_full" },
442 
443 	{ "ring_set_send_prod_index" },
444 	{ "ring_status_update" },
445 	{ "nic_irqs" },
446 	{ "nic_avoided_irqs" },
447 	{ "nic_tx_threshold_hit" },
448 
449 	{ "mbuf_lwm_thresh_hit" },
450 };
451 
452 #define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
453 #define TG3_NVRAM_TEST		0
454 #define TG3_LINK_TEST		1
455 #define TG3_REGISTER_TEST	2
456 #define TG3_MEMORY_TEST		3
457 #define TG3_MAC_LOOPB_TEST	4
458 #define TG3_PHY_LOOPB_TEST	5
459 #define TG3_EXT_LOOPB_TEST	6
460 #define TG3_INTERRUPT_TEST	7
461 
462 
463 static const struct {
464 	const char string[ETH_GSTRING_LEN];
465 } ethtool_test_keys[] = {
466 	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
467 	[TG3_LINK_TEST]		= { "link test         (online) " },
468 	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
469 	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
470 	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
471 	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
472 	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
473 	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
474 };
475 
476 #define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
477 
478 
479 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
480 {
481 	writel(val, tp->regs + off);
482 }
483 
484 static u32 tg3_read32(struct tg3 *tp, u32 off)
485 {
486 	return readl(tp->regs + off);
487 }
488 
489 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
490 {
491 	writel(val, tp->aperegs + off);
492 }
493 
494 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
495 {
496 	return readl(tp->aperegs + off);
497 }
498 
499 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
500 {
501 	unsigned long flags;
502 
503 	spin_lock_irqsave(&tp->indirect_lock, flags);
504 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
505 	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
506 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
507 }
508 
509 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
510 {
511 	writel(val, tp->regs + off);
512 	readl(tp->regs + off);
513 }
514 
515 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
516 {
517 	unsigned long flags;
518 	u32 val;
519 
520 	spin_lock_irqsave(&tp->indirect_lock, flags);
521 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
522 	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
523 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
524 	return val;
525 }
526 
527 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
528 {
529 	unsigned long flags;
530 
531 	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
532 		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
533 				       TG3_64BIT_REG_LOW, val);
534 		return;
535 	}
536 	if (off == TG3_RX_STD_PROD_IDX_REG) {
537 		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
538 				       TG3_64BIT_REG_LOW, val);
539 		return;
540 	}
541 
542 	spin_lock_irqsave(&tp->indirect_lock, flags);
543 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
544 	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
545 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
546 
547 	/* In indirect mode when disabling interrupts, we also need
548 	 * to clear the interrupt bit in the GRC local ctrl register.
549 	 */
550 	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
551 	    (val == 0x1)) {
552 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
553 				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
554 	}
555 }
556 
557 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
558 {
559 	unsigned long flags;
560 	u32 val;
561 
562 	spin_lock_irqsave(&tp->indirect_lock, flags);
563 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
564 	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
565 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
566 	return val;
567 }
568 
569 /* usec_wait specifies the wait time in usec when writing to certain registers
570  * where it is unsafe to read back the register without some delay.
571  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
572  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
573  */
574 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
575 {
576 	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
577 		/* Non-posted methods */
578 		tp->write32(tp, off, val);
579 	else {
580 		/* Posted method */
581 		tg3_write32(tp, off, val);
582 		if (usec_wait)
583 			udelay(usec_wait);
584 		tp->read32(tp, off);
585 	}
586 	/* Wait again after the read for the posted method to guarantee that
587 	 * the wait time is met.
588 	 */
589 	if (usec_wait)
590 		udelay(usec_wait);
591 }
592 
593 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
594 {
595 	tp->write32_mbox(tp, off, val);
596 	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
597 	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
598 	     !tg3_flag(tp, ICH_WORKAROUND)))
599 		tp->read32_mbox(tp, off);
600 }
601 
602 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
603 {
604 	void __iomem *mbox = tp->regs + off;
605 	writel(val, mbox);
606 	if (tg3_flag(tp, TXD_MBOX_HWBUG))
607 		writel(val, mbox);
608 	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
609 	    tg3_flag(tp, FLUSH_POSTED_WRITES))
610 		readl(mbox);
611 }
612 
613 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
614 {
615 	return readl(tp->regs + off + GRCMBOX_BASE);
616 }
617 
618 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
619 {
620 	writel(val, tp->regs + off + GRCMBOX_BASE);
621 }
622 
623 #define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
624 #define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
625 #define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
626 #define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
627 #define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)
628 
629 #define tw32(reg, val)			tp->write32(tp, reg, val)
630 #define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
631 #define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
632 #define tr32(reg)			tp->read32(tp, reg)
633 
634 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
635 {
636 	unsigned long flags;
637 
638 	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
639 	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
640 		return;
641 
642 	spin_lock_irqsave(&tp->indirect_lock, flags);
643 	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
644 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
645 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
646 
647 		/* Always leave this as zero. */
648 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
649 	} else {
650 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
651 		tw32_f(TG3PCI_MEM_WIN_DATA, val);
652 
653 		/* Always leave this as zero. */
654 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
655 	}
656 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
657 }
658 
659 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
660 {
661 	unsigned long flags;
662 
663 	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
664 	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
665 		*val = 0;
666 		return;
667 	}
668 
669 	spin_lock_irqsave(&tp->indirect_lock, flags);
670 	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
671 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
672 		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
673 
674 		/* Always leave this as zero. */
675 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
676 	} else {
677 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
678 		*val = tr32(TG3PCI_MEM_WIN_DATA);
679 
680 		/* Always leave this as zero. */
681 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
682 	}
683 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
684 }
685 
686 static void tg3_ape_lock_init(struct tg3 *tp)
687 {
688 	int i;
689 	u32 regbase, bit;
690 
691 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
692 		regbase = TG3_APE_LOCK_GRANT;
693 	else
694 		regbase = TG3_APE_PER_LOCK_GRANT;
695 
696 	/* Make sure the driver hasn't any stale locks. */
697 	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
698 		switch (i) {
699 		case TG3_APE_LOCK_PHY0:
700 		case TG3_APE_LOCK_PHY1:
701 		case TG3_APE_LOCK_PHY2:
702 		case TG3_APE_LOCK_PHY3:
703 			bit = APE_LOCK_GRANT_DRIVER;
704 			break;
705 		default:
706 			if (!tp->pci_fn)
707 				bit = APE_LOCK_GRANT_DRIVER;
708 			else
709 				bit = 1 << tp->pci_fn;
710 		}
711 		tg3_ape_write32(tp, regbase + 4 * i, bit);
712 	}
713 
714 }
715 
716 static int tg3_ape_lock(struct tg3 *tp, int locknum)
717 {
718 	int i, off;
719 	int ret = 0;
720 	u32 status, req, gnt, bit;
721 
722 	if (!tg3_flag(tp, ENABLE_APE))
723 		return 0;
724 
725 	switch (locknum) {
726 	case TG3_APE_LOCK_GPIO:
727 		if (tg3_asic_rev(tp) == ASIC_REV_5761)
728 			return 0;
729 		/* else: fall through */
730 	case TG3_APE_LOCK_GRC:
731 	case TG3_APE_LOCK_MEM:
732 		if (!tp->pci_fn)
733 			bit = APE_LOCK_REQ_DRIVER;
734 		else
735 			bit = 1 << tp->pci_fn;
736 		break;
737 	case TG3_APE_LOCK_PHY0:
738 	case TG3_APE_LOCK_PHY1:
739 	case TG3_APE_LOCK_PHY2:
740 	case TG3_APE_LOCK_PHY3:
741 		bit = APE_LOCK_REQ_DRIVER;
742 		break;
743 	default:
744 		return -EINVAL;
745 	}
746 
747 	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
748 		req = TG3_APE_LOCK_REQ;
749 		gnt = TG3_APE_LOCK_GRANT;
750 	} else {
751 		req = TG3_APE_PER_LOCK_REQ;
752 		gnt = TG3_APE_PER_LOCK_GRANT;
753 	}
754 
755 	off = 4 * locknum;
756 
757 	tg3_ape_write32(tp, req + off, bit);
758 
759 	/* Wait for up to 1 millisecond to acquire lock. */
760 	for (i = 0; i < 100; i++) {
761 		status = tg3_ape_read32(tp, gnt + off);
762 		if (status == bit)
763 			break;
764 		if (pci_channel_offline(tp->pdev))
765 			break;
766 
767 		udelay(10);
768 	}
769 
770 	if (status != bit) {
771 		/* Revoke the lock request. */
772 		tg3_ape_write32(tp, gnt + off, bit);
773 		ret = -EBUSY;
774 	}
775 
776 	return ret;
777 }
778 
779 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
780 {
781 	u32 gnt, bit;
782 
783 	if (!tg3_flag(tp, ENABLE_APE))
784 		return;
785 
786 	switch (locknum) {
787 	case TG3_APE_LOCK_GPIO:
788 		if (tg3_asic_rev(tp) == ASIC_REV_5761)
789 			return;
790 		/* else: fall through */
791 	case TG3_APE_LOCK_GRC:
792 	case TG3_APE_LOCK_MEM:
793 		if (!tp->pci_fn)
794 			bit = APE_LOCK_GRANT_DRIVER;
795 		else
796 			bit = 1 << tp->pci_fn;
797 		break;
798 	case TG3_APE_LOCK_PHY0:
799 	case TG3_APE_LOCK_PHY1:
800 	case TG3_APE_LOCK_PHY2:
801 	case TG3_APE_LOCK_PHY3:
802 		bit = APE_LOCK_GRANT_DRIVER;
803 		break;
804 	default:
805 		return;
806 	}
807 
808 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
809 		gnt = TG3_APE_LOCK_GRANT;
810 	else
811 		gnt = TG3_APE_PER_LOCK_GRANT;
812 
813 	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
814 }
815 
816 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
817 {
818 	u32 apedata;
819 
820 	while (timeout_us) {
821 		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
822 			return -EBUSY;
823 
824 		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
825 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
826 			break;
827 
828 		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
829 
830 		udelay(10);
831 		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
832 	}
833 
834 	return timeout_us ? 0 : -EBUSY;
835 }
836 
837 #ifdef CONFIG_TIGON3_HWMON
838 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
839 {
840 	u32 i, apedata;
841 
842 	for (i = 0; i < timeout_us / 10; i++) {
843 		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
844 
845 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
846 			break;
847 
848 		udelay(10);
849 	}
850 
851 	return i == timeout_us / 10;
852 }
853 
854 static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
855 				   u32 len)
856 {
857 	int err;
858 	u32 i, bufoff, msgoff, maxlen, apedata;
859 
860 	if (!tg3_flag(tp, APE_HAS_NCSI))
861 		return 0;
862 
863 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
864 	if (apedata != APE_SEG_SIG_MAGIC)
865 		return -ENODEV;
866 
867 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
868 	if (!(apedata & APE_FW_STATUS_READY))
869 		return -EAGAIN;
870 
871 	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
872 		 TG3_APE_SHMEM_BASE;
873 	msgoff = bufoff + 2 * sizeof(u32);
874 	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
875 
876 	while (len) {
877 		u32 length;
878 
879 		/* Cap xfer sizes to scratchpad limits. */
880 		length = (len > maxlen) ? maxlen : len;
881 		len -= length;
882 
883 		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
884 		if (!(apedata & APE_FW_STATUS_READY))
885 			return -EAGAIN;
886 
887 		/* Wait for up to 1 msec for APE to service previous event. */
888 		err = tg3_ape_event_lock(tp, 1000);
889 		if (err)
890 			return err;
891 
892 		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
893 			  APE_EVENT_STATUS_SCRTCHPD_READ |
894 			  APE_EVENT_STATUS_EVENT_PENDING;
895 		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
896 
897 		tg3_ape_write32(tp, bufoff, base_off);
898 		tg3_ape_write32(tp, bufoff + sizeof(u32), length);
899 
900 		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
901 		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
902 
903 		base_off += length;
904 
905 		if (tg3_ape_wait_for_event(tp, 30000))
906 			return -EAGAIN;
907 
908 		for (i = 0; length; i += 4, length -= 4) {
909 			u32 val = tg3_ape_read32(tp, msgoff + i);
910 			memcpy(data, &val, sizeof(u32));
911 			data++;
912 		}
913 	}
914 
915 	return 0;
916 }
917 #endif
918 
919 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
920 {
921 	int err;
922 	u32 apedata;
923 
924 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
925 	if (apedata != APE_SEG_SIG_MAGIC)
926 		return -EAGAIN;
927 
928 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
929 	if (!(apedata & APE_FW_STATUS_READY))
930 		return -EAGAIN;
931 
932 	/* Wait for up to 20 millisecond for APE to service previous event. */
933 	err = tg3_ape_event_lock(tp, 20000);
934 	if (err)
935 		return err;
936 
937 	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
938 			event | APE_EVENT_STATUS_EVENT_PENDING);
939 
940 	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
941 	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
942 
943 	return 0;
944 }
945 
946 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
947 {
948 	u32 event;
949 	u32 apedata;
950 
951 	if (!tg3_flag(tp, ENABLE_APE))
952 		return;
953 
954 	switch (kind) {
955 	case RESET_KIND_INIT:
956 		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
957 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
958 				APE_HOST_SEG_SIG_MAGIC);
959 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
960 				APE_HOST_SEG_LEN_MAGIC);
961 		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
962 		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
963 		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
964 			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
965 		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
966 				APE_HOST_BEHAV_NO_PHYLOCK);
967 		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
968 				    TG3_APE_HOST_DRVR_STATE_START);
969 
970 		event = APE_EVENT_STATUS_STATE_START;
971 		break;
972 	case RESET_KIND_SHUTDOWN:
973 		if (device_may_wakeup(&tp->pdev->dev) &&
974 		    tg3_flag(tp, WOL_ENABLE)) {
975 			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
976 					    TG3_APE_HOST_WOL_SPEED_AUTO);
977 			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
978 		} else
979 			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
980 
981 		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
982 
983 		event = APE_EVENT_STATUS_STATE_UNLOAD;
984 		break;
985 	default:
986 		return;
987 	}
988 
989 	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
990 
991 	tg3_ape_send_event(tp, event);
992 }
993 
994 static void tg3_send_ape_heartbeat(struct tg3 *tp,
995 				   unsigned long interval)
996 {
997 	/* Check if hb interval has exceeded */
998 	if (!tg3_flag(tp, ENABLE_APE) ||
999 	    time_before(jiffies, tp->ape_hb_jiffies + interval))
1000 		return;
1001 
1002 	tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
1003 	tp->ape_hb_jiffies = jiffies;
1004 }
1005 
1006 static void tg3_disable_ints(struct tg3 *tp)
1007 {
1008 	int i;
1009 
1010 	tw32(TG3PCI_MISC_HOST_CTRL,
1011 	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
1012 	for (i = 0; i < tp->irq_max; i++)
1013 		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
1014 }
1015 
1016 static void tg3_enable_ints(struct tg3 *tp)
1017 {
1018 	int i;
1019 
1020 	tp->irq_sync = 0;
1021 	wmb();
1022 
1023 	tw32(TG3PCI_MISC_HOST_CTRL,
1024 	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
1025 
1026 	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
1027 	for (i = 0; i < tp->irq_cnt; i++) {
1028 		struct tg3_napi *tnapi = &tp->napi[i];
1029 
1030 		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1031 		if (tg3_flag(tp, 1SHOT_MSI))
1032 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1033 
1034 		tp->coal_now |= tnapi->coal_now;
1035 	}
1036 
1037 	/* Force an initial interrupt */
1038 	if (!tg3_flag(tp, TAGGED_STATUS) &&
1039 	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1040 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1041 	else
1042 		tw32(HOSTCC_MODE, tp->coal_now);
1043 
1044 	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1045 }
1046 
1047 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1048 {
1049 	struct tg3 *tp = tnapi->tp;
1050 	struct tg3_hw_status *sblk = tnapi->hw_status;
1051 	unsigned int work_exists = 0;
1052 
1053 	/* check for phy events */
1054 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1055 		if (sblk->status & SD_STATUS_LINK_CHG)
1056 			work_exists = 1;
1057 	}
1058 
1059 	/* check for TX work to do */
1060 	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1061 		work_exists = 1;
1062 
1063 	/* check for RX work to do */
1064 	if (tnapi->rx_rcb_prod_idx &&
1065 	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1066 		work_exists = 1;
1067 
1068 	return work_exists;
1069 }
1070 
1071 /* tg3_int_reenable
1072  *  similar to tg3_enable_ints, but it accurately determines whether there
1073  *  is new work pending and can return without flushing the PIO write
1074  *  which reenables interrupts
1075  */
1076 static void tg3_int_reenable(struct tg3_napi *tnapi)
1077 {
1078 	struct tg3 *tp = tnapi->tp;
1079 
1080 	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1081 	mmiowb();
1082 
1083 	/* When doing tagged status, this work check is unnecessary.
1084 	 * The last_tag we write above tells the chip which piece of
1085 	 * work we've completed.
1086 	 */
1087 	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1088 		tw32(HOSTCC_MODE, tp->coalesce_mode |
1089 		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
1090 }
1091 
1092 static void tg3_switch_clocks(struct tg3 *tp)
1093 {
1094 	u32 clock_ctrl;
1095 	u32 orig_clock_ctrl;
1096 
1097 	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1098 		return;
1099 
1100 	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1101 
1102 	orig_clock_ctrl = clock_ctrl;
1103 	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1104 		       CLOCK_CTRL_CLKRUN_OENABLE |
1105 		       0x1f);
1106 	tp->pci_clock_ctrl = clock_ctrl;
1107 
1108 	if (tg3_flag(tp, 5705_PLUS)) {
1109 		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1110 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
1111 				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1112 		}
1113 	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1114 		tw32_wait_f(TG3PCI_CLOCK_CTRL,
1115 			    clock_ctrl |
1116 			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1117 			    40);
1118 		tw32_wait_f(TG3PCI_CLOCK_CTRL,
1119 			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
1120 			    40);
1121 	}
1122 	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1123 }
1124 
1125 #define PHY_BUSY_LOOPS	5000
1126 
1127 static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
1128 			 u32 *val)
1129 {
1130 	u32 frame_val;
1131 	unsigned int loops;
1132 	int ret;
1133 
1134 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1135 		tw32_f(MAC_MI_MODE,
1136 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1137 		udelay(80);
1138 	}
1139 
1140 	tg3_ape_lock(tp, tp->phy_ape_lock);
1141 
1142 	*val = 0x0;
1143 
1144 	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1145 		      MI_COM_PHY_ADDR_MASK);
1146 	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1147 		      MI_COM_REG_ADDR_MASK);
1148 	frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1149 
1150 	tw32_f(MAC_MI_COM, frame_val);
1151 
1152 	loops = PHY_BUSY_LOOPS;
1153 	while (loops != 0) {
1154 		udelay(10);
1155 		frame_val = tr32(MAC_MI_COM);
1156 
1157 		if ((frame_val & MI_COM_BUSY) == 0) {
1158 			udelay(5);
1159 			frame_val = tr32(MAC_MI_COM);
1160 			break;
1161 		}
1162 		loops -= 1;
1163 	}
1164 
1165 	ret = -EBUSY;
1166 	if (loops != 0) {
1167 		*val = frame_val & MI_COM_DATA_MASK;
1168 		ret = 0;
1169 	}
1170 
1171 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1172 		tw32_f(MAC_MI_MODE, tp->mi_mode);
1173 		udelay(80);
1174 	}
1175 
1176 	tg3_ape_unlock(tp, tp->phy_ape_lock);
1177 
1178 	return ret;
1179 }
1180 
1181 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1182 {
1183 	return __tg3_readphy(tp, tp->phy_addr, reg, val);
1184 }
1185 
1186 static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
1187 			  u32 val)
1188 {
1189 	u32 frame_val;
1190 	unsigned int loops;
1191 	int ret;
1192 
1193 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1194 	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1195 		return 0;
1196 
1197 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1198 		tw32_f(MAC_MI_MODE,
1199 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1200 		udelay(80);
1201 	}
1202 
1203 	tg3_ape_lock(tp, tp->phy_ape_lock);
1204 
1205 	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1206 		      MI_COM_PHY_ADDR_MASK);
1207 	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1208 		      MI_COM_REG_ADDR_MASK);
1209 	frame_val |= (val & MI_COM_DATA_MASK);
1210 	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1211 
1212 	tw32_f(MAC_MI_COM, frame_val);
1213 
1214 	loops = PHY_BUSY_LOOPS;
1215 	while (loops != 0) {
1216 		udelay(10);
1217 		frame_val = tr32(MAC_MI_COM);
1218 		if ((frame_val & MI_COM_BUSY) == 0) {
1219 			udelay(5);
1220 			frame_val = tr32(MAC_MI_COM);
1221 			break;
1222 		}
1223 		loops -= 1;
1224 	}
1225 
1226 	ret = -EBUSY;
1227 	if (loops != 0)
1228 		ret = 0;
1229 
1230 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1231 		tw32_f(MAC_MI_MODE, tp->mi_mode);
1232 		udelay(80);
1233 	}
1234 
1235 	tg3_ape_unlock(tp, tp->phy_ape_lock);
1236 
1237 	return ret;
1238 }
1239 
1240 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1241 {
1242 	return __tg3_writephy(tp, tp->phy_addr, reg, val);
1243 }
1244 
1245 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1246 {
1247 	int err;
1248 
1249 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1250 	if (err)
1251 		goto done;
1252 
1253 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1254 	if (err)
1255 		goto done;
1256 
1257 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1258 			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1259 	if (err)
1260 		goto done;
1261 
1262 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1263 
1264 done:
1265 	return err;
1266 }
1267 
1268 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1269 {
1270 	int err;
1271 
1272 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1273 	if (err)
1274 		goto done;
1275 
1276 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1277 	if (err)
1278 		goto done;
1279 
1280 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1281 			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1282 	if (err)
1283 		goto done;
1284 
1285 	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1286 
1287 done:
1288 	return err;
1289 }
1290 
1291 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1292 {
1293 	int err;
1294 
1295 	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1296 	if (!err)
1297 		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1298 
1299 	return err;
1300 }
1301 
1302 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1303 {
1304 	int err;
1305 
1306 	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1307 	if (!err)
1308 		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1309 
1310 	return err;
1311 }
1312 
1313 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1314 {
1315 	int err;
1316 
1317 	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1318 			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1319 			   MII_TG3_AUXCTL_SHDWSEL_MISC);
1320 	if (!err)
1321 		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1322 
1323 	return err;
1324 }
1325 
1326 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1327 {
1328 	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1329 		set |= MII_TG3_AUXCTL_MISC_WREN;
1330 
1331 	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1332 }
1333 
1334 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1335 {
1336 	u32 val;
1337 	int err;
1338 
1339 	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1340 
1341 	if (err)
1342 		return err;
1343 
1344 	if (enable)
1345 		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1346 	else
1347 		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1348 
1349 	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1350 				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1351 
1352 	return err;
1353 }
1354 
1355 static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
1356 {
1357 	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
1358 			    reg | val | MII_TG3_MISC_SHDW_WREN);
1359 }
1360 
1361 static int tg3_bmcr_reset(struct tg3 *tp)
1362 {
1363 	u32 phy_control;
1364 	int limit, err;
1365 
1366 	/* OK, reset it, and poll the BMCR_RESET bit until it
1367 	 * clears or we time out.
1368 	 */
1369 	phy_control = BMCR_RESET;
1370 	err = tg3_writephy(tp, MII_BMCR, phy_control);
1371 	if (err != 0)
1372 		return -EBUSY;
1373 
1374 	limit = 5000;
1375 	while (limit--) {
1376 		err = tg3_readphy(tp, MII_BMCR, &phy_control);
1377 		if (err != 0)
1378 			return -EBUSY;
1379 
1380 		if ((phy_control & BMCR_RESET) == 0) {
1381 			udelay(40);
1382 			break;
1383 		}
1384 		udelay(10);
1385 	}
1386 	if (limit < 0)
1387 		return -EBUSY;
1388 
1389 	return 0;
1390 }
1391 
1392 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1393 {
1394 	struct tg3 *tp = bp->priv;
1395 	u32 val;
1396 
1397 	spin_lock_bh(&tp->lock);
1398 
1399 	if (__tg3_readphy(tp, mii_id, reg, &val))
1400 		val = -EIO;
1401 
1402 	spin_unlock_bh(&tp->lock);
1403 
1404 	return val;
1405 }
1406 
1407 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1408 {
1409 	struct tg3 *tp = bp->priv;
1410 	u32 ret = 0;
1411 
1412 	spin_lock_bh(&tp->lock);
1413 
1414 	if (__tg3_writephy(tp, mii_id, reg, val))
1415 		ret = -EIO;
1416 
1417 	spin_unlock_bh(&tp->lock);
1418 
1419 	return ret;
1420 }
1421 
1422 static void tg3_mdio_config_5785(struct tg3 *tp)
1423 {
1424 	u32 val;
1425 	struct phy_device *phydev;
1426 
1427 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1428 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1429 	case PHY_ID_BCM50610:
1430 	case PHY_ID_BCM50610M:
1431 		val = MAC_PHYCFG2_50610_LED_MODES;
1432 		break;
1433 	case PHY_ID_BCMAC131:
1434 		val = MAC_PHYCFG2_AC131_LED_MODES;
1435 		break;
1436 	case PHY_ID_RTL8211C:
1437 		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1438 		break;
1439 	case PHY_ID_RTL8201E:
1440 		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1441 		break;
1442 	default:
1443 		return;
1444 	}
1445 
1446 	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1447 		tw32(MAC_PHYCFG2, val);
1448 
1449 		val = tr32(MAC_PHYCFG1);
1450 		val &= ~(MAC_PHYCFG1_RGMII_INT |
1451 			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1452 		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1453 		tw32(MAC_PHYCFG1, val);
1454 
1455 		return;
1456 	}
1457 
1458 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1459 		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1460 		       MAC_PHYCFG2_FMODE_MASK_MASK |
1461 		       MAC_PHYCFG2_GMODE_MASK_MASK |
1462 		       MAC_PHYCFG2_ACT_MASK_MASK   |
1463 		       MAC_PHYCFG2_QUAL_MASK_MASK |
1464 		       MAC_PHYCFG2_INBAND_ENABLE;
1465 
1466 	tw32(MAC_PHYCFG2, val);
1467 
1468 	val = tr32(MAC_PHYCFG1);
1469 	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1470 		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1471 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1472 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1473 			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1474 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1475 			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1476 	}
1477 	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1478 	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1479 	tw32(MAC_PHYCFG1, val);
1480 
1481 	val = tr32(MAC_EXT_RGMII_MODE);
1482 	val &= ~(MAC_RGMII_MODE_RX_INT_B |
1483 		 MAC_RGMII_MODE_RX_QUALITY |
1484 		 MAC_RGMII_MODE_RX_ACTIVITY |
1485 		 MAC_RGMII_MODE_RX_ENG_DET |
1486 		 MAC_RGMII_MODE_TX_ENABLE |
1487 		 MAC_RGMII_MODE_TX_LOWPWR |
1488 		 MAC_RGMII_MODE_TX_RESET);
1489 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1490 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1491 			val |= MAC_RGMII_MODE_RX_INT_B |
1492 			       MAC_RGMII_MODE_RX_QUALITY |
1493 			       MAC_RGMII_MODE_RX_ACTIVITY |
1494 			       MAC_RGMII_MODE_RX_ENG_DET;
1495 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1496 			val |= MAC_RGMII_MODE_TX_ENABLE |
1497 			       MAC_RGMII_MODE_TX_LOWPWR |
1498 			       MAC_RGMII_MODE_TX_RESET;
1499 	}
1500 	tw32(MAC_EXT_RGMII_MODE, val);
1501 }
1502 
1503 static void tg3_mdio_start(struct tg3 *tp)
1504 {
1505 	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1506 	tw32_f(MAC_MI_MODE, tp->mi_mode);
1507 	udelay(80);
1508 
1509 	if (tg3_flag(tp, MDIOBUS_INITED) &&
1510 	    tg3_asic_rev(tp) == ASIC_REV_5785)
1511 		tg3_mdio_config_5785(tp);
1512 }
1513 
1514 static int tg3_mdio_init(struct tg3 *tp)
1515 {
1516 	int i;
1517 	u32 reg;
1518 	struct phy_device *phydev;
1519 
1520 	if (tg3_flag(tp, 5717_PLUS)) {
1521 		u32 is_serdes;
1522 
1523 		tp->phy_addr = tp->pci_fn + 1;
1524 
1525 		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1526 			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1527 		else
1528 			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1529 				    TG3_CPMU_PHY_STRAP_IS_SERDES;
1530 		if (is_serdes)
1531 			tp->phy_addr += 7;
1532 	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
1533 		int addr;
1534 
1535 		addr = ssb_gige_get_phyaddr(tp->pdev);
1536 		if (addr < 0)
1537 			return addr;
1538 		tp->phy_addr = addr;
1539 	} else
1540 		tp->phy_addr = TG3_PHY_MII_ADDR;
1541 
1542 	tg3_mdio_start(tp);
1543 
1544 	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1545 		return 0;
1546 
1547 	tp->mdio_bus = mdiobus_alloc();
1548 	if (tp->mdio_bus == NULL)
1549 		return -ENOMEM;
1550 
1551 	tp->mdio_bus->name     = "tg3 mdio bus";
1552 	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1553 		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1554 	tp->mdio_bus->priv     = tp;
1555 	tp->mdio_bus->parent   = &tp->pdev->dev;
1556 	tp->mdio_bus->read     = &tg3_mdio_read;
1557 	tp->mdio_bus->write    = &tg3_mdio_write;
1558 	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
1559 
1560 	/* The bus registration will look for all the PHYs on the mdio bus.
1561 	 * Unfortunately, it does not ensure the PHY is powered up before
1562 	 * accessing the PHY ID registers.  A chip reset is the
1563 	 * quickest way to bring the device back to an operational state..
1564 	 */
1565 	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1566 		tg3_bmcr_reset(tp);
1567 
1568 	i = mdiobus_register(tp->mdio_bus);
1569 	if (i) {
1570 		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1571 		mdiobus_free(tp->mdio_bus);
1572 		return i;
1573 	}
1574 
1575 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1576 
1577 	if (!phydev || !phydev->drv) {
1578 		dev_warn(&tp->pdev->dev, "No PHY devices\n");
1579 		mdiobus_unregister(tp->mdio_bus);
1580 		mdiobus_free(tp->mdio_bus);
1581 		return -ENODEV;
1582 	}
1583 
1584 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1585 	case PHY_ID_BCM57780:
1586 		phydev->interface = PHY_INTERFACE_MODE_GMII;
1587 		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1588 		break;
1589 	case PHY_ID_BCM50610:
1590 	case PHY_ID_BCM50610M:
1591 		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1592 				     PHY_BRCM_RX_REFCLK_UNUSED |
1593 				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
1594 				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
1595 		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1596 			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1597 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1598 			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1599 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1600 			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1601 		/* fall through */
1602 	case PHY_ID_RTL8211C:
1603 		phydev->interface = PHY_INTERFACE_MODE_RGMII;
1604 		break;
1605 	case PHY_ID_RTL8201E:
1606 	case PHY_ID_BCMAC131:
1607 		phydev->interface = PHY_INTERFACE_MODE_MII;
1608 		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1609 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
1610 		break;
1611 	}
1612 
1613 	tg3_flag_set(tp, MDIOBUS_INITED);
1614 
1615 	if (tg3_asic_rev(tp) == ASIC_REV_5785)
1616 		tg3_mdio_config_5785(tp);
1617 
1618 	return 0;
1619 }
1620 
1621 static void tg3_mdio_fini(struct tg3 *tp)
1622 {
1623 	if (tg3_flag(tp, MDIOBUS_INITED)) {
1624 		tg3_flag_clear(tp, MDIOBUS_INITED);
1625 		mdiobus_unregister(tp->mdio_bus);
1626 		mdiobus_free(tp->mdio_bus);
1627 	}
1628 }
1629 
1630 /* tp->lock is held. */
1631 static inline void tg3_generate_fw_event(struct tg3 *tp)
1632 {
1633 	u32 val;
1634 
1635 	val = tr32(GRC_RX_CPU_EVENT);
1636 	val |= GRC_RX_CPU_DRIVER_EVENT;
1637 	tw32_f(GRC_RX_CPU_EVENT, val);
1638 
1639 	tp->last_event_jiffies = jiffies;
1640 }
1641 
1642 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1643 
1644 /* tp->lock is held. */
1645 static void tg3_wait_for_event_ack(struct tg3 *tp)
1646 {
1647 	int i;
1648 	unsigned int delay_cnt;
1649 	long time_remain;
1650 
1651 	/* If enough time has passed, no wait is necessary. */
1652 	time_remain = (long)(tp->last_event_jiffies + 1 +
1653 		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1654 		      (long)jiffies;
1655 	if (time_remain < 0)
1656 		return;
1657 
1658 	/* Check if we can shorten the wait time. */
1659 	delay_cnt = jiffies_to_usecs(time_remain);
1660 	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1661 		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1662 	delay_cnt = (delay_cnt >> 3) + 1;
1663 
1664 	for (i = 0; i < delay_cnt; i++) {
1665 		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1666 			break;
1667 		if (pci_channel_offline(tp->pdev))
1668 			break;
1669 
1670 		udelay(8);
1671 	}
1672 }
1673 
1674 /* tp->lock is held. */
1675 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1676 {
1677 	u32 reg, val;
1678 
1679 	val = 0;
1680 	if (!tg3_readphy(tp, MII_BMCR, &reg))
1681 		val = reg << 16;
1682 	if (!tg3_readphy(tp, MII_BMSR, &reg))
1683 		val |= (reg & 0xffff);
1684 	*data++ = val;
1685 
1686 	val = 0;
1687 	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1688 		val = reg << 16;
1689 	if (!tg3_readphy(tp, MII_LPA, &reg))
1690 		val |= (reg & 0xffff);
1691 	*data++ = val;
1692 
1693 	val = 0;
1694 	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1695 		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1696 			val = reg << 16;
1697 		if (!tg3_readphy(tp, MII_STAT1000, &reg))
1698 			val |= (reg & 0xffff);
1699 	}
1700 	*data++ = val;
1701 
1702 	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1703 		val = reg << 16;
1704 	else
1705 		val = 0;
1706 	*data++ = val;
1707 }
1708 
1709 /* tp->lock is held. */
1710 static void tg3_ump_link_report(struct tg3 *tp)
1711 {
1712 	u32 data[4];
1713 
1714 	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1715 		return;
1716 
1717 	tg3_phy_gather_ump_data(tp, data);
1718 
1719 	tg3_wait_for_event_ack(tp);
1720 
1721 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1722 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1723 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1724 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1725 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1726 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1727 
1728 	tg3_generate_fw_event(tp);
1729 }
1730 
1731 /* tp->lock is held. */
1732 static void tg3_stop_fw(struct tg3 *tp)
1733 {
1734 	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1735 		/* Wait for RX cpu to ACK the previous event. */
1736 		tg3_wait_for_event_ack(tp);
1737 
1738 		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1739 
1740 		tg3_generate_fw_event(tp);
1741 
1742 		/* Wait for RX cpu to ACK this event. */
1743 		tg3_wait_for_event_ack(tp);
1744 	}
1745 }
1746 
1747 /* tp->lock is held. */
1748 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1749 {
1750 	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1751 		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1752 
1753 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1754 		switch (kind) {
1755 		case RESET_KIND_INIT:
1756 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1757 				      DRV_STATE_START);
1758 			break;
1759 
1760 		case RESET_KIND_SHUTDOWN:
1761 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1762 				      DRV_STATE_UNLOAD);
1763 			break;
1764 
1765 		case RESET_KIND_SUSPEND:
1766 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1767 				      DRV_STATE_SUSPEND);
1768 			break;
1769 
1770 		default:
1771 			break;
1772 		}
1773 	}
1774 }
1775 
1776 /* tp->lock is held. */
1777 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1778 {
1779 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1780 		switch (kind) {
1781 		case RESET_KIND_INIT:
1782 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1783 				      DRV_STATE_START_DONE);
1784 			break;
1785 
1786 		case RESET_KIND_SHUTDOWN:
1787 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1788 				      DRV_STATE_UNLOAD_DONE);
1789 			break;
1790 
1791 		default:
1792 			break;
1793 		}
1794 	}
1795 }
1796 
1797 /* tp->lock is held. */
1798 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1799 {
1800 	if (tg3_flag(tp, ENABLE_ASF)) {
1801 		switch (kind) {
1802 		case RESET_KIND_INIT:
1803 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1804 				      DRV_STATE_START);
1805 			break;
1806 
1807 		case RESET_KIND_SHUTDOWN:
1808 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1809 				      DRV_STATE_UNLOAD);
1810 			break;
1811 
1812 		case RESET_KIND_SUSPEND:
1813 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1814 				      DRV_STATE_SUSPEND);
1815 			break;
1816 
1817 		default:
1818 			break;
1819 		}
1820 	}
1821 }
1822 
1823 static int tg3_poll_fw(struct tg3 *tp)
1824 {
1825 	int i;
1826 	u32 val;
1827 
1828 	if (tg3_flag(tp, NO_FWARE_REPORTED))
1829 		return 0;
1830 
1831 	if (tg3_flag(tp, IS_SSB_CORE)) {
1832 		/* We don't use firmware. */
1833 		return 0;
1834 	}
1835 
1836 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1837 		/* Wait up to 20ms for init done. */
1838 		for (i = 0; i < 200; i++) {
1839 			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1840 				return 0;
1841 			if (pci_channel_offline(tp->pdev))
1842 				return -ENODEV;
1843 
1844 			udelay(100);
1845 		}
1846 		return -ENODEV;
1847 	}
1848 
1849 	/* Wait for firmware initialization to complete. */
1850 	for (i = 0; i < 100000; i++) {
1851 		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1852 		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1853 			break;
1854 		if (pci_channel_offline(tp->pdev)) {
1855 			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1856 				tg3_flag_set(tp, NO_FWARE_REPORTED);
1857 				netdev_info(tp->dev, "No firmware running\n");
1858 			}
1859 
1860 			break;
1861 		}
1862 
1863 		udelay(10);
1864 	}
1865 
1866 	/* Chip might not be fitted with firmware.  Some Sun onboard
1867 	 * parts are configured like that.  So don't signal the timeout
1868 	 * of the above loop as an error, but do report the lack of
1869 	 * running firmware once.
1870 	 */
1871 	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1872 		tg3_flag_set(tp, NO_FWARE_REPORTED);
1873 
1874 		netdev_info(tp->dev, "No firmware running\n");
1875 	}
1876 
1877 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1878 		/* The 57765 A0 needs a little more
1879 		 * time to do some important work.
1880 		 */
1881 		mdelay(10);
1882 	}
1883 
1884 	return 0;
1885 }
1886 
1887 static void tg3_link_report(struct tg3 *tp)
1888 {
1889 	if (!netif_carrier_ok(tp->dev)) {
1890 		netif_info(tp, link, tp->dev, "Link is down\n");
1891 		tg3_ump_link_report(tp);
1892 	} else if (netif_msg_link(tp)) {
1893 		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1894 			    (tp->link_config.active_speed == SPEED_1000 ?
1895 			     1000 :
1896 			     (tp->link_config.active_speed == SPEED_100 ?
1897 			      100 : 10)),
1898 			    (tp->link_config.active_duplex == DUPLEX_FULL ?
1899 			     "full" : "half"));
1900 
1901 		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1902 			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1903 			    "on" : "off",
1904 			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1905 			    "on" : "off");
1906 
1907 		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1908 			netdev_info(tp->dev, "EEE is %s\n",
1909 				    tp->setlpicnt ? "enabled" : "disabled");
1910 
1911 		tg3_ump_link_report(tp);
1912 	}
1913 
1914 	tp->link_up = netif_carrier_ok(tp->dev);
1915 }
1916 
1917 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1918 {
1919 	u32 flowctrl = 0;
1920 
1921 	if (adv & ADVERTISE_PAUSE_CAP) {
1922 		flowctrl |= FLOW_CTRL_RX;
1923 		if (!(adv & ADVERTISE_PAUSE_ASYM))
1924 			flowctrl |= FLOW_CTRL_TX;
1925 	} else if (adv & ADVERTISE_PAUSE_ASYM)
1926 		flowctrl |= FLOW_CTRL_TX;
1927 
1928 	return flowctrl;
1929 }
1930 
1931 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1932 {
1933 	u16 miireg;
1934 
1935 	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1936 		miireg = ADVERTISE_1000XPAUSE;
1937 	else if (flow_ctrl & FLOW_CTRL_TX)
1938 		miireg = ADVERTISE_1000XPSE_ASYM;
1939 	else if (flow_ctrl & FLOW_CTRL_RX)
1940 		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1941 	else
1942 		miireg = 0;
1943 
1944 	return miireg;
1945 }
1946 
1947 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1948 {
1949 	u32 flowctrl = 0;
1950 
1951 	if (adv & ADVERTISE_1000XPAUSE) {
1952 		flowctrl |= FLOW_CTRL_RX;
1953 		if (!(adv & ADVERTISE_1000XPSE_ASYM))
1954 			flowctrl |= FLOW_CTRL_TX;
1955 	} else if (adv & ADVERTISE_1000XPSE_ASYM)
1956 		flowctrl |= FLOW_CTRL_TX;
1957 
1958 	return flowctrl;
1959 }
1960 
1961 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1962 {
1963 	u8 cap = 0;
1964 
1965 	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1966 		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1967 	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1968 		if (lcladv & ADVERTISE_1000XPAUSE)
1969 			cap = FLOW_CTRL_RX;
1970 		if (rmtadv & ADVERTISE_1000XPAUSE)
1971 			cap = FLOW_CTRL_TX;
1972 	}
1973 
1974 	return cap;
1975 }
1976 
1977 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1978 {
1979 	u8 autoneg;
1980 	u8 flowctrl = 0;
1981 	u32 old_rx_mode = tp->rx_mode;
1982 	u32 old_tx_mode = tp->tx_mode;
1983 
1984 	if (tg3_flag(tp, USE_PHYLIB))
1985 		autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1986 	else
1987 		autoneg = tp->link_config.autoneg;
1988 
1989 	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1990 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1991 			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1992 		else
1993 			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1994 	} else
1995 		flowctrl = tp->link_config.flowctrl;
1996 
1997 	tp->link_config.active_flowctrl = flowctrl;
1998 
1999 	if (flowctrl & FLOW_CTRL_RX)
2000 		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
2001 	else
2002 		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
2003 
2004 	if (old_rx_mode != tp->rx_mode)
2005 		tw32_f(MAC_RX_MODE, tp->rx_mode);
2006 
2007 	if (flowctrl & FLOW_CTRL_TX)
2008 		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
2009 	else
2010 		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
2011 
2012 	if (old_tx_mode != tp->tx_mode)
2013 		tw32_f(MAC_TX_MODE, tp->tx_mode);
2014 }
2015 
2016 static void tg3_adjust_link(struct net_device *dev)
2017 {
2018 	u8 oldflowctrl, linkmesg = 0;
2019 	u32 mac_mode, lcl_adv, rmt_adv;
2020 	struct tg3 *tp = netdev_priv(dev);
2021 	struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2022 
2023 	spin_lock_bh(&tp->lock);
2024 
2025 	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2026 				    MAC_MODE_HALF_DUPLEX);
2027 
2028 	oldflowctrl = tp->link_config.active_flowctrl;
2029 
2030 	if (phydev->link) {
2031 		lcl_adv = 0;
2032 		rmt_adv = 0;
2033 
2034 		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2035 			mac_mode |= MAC_MODE_PORT_MODE_MII;
2036 		else if (phydev->speed == SPEED_1000 ||
2037 			 tg3_asic_rev(tp) != ASIC_REV_5785)
2038 			mac_mode |= MAC_MODE_PORT_MODE_GMII;
2039 		else
2040 			mac_mode |= MAC_MODE_PORT_MODE_MII;
2041 
2042 		if (phydev->duplex == DUPLEX_HALF)
2043 			mac_mode |= MAC_MODE_HALF_DUPLEX;
2044 		else {
2045 			lcl_adv = mii_advertise_flowctrl(
2046 				  tp->link_config.flowctrl);
2047 
2048 			if (phydev->pause)
2049 				rmt_adv = LPA_PAUSE_CAP;
2050 			if (phydev->asym_pause)
2051 				rmt_adv |= LPA_PAUSE_ASYM;
2052 		}
2053 
2054 		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2055 	} else
2056 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
2057 
2058 	if (mac_mode != tp->mac_mode) {
2059 		tp->mac_mode = mac_mode;
2060 		tw32_f(MAC_MODE, tp->mac_mode);
2061 		udelay(40);
2062 	}
2063 
2064 	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2065 		if (phydev->speed == SPEED_10)
2066 			tw32(MAC_MI_STAT,
2067 			     MAC_MI_STAT_10MBPS_MODE |
2068 			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2069 		else
2070 			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2071 	}
2072 
2073 	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2074 		tw32(MAC_TX_LENGTHS,
2075 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2076 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2077 		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2078 	else
2079 		tw32(MAC_TX_LENGTHS,
2080 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2081 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2082 		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2083 
2084 	if (phydev->link != tp->old_link ||
2085 	    phydev->speed != tp->link_config.active_speed ||
2086 	    phydev->duplex != tp->link_config.active_duplex ||
2087 	    oldflowctrl != tp->link_config.active_flowctrl)
2088 		linkmesg = 1;
2089 
2090 	tp->old_link = phydev->link;
2091 	tp->link_config.active_speed = phydev->speed;
2092 	tp->link_config.active_duplex = phydev->duplex;
2093 
2094 	spin_unlock_bh(&tp->lock);
2095 
2096 	if (linkmesg)
2097 		tg3_link_report(tp);
2098 }
2099 
2100 static int tg3_phy_init(struct tg3 *tp)
2101 {
2102 	struct phy_device *phydev;
2103 
2104 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2105 		return 0;
2106 
2107 	/* Bring the PHY back to a known state. */
2108 	tg3_bmcr_reset(tp);
2109 
2110 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2111 
2112 	/* Attach the MAC to the PHY. */
2113 	phydev = phy_connect(tp->dev, phydev_name(phydev),
2114 			     tg3_adjust_link, phydev->interface);
2115 	if (IS_ERR(phydev)) {
2116 		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2117 		return PTR_ERR(phydev);
2118 	}
2119 
2120 	/* Mask with MAC supported features. */
2121 	switch (phydev->interface) {
2122 	case PHY_INTERFACE_MODE_GMII:
2123 	case PHY_INTERFACE_MODE_RGMII:
2124 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2125 			phy_set_max_speed(phydev, SPEED_1000);
2126 			phy_support_asym_pause(phydev);
2127 			break;
2128 		}
2129 		/* fall through */
2130 	case PHY_INTERFACE_MODE_MII:
2131 		phy_set_max_speed(phydev, SPEED_100);
2132 		phy_support_asym_pause(phydev);
2133 		break;
2134 	default:
2135 		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2136 		return -EINVAL;
2137 	}
2138 
2139 	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2140 
2141 	phy_attached_info(phydev);
2142 
2143 	return 0;
2144 }
2145 
2146 static void tg3_phy_start(struct tg3 *tp)
2147 {
2148 	struct phy_device *phydev;
2149 
2150 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2151 		return;
2152 
2153 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2154 
2155 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2156 		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2157 		phydev->speed = tp->link_config.speed;
2158 		phydev->duplex = tp->link_config.duplex;
2159 		phydev->autoneg = tp->link_config.autoneg;
2160 		phydev->advertising = tp->link_config.advertising;
2161 	}
2162 
2163 	phy_start(phydev);
2164 
2165 	phy_start_aneg(phydev);
2166 }
2167 
2168 static void tg3_phy_stop(struct tg3 *tp)
2169 {
2170 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2171 		return;
2172 
2173 	phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2174 }
2175 
2176 static void tg3_phy_fini(struct tg3 *tp)
2177 {
2178 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2179 		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2180 		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2181 	}
2182 }
2183 
2184 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2185 {
2186 	int err;
2187 	u32 val;
2188 
2189 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2190 		return 0;
2191 
2192 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2193 		/* Cannot do read-modify-write on 5401 */
2194 		err = tg3_phy_auxctl_write(tp,
2195 					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2196 					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2197 					   0x4c20);
2198 		goto done;
2199 	}
2200 
2201 	err = tg3_phy_auxctl_read(tp,
2202 				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2203 	if (err)
2204 		return err;
2205 
2206 	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2207 	err = tg3_phy_auxctl_write(tp,
2208 				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2209 
2210 done:
2211 	return err;
2212 }
2213 
2214 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2215 {
2216 	u32 phytest;
2217 
2218 	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2219 		u32 phy;
2220 
2221 		tg3_writephy(tp, MII_TG3_FET_TEST,
2222 			     phytest | MII_TG3_FET_SHADOW_EN);
2223 		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2224 			if (enable)
2225 				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2226 			else
2227 				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2228 			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2229 		}
2230 		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2231 	}
2232 }
2233 
2234 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2235 {
2236 	u32 reg;
2237 
2238 	if (!tg3_flag(tp, 5705_PLUS) ||
2239 	    (tg3_flag(tp, 5717_PLUS) &&
2240 	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2241 		return;
2242 
2243 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2244 		tg3_phy_fet_toggle_apd(tp, enable);
2245 		return;
2246 	}
2247 
2248 	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2249 	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2250 	      MII_TG3_MISC_SHDW_SCR5_SDTL |
2251 	      MII_TG3_MISC_SHDW_SCR5_C125OE;
2252 	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2253 		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2254 
2255 	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2256 
2257 
2258 	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2259 	if (enable)
2260 		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2261 
2262 	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2263 }
2264 
2265 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2266 {
2267 	u32 phy;
2268 
2269 	if (!tg3_flag(tp, 5705_PLUS) ||
2270 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2271 		return;
2272 
2273 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2274 		u32 ephy;
2275 
2276 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2277 			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2278 
2279 			tg3_writephy(tp, MII_TG3_FET_TEST,
2280 				     ephy | MII_TG3_FET_SHADOW_EN);
2281 			if (!tg3_readphy(tp, reg, &phy)) {
2282 				if (enable)
2283 					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2284 				else
2285 					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2286 				tg3_writephy(tp, reg, phy);
2287 			}
2288 			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2289 		}
2290 	} else {
2291 		int ret;
2292 
2293 		ret = tg3_phy_auxctl_read(tp,
2294 					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2295 		if (!ret) {
2296 			if (enable)
2297 				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2298 			else
2299 				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2300 			tg3_phy_auxctl_write(tp,
2301 					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2302 		}
2303 	}
2304 }
2305 
2306 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2307 {
2308 	int ret;
2309 	u32 val;
2310 
2311 	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2312 		return;
2313 
2314 	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2315 	if (!ret)
2316 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2317 				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2318 }
2319 
2320 static void tg3_phy_apply_otp(struct tg3 *tp)
2321 {
2322 	u32 otp, phy;
2323 
2324 	if (!tp->phy_otp)
2325 		return;
2326 
2327 	otp = tp->phy_otp;
2328 
2329 	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2330 		return;
2331 
2332 	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2333 	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2334 	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2335 
2336 	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2337 	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2338 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2339 
2340 	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2341 	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2342 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2343 
2344 	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2345 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2346 
2347 	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2348 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2349 
2350 	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2351 	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2352 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2353 
2354 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2355 }
2356 
2357 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2358 {
2359 	u32 val;
2360 	struct ethtool_eee *dest = &tp->eee;
2361 
2362 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2363 		return;
2364 
2365 	if (eee)
2366 		dest = eee;
2367 
2368 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2369 		return;
2370 
2371 	/* Pull eee_active */
2372 	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2373 	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2374 		dest->eee_active = 1;
2375 	} else
2376 		dest->eee_active = 0;
2377 
2378 	/* Pull lp advertised settings */
2379 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2380 		return;
2381 	dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2382 
2383 	/* Pull advertised and eee_enabled settings */
2384 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2385 		return;
2386 	dest->eee_enabled = !!val;
2387 	dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2388 
2389 	/* Pull tx_lpi_enabled */
2390 	val = tr32(TG3_CPMU_EEE_MODE);
2391 	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2392 
2393 	/* Pull lpi timer value */
2394 	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2395 }
2396 
2397 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2398 {
2399 	u32 val;
2400 
2401 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2402 		return;
2403 
2404 	tp->setlpicnt = 0;
2405 
2406 	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2407 	    current_link_up &&
2408 	    tp->link_config.active_duplex == DUPLEX_FULL &&
2409 	    (tp->link_config.active_speed == SPEED_100 ||
2410 	     tp->link_config.active_speed == SPEED_1000)) {
2411 		u32 eeectl;
2412 
2413 		if (tp->link_config.active_speed == SPEED_1000)
2414 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2415 		else
2416 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2417 
2418 		tw32(TG3_CPMU_EEE_CTRL, eeectl);
2419 
2420 		tg3_eee_pull_config(tp, NULL);
2421 		if (tp->eee.eee_active)
2422 			tp->setlpicnt = 2;
2423 	}
2424 
2425 	if (!tp->setlpicnt) {
2426 		if (current_link_up &&
2427 		   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2428 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2429 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2430 		}
2431 
2432 		val = tr32(TG3_CPMU_EEE_MODE);
2433 		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2434 	}
2435 }
2436 
2437 static void tg3_phy_eee_enable(struct tg3 *tp)
2438 {
2439 	u32 val;
2440 
2441 	if (tp->link_config.active_speed == SPEED_1000 &&
2442 	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2443 	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
2444 	     tg3_flag(tp, 57765_CLASS)) &&
2445 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2446 		val = MII_TG3_DSP_TAP26_ALNOKO |
2447 		      MII_TG3_DSP_TAP26_RMRXSTO;
2448 		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2449 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2450 	}
2451 
2452 	val = tr32(TG3_CPMU_EEE_MODE);
2453 	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2454 }
2455 
2456 static int tg3_wait_macro_done(struct tg3 *tp)
2457 {
2458 	int limit = 100;
2459 
2460 	while (limit--) {
2461 		u32 tmp32;
2462 
2463 		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2464 			if ((tmp32 & 0x1000) == 0)
2465 				break;
2466 		}
2467 	}
2468 	if (limit < 0)
2469 		return -EBUSY;
2470 
2471 	return 0;
2472 }
2473 
2474 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2475 {
2476 	static const u32 test_pat[4][6] = {
2477 	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2478 	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2479 	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2480 	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2481 	};
2482 	int chan;
2483 
2484 	for (chan = 0; chan < 4; chan++) {
2485 		int i;
2486 
2487 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2488 			     (chan * 0x2000) | 0x0200);
2489 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2490 
2491 		for (i = 0; i < 6; i++)
2492 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2493 				     test_pat[chan][i]);
2494 
2495 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2496 		if (tg3_wait_macro_done(tp)) {
2497 			*resetp = 1;
2498 			return -EBUSY;
2499 		}
2500 
2501 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2502 			     (chan * 0x2000) | 0x0200);
2503 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2504 		if (tg3_wait_macro_done(tp)) {
2505 			*resetp = 1;
2506 			return -EBUSY;
2507 		}
2508 
2509 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2510 		if (tg3_wait_macro_done(tp)) {
2511 			*resetp = 1;
2512 			return -EBUSY;
2513 		}
2514 
2515 		for (i = 0; i < 6; i += 2) {
2516 			u32 low, high;
2517 
2518 			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2519 			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2520 			    tg3_wait_macro_done(tp)) {
2521 				*resetp = 1;
2522 				return -EBUSY;
2523 			}
2524 			low &= 0x7fff;
2525 			high &= 0x000f;
2526 			if (low != test_pat[chan][i] ||
2527 			    high != test_pat[chan][i+1]) {
2528 				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2529 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2530 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2531 
2532 				return -EBUSY;
2533 			}
2534 		}
2535 	}
2536 
2537 	return 0;
2538 }
2539 
2540 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2541 {
2542 	int chan;
2543 
2544 	for (chan = 0; chan < 4; chan++) {
2545 		int i;
2546 
2547 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2548 			     (chan * 0x2000) | 0x0200);
2549 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2550 		for (i = 0; i < 6; i++)
2551 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2552 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2553 		if (tg3_wait_macro_done(tp))
2554 			return -EBUSY;
2555 	}
2556 
2557 	return 0;
2558 }
2559 
2560 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2561 {
2562 	u32 reg32, phy9_orig;
2563 	int retries, do_phy_reset, err;
2564 
2565 	retries = 10;
2566 	do_phy_reset = 1;
2567 	do {
2568 		if (do_phy_reset) {
2569 			err = tg3_bmcr_reset(tp);
2570 			if (err)
2571 				return err;
2572 			do_phy_reset = 0;
2573 		}
2574 
2575 		/* Disable transmitter and interrupt.  */
2576 		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2577 			continue;
2578 
2579 		reg32 |= 0x3000;
2580 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2581 
2582 		/* Set full-duplex, 1000 mbps.  */
2583 		tg3_writephy(tp, MII_BMCR,
2584 			     BMCR_FULLDPLX | BMCR_SPEED1000);
2585 
2586 		/* Set to master mode.  */
2587 		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2588 			continue;
2589 
2590 		tg3_writephy(tp, MII_CTRL1000,
2591 			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2592 
2593 		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2594 		if (err)
2595 			return err;
2596 
2597 		/* Block the PHY control access.  */
2598 		tg3_phydsp_write(tp, 0x8005, 0x0800);
2599 
2600 		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2601 		if (!err)
2602 			break;
2603 	} while (--retries);
2604 
2605 	err = tg3_phy_reset_chanpat(tp);
2606 	if (err)
2607 		return err;
2608 
2609 	tg3_phydsp_write(tp, 0x8005, 0x0000);
2610 
2611 	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2612 	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2613 
2614 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2615 
2616 	tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2617 
2618 	err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2619 	if (err)
2620 		return err;
2621 
2622 	reg32 &= ~0x3000;
2623 	tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2624 
2625 	return 0;
2626 }
2627 
2628 static void tg3_carrier_off(struct tg3 *tp)
2629 {
2630 	netif_carrier_off(tp->dev);
2631 	tp->link_up = false;
2632 }
2633 
2634 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2635 {
2636 	if (tg3_flag(tp, ENABLE_ASF))
2637 		netdev_warn(tp->dev,
2638 			    "Management side-band traffic will be interrupted during phy settings change\n");
2639 }
2640 
2641 /* This will reset the tigon3 PHY if there is no valid
2642  * link unless the FORCE argument is non-zero.
2643  */
2644 static int tg3_phy_reset(struct tg3 *tp)
2645 {
2646 	u32 val, cpmuctrl;
2647 	int err;
2648 
2649 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2650 		val = tr32(GRC_MISC_CFG);
2651 		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2652 		udelay(40);
2653 	}
2654 	err  = tg3_readphy(tp, MII_BMSR, &val);
2655 	err |= tg3_readphy(tp, MII_BMSR, &val);
2656 	if (err != 0)
2657 		return -EBUSY;
2658 
2659 	if (netif_running(tp->dev) && tp->link_up) {
2660 		netif_carrier_off(tp->dev);
2661 		tg3_link_report(tp);
2662 	}
2663 
2664 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2665 	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
2666 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
2667 		err = tg3_phy_reset_5703_4_5(tp);
2668 		if (err)
2669 			return err;
2670 		goto out;
2671 	}
2672 
2673 	cpmuctrl = 0;
2674 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2675 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2676 		cpmuctrl = tr32(TG3_CPMU_CTRL);
2677 		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2678 			tw32(TG3_CPMU_CTRL,
2679 			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2680 	}
2681 
2682 	err = tg3_bmcr_reset(tp);
2683 	if (err)
2684 		return err;
2685 
2686 	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2687 		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2688 		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2689 
2690 		tw32(TG3_CPMU_CTRL, cpmuctrl);
2691 	}
2692 
2693 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2694 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2695 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2696 		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2697 		    CPMU_LSPD_1000MB_MACCLK_12_5) {
2698 			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2699 			udelay(40);
2700 			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2701 		}
2702 	}
2703 
2704 	if (tg3_flag(tp, 5717_PLUS) &&
2705 	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2706 		return 0;
2707 
2708 	tg3_phy_apply_otp(tp);
2709 
2710 	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2711 		tg3_phy_toggle_apd(tp, true);
2712 	else
2713 		tg3_phy_toggle_apd(tp, false);
2714 
2715 out:
2716 	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2717 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2718 		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2719 		tg3_phydsp_write(tp, 0x000a, 0x0323);
2720 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2721 	}
2722 
2723 	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2724 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2725 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2726 	}
2727 
2728 	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2729 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2730 			tg3_phydsp_write(tp, 0x000a, 0x310b);
2731 			tg3_phydsp_write(tp, 0x201f, 0x9506);
2732 			tg3_phydsp_write(tp, 0x401f, 0x14e2);
2733 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2734 		}
2735 	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2736 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2737 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2738 			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2739 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2740 				tg3_writephy(tp, MII_TG3_TEST1,
2741 					     MII_TG3_TEST1_TRIM_EN | 0x4);
2742 			} else
2743 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2744 
2745 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2746 		}
2747 	}
2748 
2749 	/* Set Extended packet length bit (bit 14) on all chips that */
2750 	/* support jumbo frames */
2751 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2752 		/* Cannot do read-modify-write on 5401 */
2753 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2754 	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2755 		/* Set bit 14 with read-modify-write to preserve other bits */
2756 		err = tg3_phy_auxctl_read(tp,
2757 					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2758 		if (!err)
2759 			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2760 					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2761 	}
2762 
2763 	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
2764 	 * jumbo frames transmission.
2765 	 */
2766 	if (tg3_flag(tp, JUMBO_CAPABLE)) {
2767 		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2768 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
2769 				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2770 	}
2771 
2772 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2773 		/* adjust output voltage */
2774 		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2775 	}
2776 
2777 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2778 		tg3_phydsp_write(tp, 0xffb, 0x4000);
2779 
2780 	tg3_phy_toggle_automdix(tp, true);
2781 	tg3_phy_set_wirespeed(tp);
2782 	return 0;
2783 }
2784 
2785 #define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
2786 #define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
2787 #define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
2788 					  TG3_GPIO_MSG_NEED_VAUX)
2789 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2790 	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2791 	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2792 	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2793 	 (TG3_GPIO_MSG_DRVR_PRES << 12))
2794 
2795 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2796 	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2797 	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2798 	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2799 	 (TG3_GPIO_MSG_NEED_VAUX << 12))
2800 
2801 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2802 {
2803 	u32 status, shift;
2804 
2805 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2806 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2807 		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2808 	else
2809 		status = tr32(TG3_CPMU_DRV_STATUS);
2810 
2811 	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2812 	status &= ~(TG3_GPIO_MSG_MASK << shift);
2813 	status |= (newstat << shift);
2814 
2815 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2816 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2817 		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2818 	else
2819 		tw32(TG3_CPMU_DRV_STATUS, status);
2820 
2821 	return status >> TG3_APE_GPIO_MSG_SHIFT;
2822 }
2823 
2824 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2825 {
2826 	if (!tg3_flag(tp, IS_NIC))
2827 		return 0;
2828 
2829 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2830 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2831 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2832 		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2833 			return -EIO;
2834 
2835 		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2836 
2837 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2838 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2839 
2840 		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2841 	} else {
2842 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2843 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2844 	}
2845 
2846 	return 0;
2847 }
2848 
2849 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2850 {
2851 	u32 grc_local_ctrl;
2852 
2853 	if (!tg3_flag(tp, IS_NIC) ||
2854 	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
2855 	    tg3_asic_rev(tp) == ASIC_REV_5701)
2856 		return;
2857 
2858 	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2859 
2860 	tw32_wait_f(GRC_LOCAL_CTRL,
2861 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2862 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2863 
2864 	tw32_wait_f(GRC_LOCAL_CTRL,
2865 		    grc_local_ctrl,
2866 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2867 
2868 	tw32_wait_f(GRC_LOCAL_CTRL,
2869 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2870 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2871 }
2872 
2873 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2874 {
2875 	if (!tg3_flag(tp, IS_NIC))
2876 		return;
2877 
2878 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2879 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
2880 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2881 			    (GRC_LCLCTRL_GPIO_OE0 |
2882 			     GRC_LCLCTRL_GPIO_OE1 |
2883 			     GRC_LCLCTRL_GPIO_OE2 |
2884 			     GRC_LCLCTRL_GPIO_OUTPUT0 |
2885 			     GRC_LCLCTRL_GPIO_OUTPUT1),
2886 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2887 	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2888 		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2889 		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2890 		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2891 				     GRC_LCLCTRL_GPIO_OE1 |
2892 				     GRC_LCLCTRL_GPIO_OE2 |
2893 				     GRC_LCLCTRL_GPIO_OUTPUT0 |
2894 				     GRC_LCLCTRL_GPIO_OUTPUT1 |
2895 				     tp->grc_local_ctrl;
2896 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2897 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2898 
2899 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2900 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2901 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2902 
2903 		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2904 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2905 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2906 	} else {
2907 		u32 no_gpio2;
2908 		u32 grc_local_ctrl = 0;
2909 
2910 		/* Workaround to prevent overdrawing Amps. */
2911 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2912 			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2913 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2914 				    grc_local_ctrl,
2915 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2916 		}
2917 
2918 		/* On 5753 and variants, GPIO2 cannot be used. */
2919 		no_gpio2 = tp->nic_sram_data_cfg &
2920 			   NIC_SRAM_DATA_CFG_NO_GPIO2;
2921 
2922 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2923 				  GRC_LCLCTRL_GPIO_OE1 |
2924 				  GRC_LCLCTRL_GPIO_OE2 |
2925 				  GRC_LCLCTRL_GPIO_OUTPUT1 |
2926 				  GRC_LCLCTRL_GPIO_OUTPUT2;
2927 		if (no_gpio2) {
2928 			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2929 					    GRC_LCLCTRL_GPIO_OUTPUT2);
2930 		}
2931 		tw32_wait_f(GRC_LOCAL_CTRL,
2932 			    tp->grc_local_ctrl | grc_local_ctrl,
2933 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2934 
2935 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2936 
2937 		tw32_wait_f(GRC_LOCAL_CTRL,
2938 			    tp->grc_local_ctrl | grc_local_ctrl,
2939 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2940 
2941 		if (!no_gpio2) {
2942 			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2943 			tw32_wait_f(GRC_LOCAL_CTRL,
2944 				    tp->grc_local_ctrl | grc_local_ctrl,
2945 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2946 		}
2947 	}
2948 }
2949 
2950 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2951 {
2952 	u32 msg = 0;
2953 
2954 	/* Serialize power state transitions */
2955 	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2956 		return;
2957 
2958 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2959 		msg = TG3_GPIO_MSG_NEED_VAUX;
2960 
2961 	msg = tg3_set_function_status(tp, msg);
2962 
2963 	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2964 		goto done;
2965 
2966 	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2967 		tg3_pwrsrc_switch_to_vaux(tp);
2968 	else
2969 		tg3_pwrsrc_die_with_vmain(tp);
2970 
2971 done:
2972 	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2973 }
2974 
2975 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2976 {
2977 	bool need_vaux = false;
2978 
2979 	/* The GPIOs do something completely different on 57765. */
2980 	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2981 		return;
2982 
2983 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2984 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2985 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2986 		tg3_frob_aux_power_5717(tp, include_wol ?
2987 					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2988 		return;
2989 	}
2990 
2991 	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2992 		struct net_device *dev_peer;
2993 
2994 		dev_peer = pci_get_drvdata(tp->pdev_peer);
2995 
2996 		/* remove_one() may have been run on the peer. */
2997 		if (dev_peer) {
2998 			struct tg3 *tp_peer = netdev_priv(dev_peer);
2999 
3000 			if (tg3_flag(tp_peer, INIT_COMPLETE))
3001 				return;
3002 
3003 			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
3004 			    tg3_flag(tp_peer, ENABLE_ASF))
3005 				need_vaux = true;
3006 		}
3007 	}
3008 
3009 	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
3010 	    tg3_flag(tp, ENABLE_ASF))
3011 		need_vaux = true;
3012 
3013 	if (need_vaux)
3014 		tg3_pwrsrc_switch_to_vaux(tp);
3015 	else
3016 		tg3_pwrsrc_die_with_vmain(tp);
3017 }
3018 
3019 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3020 {
3021 	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3022 		return 1;
3023 	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3024 		if (speed != SPEED_10)
3025 			return 1;
3026 	} else if (speed == SPEED_10)
3027 		return 1;
3028 
3029 	return 0;
3030 }
3031 
3032 static bool tg3_phy_power_bug(struct tg3 *tp)
3033 {
3034 	switch (tg3_asic_rev(tp)) {
3035 	case ASIC_REV_5700:
3036 	case ASIC_REV_5704:
3037 		return true;
3038 	case ASIC_REV_5780:
3039 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3040 			return true;
3041 		return false;
3042 	case ASIC_REV_5717:
3043 		if (!tp->pci_fn)
3044 			return true;
3045 		return false;
3046 	case ASIC_REV_5719:
3047 	case ASIC_REV_5720:
3048 		if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3049 		    !tp->pci_fn)
3050 			return true;
3051 		return false;
3052 	}
3053 
3054 	return false;
3055 }
3056 
3057 static bool tg3_phy_led_bug(struct tg3 *tp)
3058 {
3059 	switch (tg3_asic_rev(tp)) {
3060 	case ASIC_REV_5719:
3061 	case ASIC_REV_5720:
3062 		if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3063 		    !tp->pci_fn)
3064 			return true;
3065 		return false;
3066 	}
3067 
3068 	return false;
3069 }
3070 
3071 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3072 {
3073 	u32 val;
3074 
3075 	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3076 		return;
3077 
3078 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3079 		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3080 			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3081 			u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3082 
3083 			sg_dig_ctrl |=
3084 				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3085 			tw32(SG_DIG_CTRL, sg_dig_ctrl);
3086 			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3087 		}
3088 		return;
3089 	}
3090 
3091 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3092 		tg3_bmcr_reset(tp);
3093 		val = tr32(GRC_MISC_CFG);
3094 		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3095 		udelay(40);
3096 		return;
3097 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3098 		u32 phytest;
3099 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3100 			u32 phy;
3101 
3102 			tg3_writephy(tp, MII_ADVERTISE, 0);
3103 			tg3_writephy(tp, MII_BMCR,
3104 				     BMCR_ANENABLE | BMCR_ANRESTART);
3105 
3106 			tg3_writephy(tp, MII_TG3_FET_TEST,
3107 				     phytest | MII_TG3_FET_SHADOW_EN);
3108 			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3109 				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3110 				tg3_writephy(tp,
3111 					     MII_TG3_FET_SHDW_AUXMODE4,
3112 					     phy);
3113 			}
3114 			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3115 		}
3116 		return;
3117 	} else if (do_low_power) {
3118 		if (!tg3_phy_led_bug(tp))
3119 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
3120 				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3121 
3122 		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3123 		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3124 		      MII_TG3_AUXCTL_PCTL_VREG_11V;
3125 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3126 	}
3127 
3128 	/* The PHY should not be powered down on some chips because
3129 	 * of bugs.
3130 	 */
3131 	if (tg3_phy_power_bug(tp))
3132 		return;
3133 
3134 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3135 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3136 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3137 		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3138 		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3139 		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3140 	}
3141 
3142 	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3143 }
3144 
3145 /* tp->lock is held. */
3146 static int tg3_nvram_lock(struct tg3 *tp)
3147 {
3148 	if (tg3_flag(tp, NVRAM)) {
3149 		int i;
3150 
3151 		if (tp->nvram_lock_cnt == 0) {
3152 			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3153 			for (i = 0; i < 8000; i++) {
3154 				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3155 					break;
3156 				udelay(20);
3157 			}
3158 			if (i == 8000) {
3159 				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3160 				return -ENODEV;
3161 			}
3162 		}
3163 		tp->nvram_lock_cnt++;
3164 	}
3165 	return 0;
3166 }
3167 
3168 /* tp->lock is held. */
3169 static void tg3_nvram_unlock(struct tg3 *tp)
3170 {
3171 	if (tg3_flag(tp, NVRAM)) {
3172 		if (tp->nvram_lock_cnt > 0)
3173 			tp->nvram_lock_cnt--;
3174 		if (tp->nvram_lock_cnt == 0)
3175 			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3176 	}
3177 }
3178 
3179 /* tp->lock is held. */
3180 static void tg3_enable_nvram_access(struct tg3 *tp)
3181 {
3182 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3183 		u32 nvaccess = tr32(NVRAM_ACCESS);
3184 
3185 		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3186 	}
3187 }
3188 
3189 /* tp->lock is held. */
3190 static void tg3_disable_nvram_access(struct tg3 *tp)
3191 {
3192 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3193 		u32 nvaccess = tr32(NVRAM_ACCESS);
3194 
3195 		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3196 	}
3197 }
3198 
3199 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3200 					u32 offset, u32 *val)
3201 {
3202 	u32 tmp;
3203 	int i;
3204 
3205 	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3206 		return -EINVAL;
3207 
3208 	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3209 					EEPROM_ADDR_DEVID_MASK |
3210 					EEPROM_ADDR_READ);
3211 	tw32(GRC_EEPROM_ADDR,
3212 	     tmp |
3213 	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
3214 	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3215 	      EEPROM_ADDR_ADDR_MASK) |
3216 	     EEPROM_ADDR_READ | EEPROM_ADDR_START);
3217 
3218 	for (i = 0; i < 1000; i++) {
3219 		tmp = tr32(GRC_EEPROM_ADDR);
3220 
3221 		if (tmp & EEPROM_ADDR_COMPLETE)
3222 			break;
3223 		msleep(1);
3224 	}
3225 	if (!(tmp & EEPROM_ADDR_COMPLETE))
3226 		return -EBUSY;
3227 
3228 	tmp = tr32(GRC_EEPROM_DATA);
3229 
3230 	/*
3231 	 * The data will always be opposite the native endian
3232 	 * format.  Perform a blind byteswap to compensate.
3233 	 */
3234 	*val = swab32(tmp);
3235 
3236 	return 0;
3237 }
3238 
3239 #define NVRAM_CMD_TIMEOUT 10000
3240 
3241 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3242 {
3243 	int i;
3244 
3245 	tw32(NVRAM_CMD, nvram_cmd);
3246 	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3247 		usleep_range(10, 40);
3248 		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3249 			udelay(10);
3250 			break;
3251 		}
3252 	}
3253 
3254 	if (i == NVRAM_CMD_TIMEOUT)
3255 		return -EBUSY;
3256 
3257 	return 0;
3258 }
3259 
3260 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3261 {
3262 	if (tg3_flag(tp, NVRAM) &&
3263 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3264 	    tg3_flag(tp, FLASH) &&
3265 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3266 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3267 
3268 		addr = ((addr / tp->nvram_pagesize) <<
3269 			ATMEL_AT45DB0X1B_PAGE_POS) +
3270 		       (addr % tp->nvram_pagesize);
3271 
3272 	return addr;
3273 }
3274 
3275 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3276 {
3277 	if (tg3_flag(tp, NVRAM) &&
3278 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3279 	    tg3_flag(tp, FLASH) &&
3280 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3281 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3282 
3283 		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3284 			tp->nvram_pagesize) +
3285 		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3286 
3287 	return addr;
3288 }
3289 
3290 /* NOTE: Data read in from NVRAM is byteswapped according to
3291  * the byteswapping settings for all other register accesses.
3292  * tg3 devices are BE devices, so on a BE machine, the data
3293  * returned will be exactly as it is seen in NVRAM.  On a LE
3294  * machine, the 32-bit value will be byteswapped.
3295  */
3296 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3297 {
3298 	int ret;
3299 
3300 	if (!tg3_flag(tp, NVRAM))
3301 		return tg3_nvram_read_using_eeprom(tp, offset, val);
3302 
3303 	offset = tg3_nvram_phys_addr(tp, offset);
3304 
3305 	if (offset > NVRAM_ADDR_MSK)
3306 		return -EINVAL;
3307 
3308 	ret = tg3_nvram_lock(tp);
3309 	if (ret)
3310 		return ret;
3311 
3312 	tg3_enable_nvram_access(tp);
3313 
3314 	tw32(NVRAM_ADDR, offset);
3315 	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3316 		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3317 
3318 	if (ret == 0)
3319 		*val = tr32(NVRAM_RDDATA);
3320 
3321 	tg3_disable_nvram_access(tp);
3322 
3323 	tg3_nvram_unlock(tp);
3324 
3325 	return ret;
3326 }
3327 
3328 /* Ensures NVRAM data is in bytestream format. */
3329 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3330 {
3331 	u32 v;
3332 	int res = tg3_nvram_read(tp, offset, &v);
3333 	if (!res)
3334 		*val = cpu_to_be32(v);
3335 	return res;
3336 }
3337 
3338 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3339 				    u32 offset, u32 len, u8 *buf)
3340 {
3341 	int i, j, rc = 0;
3342 	u32 val;
3343 
3344 	for (i = 0; i < len; i += 4) {
3345 		u32 addr;
3346 		__be32 data;
3347 
3348 		addr = offset + i;
3349 
3350 		memcpy(&data, buf + i, 4);
3351 
3352 		/*
3353 		 * The SEEPROM interface expects the data to always be opposite
3354 		 * the native endian format.  We accomplish this by reversing
3355 		 * all the operations that would have been performed on the
3356 		 * data from a call to tg3_nvram_read_be32().
3357 		 */
3358 		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3359 
3360 		val = tr32(GRC_EEPROM_ADDR);
3361 		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3362 
3363 		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3364 			EEPROM_ADDR_READ);
3365 		tw32(GRC_EEPROM_ADDR, val |
3366 			(0 << EEPROM_ADDR_DEVID_SHIFT) |
3367 			(addr & EEPROM_ADDR_ADDR_MASK) |
3368 			EEPROM_ADDR_START |
3369 			EEPROM_ADDR_WRITE);
3370 
3371 		for (j = 0; j < 1000; j++) {
3372 			val = tr32(GRC_EEPROM_ADDR);
3373 
3374 			if (val & EEPROM_ADDR_COMPLETE)
3375 				break;
3376 			msleep(1);
3377 		}
3378 		if (!(val & EEPROM_ADDR_COMPLETE)) {
3379 			rc = -EBUSY;
3380 			break;
3381 		}
3382 	}
3383 
3384 	return rc;
3385 }
3386 
3387 /* offset and length are dword aligned */
3388 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3389 		u8 *buf)
3390 {
3391 	int ret = 0;
3392 	u32 pagesize = tp->nvram_pagesize;
3393 	u32 pagemask = pagesize - 1;
3394 	u32 nvram_cmd;
3395 	u8 *tmp;
3396 
3397 	tmp = kmalloc(pagesize, GFP_KERNEL);
3398 	if (tmp == NULL)
3399 		return -ENOMEM;
3400 
3401 	while (len) {
3402 		int j;
3403 		u32 phy_addr, page_off, size;
3404 
3405 		phy_addr = offset & ~pagemask;
3406 
3407 		for (j = 0; j < pagesize; j += 4) {
3408 			ret = tg3_nvram_read_be32(tp, phy_addr + j,
3409 						  (__be32 *) (tmp + j));
3410 			if (ret)
3411 				break;
3412 		}
3413 		if (ret)
3414 			break;
3415 
3416 		page_off = offset & pagemask;
3417 		size = pagesize;
3418 		if (len < size)
3419 			size = len;
3420 
3421 		len -= size;
3422 
3423 		memcpy(tmp + page_off, buf, size);
3424 
3425 		offset = offset + (pagesize - page_off);
3426 
3427 		tg3_enable_nvram_access(tp);
3428 
3429 		/*
3430 		 * Before we can erase the flash page, we need
3431 		 * to issue a special "write enable" command.
3432 		 */
3433 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3434 
3435 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3436 			break;
3437 
3438 		/* Erase the target page */
3439 		tw32(NVRAM_ADDR, phy_addr);
3440 
3441 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3442 			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3443 
3444 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3445 			break;
3446 
3447 		/* Issue another write enable to start the write. */
3448 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3449 
3450 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3451 			break;
3452 
3453 		for (j = 0; j < pagesize; j += 4) {
3454 			__be32 data;
3455 
3456 			data = *((__be32 *) (tmp + j));
3457 
3458 			tw32(NVRAM_WRDATA, be32_to_cpu(data));
3459 
3460 			tw32(NVRAM_ADDR, phy_addr + j);
3461 
3462 			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3463 				NVRAM_CMD_WR;
3464 
3465 			if (j == 0)
3466 				nvram_cmd |= NVRAM_CMD_FIRST;
3467 			else if (j == (pagesize - 4))
3468 				nvram_cmd |= NVRAM_CMD_LAST;
3469 
3470 			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3471 			if (ret)
3472 				break;
3473 		}
3474 		if (ret)
3475 			break;
3476 	}
3477 
3478 	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3479 	tg3_nvram_exec_cmd(tp, nvram_cmd);
3480 
3481 	kfree(tmp);
3482 
3483 	return ret;
3484 }
3485 
3486 /* offset and length are dword aligned */
3487 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3488 		u8 *buf)
3489 {
3490 	int i, ret = 0;
3491 
3492 	for (i = 0; i < len; i += 4, offset += 4) {
3493 		u32 page_off, phy_addr, nvram_cmd;
3494 		__be32 data;
3495 
3496 		memcpy(&data, buf + i, 4);
3497 		tw32(NVRAM_WRDATA, be32_to_cpu(data));
3498 
3499 		page_off = offset % tp->nvram_pagesize;
3500 
3501 		phy_addr = tg3_nvram_phys_addr(tp, offset);
3502 
3503 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3504 
3505 		if (page_off == 0 || i == 0)
3506 			nvram_cmd |= NVRAM_CMD_FIRST;
3507 		if (page_off == (tp->nvram_pagesize - 4))
3508 			nvram_cmd |= NVRAM_CMD_LAST;
3509 
3510 		if (i == (len - 4))
3511 			nvram_cmd |= NVRAM_CMD_LAST;
3512 
3513 		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3514 		    !tg3_flag(tp, FLASH) ||
3515 		    !tg3_flag(tp, 57765_PLUS))
3516 			tw32(NVRAM_ADDR, phy_addr);
3517 
3518 		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3519 		    !tg3_flag(tp, 5755_PLUS) &&
3520 		    (tp->nvram_jedecnum == JEDEC_ST) &&
3521 		    (nvram_cmd & NVRAM_CMD_FIRST)) {
3522 			u32 cmd;
3523 
3524 			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3525 			ret = tg3_nvram_exec_cmd(tp, cmd);
3526 			if (ret)
3527 				break;
3528 		}
3529 		if (!tg3_flag(tp, FLASH)) {
3530 			/* We always do complete word writes to eeprom. */
3531 			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3532 		}
3533 
3534 		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3535 		if (ret)
3536 			break;
3537 	}
3538 	return ret;
3539 }
3540 
3541 /* offset and length are dword aligned */
3542 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3543 {
3544 	int ret;
3545 
3546 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3547 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3548 		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
3549 		udelay(40);
3550 	}
3551 
3552 	if (!tg3_flag(tp, NVRAM)) {
3553 		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3554 	} else {
3555 		u32 grc_mode;
3556 
3557 		ret = tg3_nvram_lock(tp);
3558 		if (ret)
3559 			return ret;
3560 
3561 		tg3_enable_nvram_access(tp);
3562 		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3563 			tw32(NVRAM_WRITE1, 0x406);
3564 
3565 		grc_mode = tr32(GRC_MODE);
3566 		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3567 
3568 		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3569 			ret = tg3_nvram_write_block_buffered(tp, offset, len,
3570 				buf);
3571 		} else {
3572 			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3573 				buf);
3574 		}
3575 
3576 		grc_mode = tr32(GRC_MODE);
3577 		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3578 
3579 		tg3_disable_nvram_access(tp);
3580 		tg3_nvram_unlock(tp);
3581 	}
3582 
3583 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3584 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3585 		udelay(40);
3586 	}
3587 
3588 	return ret;
3589 }
3590 
3591 #define RX_CPU_SCRATCH_BASE	0x30000
3592 #define RX_CPU_SCRATCH_SIZE	0x04000
3593 #define TX_CPU_SCRATCH_BASE	0x34000
3594 #define TX_CPU_SCRATCH_SIZE	0x04000
3595 
3596 /* tp->lock is held. */
3597 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3598 {
3599 	int i;
3600 	const int iters = 10000;
3601 
3602 	for (i = 0; i < iters; i++) {
3603 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3604 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3605 		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3606 			break;
3607 		if (pci_channel_offline(tp->pdev))
3608 			return -EBUSY;
3609 	}
3610 
3611 	return (i == iters) ? -EBUSY : 0;
3612 }
3613 
3614 /* tp->lock is held. */
3615 static int tg3_rxcpu_pause(struct tg3 *tp)
3616 {
3617 	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3618 
3619 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3620 	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3621 	udelay(10);
3622 
3623 	return rc;
3624 }
3625 
3626 /* tp->lock is held. */
3627 static int tg3_txcpu_pause(struct tg3 *tp)
3628 {
3629 	return tg3_pause_cpu(tp, TX_CPU_BASE);
3630 }
3631 
3632 /* tp->lock is held. */
3633 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3634 {
3635 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3636 	tw32_f(cpu_base + CPU_MODE,  0x00000000);
3637 }
3638 
3639 /* tp->lock is held. */
3640 static void tg3_rxcpu_resume(struct tg3 *tp)
3641 {
3642 	tg3_resume_cpu(tp, RX_CPU_BASE);
3643 }
3644 
3645 /* tp->lock is held. */
3646 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3647 {
3648 	int rc;
3649 
3650 	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3651 
3652 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3653 		u32 val = tr32(GRC_VCPU_EXT_CTRL);
3654 
3655 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3656 		return 0;
3657 	}
3658 	if (cpu_base == RX_CPU_BASE) {
3659 		rc = tg3_rxcpu_pause(tp);
3660 	} else {
3661 		/*
3662 		 * There is only an Rx CPU for the 5750 derivative in the
3663 		 * BCM4785.
3664 		 */
3665 		if (tg3_flag(tp, IS_SSB_CORE))
3666 			return 0;
3667 
3668 		rc = tg3_txcpu_pause(tp);
3669 	}
3670 
3671 	if (rc) {
3672 		netdev_err(tp->dev, "%s timed out, %s CPU\n",
3673 			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3674 		return -ENODEV;
3675 	}
3676 
3677 	/* Clear firmware's nvram arbitration. */
3678 	if (tg3_flag(tp, NVRAM))
3679 		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3680 	return 0;
3681 }
3682 
3683 static int tg3_fw_data_len(struct tg3 *tp,
3684 			   const struct tg3_firmware_hdr *fw_hdr)
3685 {
3686 	int fw_len;
3687 
3688 	/* Non fragmented firmware have one firmware header followed by a
3689 	 * contiguous chunk of data to be written. The length field in that
3690 	 * header is not the length of data to be written but the complete
3691 	 * length of the bss. The data length is determined based on
3692 	 * tp->fw->size minus headers.
3693 	 *
3694 	 * Fragmented firmware have a main header followed by multiple
3695 	 * fragments. Each fragment is identical to non fragmented firmware
3696 	 * with a firmware header followed by a contiguous chunk of data. In
3697 	 * the main header, the length field is unused and set to 0xffffffff.
3698 	 * In each fragment header the length is the entire size of that
3699 	 * fragment i.e. fragment data + header length. Data length is
3700 	 * therefore length field in the header minus TG3_FW_HDR_LEN.
3701 	 */
3702 	if (tp->fw_len == 0xffffffff)
3703 		fw_len = be32_to_cpu(fw_hdr->len);
3704 	else
3705 		fw_len = tp->fw->size;
3706 
3707 	return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3708 }
3709 
3710 /* tp->lock is held. */
3711 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3712 				 u32 cpu_scratch_base, int cpu_scratch_size,
3713 				 const struct tg3_firmware_hdr *fw_hdr)
3714 {
3715 	int err, i;
3716 	void (*write_op)(struct tg3 *, u32, u32);
3717 	int total_len = tp->fw->size;
3718 
3719 	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3720 		netdev_err(tp->dev,
3721 			   "%s: Trying to load TX cpu firmware which is 5705\n",
3722 			   __func__);
3723 		return -EINVAL;
3724 	}
3725 
3726 	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3727 		write_op = tg3_write_mem;
3728 	else
3729 		write_op = tg3_write_indirect_reg32;
3730 
3731 	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3732 		/* It is possible that bootcode is still loading at this point.
3733 		 * Get the nvram lock first before halting the cpu.
3734 		 */
3735 		int lock_err = tg3_nvram_lock(tp);
3736 		err = tg3_halt_cpu(tp, cpu_base);
3737 		if (!lock_err)
3738 			tg3_nvram_unlock(tp);
3739 		if (err)
3740 			goto out;
3741 
3742 		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3743 			write_op(tp, cpu_scratch_base + i, 0);
3744 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3745 		tw32(cpu_base + CPU_MODE,
3746 		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3747 	} else {
3748 		/* Subtract additional main header for fragmented firmware and
3749 		 * advance to the first fragment
3750 		 */
3751 		total_len -= TG3_FW_HDR_LEN;
3752 		fw_hdr++;
3753 	}
3754 
3755 	do {
3756 		u32 *fw_data = (u32 *)(fw_hdr + 1);
3757 		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3758 			write_op(tp, cpu_scratch_base +
3759 				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3760 				     (i * sizeof(u32)),
3761 				 be32_to_cpu(fw_data[i]));
3762 
3763 		total_len -= be32_to_cpu(fw_hdr->len);
3764 
3765 		/* Advance to next fragment */
3766 		fw_hdr = (struct tg3_firmware_hdr *)
3767 			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3768 	} while (total_len > 0);
3769 
3770 	err = 0;
3771 
3772 out:
3773 	return err;
3774 }
3775 
3776 /* tp->lock is held. */
3777 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3778 {
3779 	int i;
3780 	const int iters = 5;
3781 
3782 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3783 	tw32_f(cpu_base + CPU_PC, pc);
3784 
3785 	for (i = 0; i < iters; i++) {
3786 		if (tr32(cpu_base + CPU_PC) == pc)
3787 			break;
3788 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3789 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3790 		tw32_f(cpu_base + CPU_PC, pc);
3791 		udelay(1000);
3792 	}
3793 
3794 	return (i == iters) ? -EBUSY : 0;
3795 }
3796 
3797 /* tp->lock is held. */
3798 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3799 {
3800 	const struct tg3_firmware_hdr *fw_hdr;
3801 	int err;
3802 
3803 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3804 
3805 	/* Firmware blob starts with version numbers, followed by
3806 	   start address and length. We are setting complete length.
3807 	   length = end_address_of_bss - start_address_of_text.
3808 	   Remainder is the blob to be loaded contiguously
3809 	   from start address. */
3810 
3811 	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3812 				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3813 				    fw_hdr);
3814 	if (err)
3815 		return err;
3816 
3817 	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3818 				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3819 				    fw_hdr);
3820 	if (err)
3821 		return err;
3822 
3823 	/* Now startup only the RX cpu. */
3824 	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3825 				       be32_to_cpu(fw_hdr->base_addr));
3826 	if (err) {
3827 		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3828 			   "should be %08x\n", __func__,
3829 			   tr32(RX_CPU_BASE + CPU_PC),
3830 				be32_to_cpu(fw_hdr->base_addr));
3831 		return -ENODEV;
3832 	}
3833 
3834 	tg3_rxcpu_resume(tp);
3835 
3836 	return 0;
3837 }
3838 
3839 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3840 {
3841 	const int iters = 1000;
3842 	int i;
3843 	u32 val;
3844 
3845 	/* Wait for boot code to complete initialization and enter service
3846 	 * loop. It is then safe to download service patches
3847 	 */
3848 	for (i = 0; i < iters; i++) {
3849 		if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3850 			break;
3851 
3852 		udelay(10);
3853 	}
3854 
3855 	if (i == iters) {
3856 		netdev_err(tp->dev, "Boot code not ready for service patches\n");
3857 		return -EBUSY;
3858 	}
3859 
3860 	val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3861 	if (val & 0xff) {
3862 		netdev_warn(tp->dev,
3863 			    "Other patches exist. Not downloading EEE patch\n");
3864 		return -EEXIST;
3865 	}
3866 
3867 	return 0;
3868 }
3869 
3870 /* tp->lock is held. */
3871 static void tg3_load_57766_firmware(struct tg3 *tp)
3872 {
3873 	struct tg3_firmware_hdr *fw_hdr;
3874 
3875 	if (!tg3_flag(tp, NO_NVRAM))
3876 		return;
3877 
3878 	if (tg3_validate_rxcpu_state(tp))
3879 		return;
3880 
3881 	if (!tp->fw)
3882 		return;
3883 
3884 	/* This firmware blob has a different format than older firmware
3885 	 * releases as given below. The main difference is we have fragmented
3886 	 * data to be written to non-contiguous locations.
3887 	 *
3888 	 * In the beginning we have a firmware header identical to other
3889 	 * firmware which consists of version, base addr and length. The length
3890 	 * here is unused and set to 0xffffffff.
3891 	 *
3892 	 * This is followed by a series of firmware fragments which are
3893 	 * individually identical to previous firmware. i.e. they have the
3894 	 * firmware header and followed by data for that fragment. The version
3895 	 * field of the individual fragment header is unused.
3896 	 */
3897 
3898 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3899 	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3900 		return;
3901 
3902 	if (tg3_rxcpu_pause(tp))
3903 		return;
3904 
3905 	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
3906 	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3907 
3908 	tg3_rxcpu_resume(tp);
3909 }
3910 
3911 /* tp->lock is held. */
3912 static int tg3_load_tso_firmware(struct tg3 *tp)
3913 {
3914 	const struct tg3_firmware_hdr *fw_hdr;
3915 	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3916 	int err;
3917 
3918 	if (!tg3_flag(tp, FW_TSO))
3919 		return 0;
3920 
3921 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3922 
3923 	/* Firmware blob starts with version numbers, followed by
3924 	   start address and length. We are setting complete length.
3925 	   length = end_address_of_bss - start_address_of_text.
3926 	   Remainder is the blob to be loaded contiguously
3927 	   from start address. */
3928 
3929 	cpu_scratch_size = tp->fw_len;
3930 
3931 	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3932 		cpu_base = RX_CPU_BASE;
3933 		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3934 	} else {
3935 		cpu_base = TX_CPU_BASE;
3936 		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3937 		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3938 	}
3939 
3940 	err = tg3_load_firmware_cpu(tp, cpu_base,
3941 				    cpu_scratch_base, cpu_scratch_size,
3942 				    fw_hdr);
3943 	if (err)
3944 		return err;
3945 
3946 	/* Now startup the cpu. */
3947 	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3948 				       be32_to_cpu(fw_hdr->base_addr));
3949 	if (err) {
3950 		netdev_err(tp->dev,
3951 			   "%s fails to set CPU PC, is %08x should be %08x\n",
3952 			   __func__, tr32(cpu_base + CPU_PC),
3953 			   be32_to_cpu(fw_hdr->base_addr));
3954 		return -ENODEV;
3955 	}
3956 
3957 	tg3_resume_cpu(tp, cpu_base);
3958 	return 0;
3959 }
3960 
3961 /* tp->lock is held. */
3962 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3963 {
3964 	u32 addr_high, addr_low;
3965 
3966 	addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3967 	addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3968 		    (mac_addr[4] <<  8) | mac_addr[5]);
3969 
3970 	if (index < 4) {
3971 		tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3972 		tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3973 	} else {
3974 		index -= 4;
3975 		tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3976 		tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3977 	}
3978 }
3979 
3980 /* tp->lock is held. */
3981 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3982 {
3983 	u32 addr_high;
3984 	int i;
3985 
3986 	for (i = 0; i < 4; i++) {
3987 		if (i == 1 && skip_mac_1)
3988 			continue;
3989 		__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3990 	}
3991 
3992 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3993 	    tg3_asic_rev(tp) == ASIC_REV_5704) {
3994 		for (i = 4; i < 16; i++)
3995 			__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3996 	}
3997 
3998 	addr_high = (tp->dev->dev_addr[0] +
3999 		     tp->dev->dev_addr[1] +
4000 		     tp->dev->dev_addr[2] +
4001 		     tp->dev->dev_addr[3] +
4002 		     tp->dev->dev_addr[4] +
4003 		     tp->dev->dev_addr[5]) &
4004 		TX_BACKOFF_SEED_MASK;
4005 	tw32(MAC_TX_BACKOFF_SEED, addr_high);
4006 }
4007 
4008 static void tg3_enable_register_access(struct tg3 *tp)
4009 {
4010 	/*
4011 	 * Make sure register accesses (indirect or otherwise) will function
4012 	 * correctly.
4013 	 */
4014 	pci_write_config_dword(tp->pdev,
4015 			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4016 }
4017 
4018 static int tg3_power_up(struct tg3 *tp)
4019 {
4020 	int err;
4021 
4022 	tg3_enable_register_access(tp);
4023 
4024 	err = pci_set_power_state(tp->pdev, PCI_D0);
4025 	if (!err) {
4026 		/* Switch out of Vaux if it is a NIC */
4027 		tg3_pwrsrc_switch_to_vmain(tp);
4028 	} else {
4029 		netdev_err(tp->dev, "Transition to D0 failed\n");
4030 	}
4031 
4032 	return err;
4033 }
4034 
4035 static int tg3_setup_phy(struct tg3 *, bool);
4036 
4037 static int tg3_power_down_prepare(struct tg3 *tp)
4038 {
4039 	u32 misc_host_ctrl;
4040 	bool device_should_wake, do_low_power;
4041 
4042 	tg3_enable_register_access(tp);
4043 
4044 	/* Restore the CLKREQ setting. */
4045 	if (tg3_flag(tp, CLKREQ_BUG))
4046 		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4047 					 PCI_EXP_LNKCTL_CLKREQ_EN);
4048 
4049 	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4050 	tw32(TG3PCI_MISC_HOST_CTRL,
4051 	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4052 
4053 	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4054 			     tg3_flag(tp, WOL_ENABLE);
4055 
4056 	if (tg3_flag(tp, USE_PHYLIB)) {
4057 		do_low_power = false;
4058 		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4059 		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4060 			struct phy_device *phydev;
4061 			u32 phyid, advertising;
4062 
4063 			phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4064 
4065 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4066 
4067 			tp->link_config.speed = phydev->speed;
4068 			tp->link_config.duplex = phydev->duplex;
4069 			tp->link_config.autoneg = phydev->autoneg;
4070 			tp->link_config.advertising = phydev->advertising;
4071 
4072 			advertising = ADVERTISED_TP |
4073 				      ADVERTISED_Pause |
4074 				      ADVERTISED_Autoneg |
4075 				      ADVERTISED_10baseT_Half;
4076 
4077 			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4078 				if (tg3_flag(tp, WOL_SPEED_100MB))
4079 					advertising |=
4080 						ADVERTISED_100baseT_Half |
4081 						ADVERTISED_100baseT_Full |
4082 						ADVERTISED_10baseT_Full;
4083 				else
4084 					advertising |= ADVERTISED_10baseT_Full;
4085 			}
4086 
4087 			phydev->advertising = advertising;
4088 
4089 			phy_start_aneg(phydev);
4090 
4091 			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4092 			if (phyid != PHY_ID_BCMAC131) {
4093 				phyid &= PHY_BCM_OUI_MASK;
4094 				if (phyid == PHY_BCM_OUI_1 ||
4095 				    phyid == PHY_BCM_OUI_2 ||
4096 				    phyid == PHY_BCM_OUI_3)
4097 					do_low_power = true;
4098 			}
4099 		}
4100 	} else {
4101 		do_low_power = true;
4102 
4103 		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4104 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4105 
4106 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4107 			tg3_setup_phy(tp, false);
4108 	}
4109 
4110 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4111 		u32 val;
4112 
4113 		val = tr32(GRC_VCPU_EXT_CTRL);
4114 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4115 	} else if (!tg3_flag(tp, ENABLE_ASF)) {
4116 		int i;
4117 		u32 val;
4118 
4119 		for (i = 0; i < 200; i++) {
4120 			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4121 			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4122 				break;
4123 			msleep(1);
4124 		}
4125 	}
4126 	if (tg3_flag(tp, WOL_CAP))
4127 		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4128 						     WOL_DRV_STATE_SHUTDOWN |
4129 						     WOL_DRV_WOL |
4130 						     WOL_SET_MAGIC_PKT);
4131 
4132 	if (device_should_wake) {
4133 		u32 mac_mode;
4134 
4135 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4136 			if (do_low_power &&
4137 			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4138 				tg3_phy_auxctl_write(tp,
4139 					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4140 					       MII_TG3_AUXCTL_PCTL_WOL_EN |
4141 					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4142 					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4143 				udelay(40);
4144 			}
4145 
4146 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4147 				mac_mode = MAC_MODE_PORT_MODE_GMII;
4148 			else if (tp->phy_flags &
4149 				 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4150 				if (tp->link_config.active_speed == SPEED_1000)
4151 					mac_mode = MAC_MODE_PORT_MODE_GMII;
4152 				else
4153 					mac_mode = MAC_MODE_PORT_MODE_MII;
4154 			} else
4155 				mac_mode = MAC_MODE_PORT_MODE_MII;
4156 
4157 			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4158 			if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4159 				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4160 					     SPEED_100 : SPEED_10;
4161 				if (tg3_5700_link_polarity(tp, speed))
4162 					mac_mode |= MAC_MODE_LINK_POLARITY;
4163 				else
4164 					mac_mode &= ~MAC_MODE_LINK_POLARITY;
4165 			}
4166 		} else {
4167 			mac_mode = MAC_MODE_PORT_MODE_TBI;
4168 		}
4169 
4170 		if (!tg3_flag(tp, 5750_PLUS))
4171 			tw32(MAC_LED_CTRL, tp->led_ctrl);
4172 
4173 		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4174 		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4175 		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4176 			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4177 
4178 		if (tg3_flag(tp, ENABLE_APE))
4179 			mac_mode |= MAC_MODE_APE_TX_EN |
4180 				    MAC_MODE_APE_RX_EN |
4181 				    MAC_MODE_TDE_ENABLE;
4182 
4183 		tw32_f(MAC_MODE, mac_mode);
4184 		udelay(100);
4185 
4186 		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4187 		udelay(10);
4188 	}
4189 
4190 	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4191 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4192 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
4193 		u32 base_val;
4194 
4195 		base_val = tp->pci_clock_ctrl;
4196 		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4197 			     CLOCK_CTRL_TXCLK_DISABLE);
4198 
4199 		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4200 			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
4201 	} else if (tg3_flag(tp, 5780_CLASS) ||
4202 		   tg3_flag(tp, CPMU_PRESENT) ||
4203 		   tg3_asic_rev(tp) == ASIC_REV_5906) {
4204 		/* do nothing */
4205 	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4206 		u32 newbits1, newbits2;
4207 
4208 		if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4209 		    tg3_asic_rev(tp) == ASIC_REV_5701) {
4210 			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4211 				    CLOCK_CTRL_TXCLK_DISABLE |
4212 				    CLOCK_CTRL_ALTCLK);
4213 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4214 		} else if (tg3_flag(tp, 5705_PLUS)) {
4215 			newbits1 = CLOCK_CTRL_625_CORE;
4216 			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4217 		} else {
4218 			newbits1 = CLOCK_CTRL_ALTCLK;
4219 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4220 		}
4221 
4222 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4223 			    40);
4224 
4225 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4226 			    40);
4227 
4228 		if (!tg3_flag(tp, 5705_PLUS)) {
4229 			u32 newbits3;
4230 
4231 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4232 			    tg3_asic_rev(tp) == ASIC_REV_5701) {
4233 				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4234 					    CLOCK_CTRL_TXCLK_DISABLE |
4235 					    CLOCK_CTRL_44MHZ_CORE);
4236 			} else {
4237 				newbits3 = CLOCK_CTRL_44MHZ_CORE;
4238 			}
4239 
4240 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
4241 				    tp->pci_clock_ctrl | newbits3, 40);
4242 		}
4243 	}
4244 
4245 	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4246 		tg3_power_down_phy(tp, do_low_power);
4247 
4248 	tg3_frob_aux_power(tp, true);
4249 
4250 	/* Workaround for unstable PLL clock */
4251 	if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4252 	    ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4253 	     (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4254 		u32 val = tr32(0x7d00);
4255 
4256 		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4257 		tw32(0x7d00, val);
4258 		if (!tg3_flag(tp, ENABLE_ASF)) {
4259 			int err;
4260 
4261 			err = tg3_nvram_lock(tp);
4262 			tg3_halt_cpu(tp, RX_CPU_BASE);
4263 			if (!err)
4264 				tg3_nvram_unlock(tp);
4265 		}
4266 	}
4267 
4268 	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4269 
4270 	tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4271 
4272 	return 0;
4273 }
4274 
4275 static void tg3_power_down(struct tg3 *tp)
4276 {
4277 	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4278 	pci_set_power_state(tp->pdev, PCI_D3hot);
4279 }
4280 
4281 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4282 {
4283 	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4284 	case MII_TG3_AUX_STAT_10HALF:
4285 		*speed = SPEED_10;
4286 		*duplex = DUPLEX_HALF;
4287 		break;
4288 
4289 	case MII_TG3_AUX_STAT_10FULL:
4290 		*speed = SPEED_10;
4291 		*duplex = DUPLEX_FULL;
4292 		break;
4293 
4294 	case MII_TG3_AUX_STAT_100HALF:
4295 		*speed = SPEED_100;
4296 		*duplex = DUPLEX_HALF;
4297 		break;
4298 
4299 	case MII_TG3_AUX_STAT_100FULL:
4300 		*speed = SPEED_100;
4301 		*duplex = DUPLEX_FULL;
4302 		break;
4303 
4304 	case MII_TG3_AUX_STAT_1000HALF:
4305 		*speed = SPEED_1000;
4306 		*duplex = DUPLEX_HALF;
4307 		break;
4308 
4309 	case MII_TG3_AUX_STAT_1000FULL:
4310 		*speed = SPEED_1000;
4311 		*duplex = DUPLEX_FULL;
4312 		break;
4313 
4314 	default:
4315 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4316 			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4317 				 SPEED_10;
4318 			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4319 				  DUPLEX_HALF;
4320 			break;
4321 		}
4322 		*speed = SPEED_UNKNOWN;
4323 		*duplex = DUPLEX_UNKNOWN;
4324 		break;
4325 	}
4326 }
4327 
4328 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4329 {
4330 	int err = 0;
4331 	u32 val, new_adv;
4332 
4333 	new_adv = ADVERTISE_CSMA;
4334 	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4335 	new_adv |= mii_advertise_flowctrl(flowctrl);
4336 
4337 	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4338 	if (err)
4339 		goto done;
4340 
4341 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4342 		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4343 
4344 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4345 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4346 			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4347 
4348 		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4349 		if (err)
4350 			goto done;
4351 	}
4352 
4353 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4354 		goto done;
4355 
4356 	tw32(TG3_CPMU_EEE_MODE,
4357 	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4358 
4359 	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4360 	if (!err) {
4361 		u32 err2;
4362 
4363 		val = 0;
4364 		/* Advertise 100-BaseTX EEE ability */
4365 		if (advertise & ADVERTISED_100baseT_Full)
4366 			val |= MDIO_AN_EEE_ADV_100TX;
4367 		/* Advertise 1000-BaseT EEE ability */
4368 		if (advertise & ADVERTISED_1000baseT_Full)
4369 			val |= MDIO_AN_EEE_ADV_1000T;
4370 
4371 		if (!tp->eee.eee_enabled) {
4372 			val = 0;
4373 			tp->eee.advertised = 0;
4374 		} else {
4375 			tp->eee.advertised = advertise &
4376 					     (ADVERTISED_100baseT_Full |
4377 					      ADVERTISED_1000baseT_Full);
4378 		}
4379 
4380 		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4381 		if (err)
4382 			val = 0;
4383 
4384 		switch (tg3_asic_rev(tp)) {
4385 		case ASIC_REV_5717:
4386 		case ASIC_REV_57765:
4387 		case ASIC_REV_57766:
4388 		case ASIC_REV_5719:
4389 			/* If we advertised any eee advertisements above... */
4390 			if (val)
4391 				val = MII_TG3_DSP_TAP26_ALNOKO |
4392 				      MII_TG3_DSP_TAP26_RMRXSTO |
4393 				      MII_TG3_DSP_TAP26_OPCSINPT;
4394 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4395 			/* Fall through */
4396 		case ASIC_REV_5720:
4397 		case ASIC_REV_5762:
4398 			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4399 				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4400 						 MII_TG3_DSP_CH34TP2_HIBW01);
4401 		}
4402 
4403 		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4404 		if (!err)
4405 			err = err2;
4406 	}
4407 
4408 done:
4409 	return err;
4410 }
4411 
4412 static void tg3_phy_copper_begin(struct tg3 *tp)
4413 {
4414 	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4415 	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4416 		u32 adv, fc;
4417 
4418 		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4419 		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4420 			adv = ADVERTISED_10baseT_Half |
4421 			      ADVERTISED_10baseT_Full;
4422 			if (tg3_flag(tp, WOL_SPEED_100MB))
4423 				adv |= ADVERTISED_100baseT_Half |
4424 				       ADVERTISED_100baseT_Full;
4425 			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4426 				if (!(tp->phy_flags &
4427 				      TG3_PHYFLG_DISABLE_1G_HD_ADV))
4428 					adv |= ADVERTISED_1000baseT_Half;
4429 				adv |= ADVERTISED_1000baseT_Full;
4430 			}
4431 
4432 			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4433 		} else {
4434 			adv = tp->link_config.advertising;
4435 			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4436 				adv &= ~(ADVERTISED_1000baseT_Half |
4437 					 ADVERTISED_1000baseT_Full);
4438 
4439 			fc = tp->link_config.flowctrl;
4440 		}
4441 
4442 		tg3_phy_autoneg_cfg(tp, adv, fc);
4443 
4444 		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4445 		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4446 			/* Normally during power down we want to autonegotiate
4447 			 * the lowest possible speed for WOL. However, to avoid
4448 			 * link flap, we leave it untouched.
4449 			 */
4450 			return;
4451 		}
4452 
4453 		tg3_writephy(tp, MII_BMCR,
4454 			     BMCR_ANENABLE | BMCR_ANRESTART);
4455 	} else {
4456 		int i;
4457 		u32 bmcr, orig_bmcr;
4458 
4459 		tp->link_config.active_speed = tp->link_config.speed;
4460 		tp->link_config.active_duplex = tp->link_config.duplex;
4461 
4462 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4463 			/* With autoneg disabled, 5715 only links up when the
4464 			 * advertisement register has the configured speed
4465 			 * enabled.
4466 			 */
4467 			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4468 		}
4469 
4470 		bmcr = 0;
4471 		switch (tp->link_config.speed) {
4472 		default:
4473 		case SPEED_10:
4474 			break;
4475 
4476 		case SPEED_100:
4477 			bmcr |= BMCR_SPEED100;
4478 			break;
4479 
4480 		case SPEED_1000:
4481 			bmcr |= BMCR_SPEED1000;
4482 			break;
4483 		}
4484 
4485 		if (tp->link_config.duplex == DUPLEX_FULL)
4486 			bmcr |= BMCR_FULLDPLX;
4487 
4488 		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4489 		    (bmcr != orig_bmcr)) {
4490 			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4491 			for (i = 0; i < 1500; i++) {
4492 				u32 tmp;
4493 
4494 				udelay(10);
4495 				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4496 				    tg3_readphy(tp, MII_BMSR, &tmp))
4497 					continue;
4498 				if (!(tmp & BMSR_LSTATUS)) {
4499 					udelay(40);
4500 					break;
4501 				}
4502 			}
4503 			tg3_writephy(tp, MII_BMCR, bmcr);
4504 			udelay(40);
4505 		}
4506 	}
4507 }
4508 
4509 static int tg3_phy_pull_config(struct tg3 *tp)
4510 {
4511 	int err;
4512 	u32 val;
4513 
4514 	err = tg3_readphy(tp, MII_BMCR, &val);
4515 	if (err)
4516 		goto done;
4517 
4518 	if (!(val & BMCR_ANENABLE)) {
4519 		tp->link_config.autoneg = AUTONEG_DISABLE;
4520 		tp->link_config.advertising = 0;
4521 		tg3_flag_clear(tp, PAUSE_AUTONEG);
4522 
4523 		err = -EIO;
4524 
4525 		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4526 		case 0:
4527 			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4528 				goto done;
4529 
4530 			tp->link_config.speed = SPEED_10;
4531 			break;
4532 		case BMCR_SPEED100:
4533 			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4534 				goto done;
4535 
4536 			tp->link_config.speed = SPEED_100;
4537 			break;
4538 		case BMCR_SPEED1000:
4539 			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4540 				tp->link_config.speed = SPEED_1000;
4541 				break;
4542 			}
4543 			/* Fall through */
4544 		default:
4545 			goto done;
4546 		}
4547 
4548 		if (val & BMCR_FULLDPLX)
4549 			tp->link_config.duplex = DUPLEX_FULL;
4550 		else
4551 			tp->link_config.duplex = DUPLEX_HALF;
4552 
4553 		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4554 
4555 		err = 0;
4556 		goto done;
4557 	}
4558 
4559 	tp->link_config.autoneg = AUTONEG_ENABLE;
4560 	tp->link_config.advertising = ADVERTISED_Autoneg;
4561 	tg3_flag_set(tp, PAUSE_AUTONEG);
4562 
4563 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4564 		u32 adv;
4565 
4566 		err = tg3_readphy(tp, MII_ADVERTISE, &val);
4567 		if (err)
4568 			goto done;
4569 
4570 		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4571 		tp->link_config.advertising |= adv | ADVERTISED_TP;
4572 
4573 		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4574 	} else {
4575 		tp->link_config.advertising |= ADVERTISED_FIBRE;
4576 	}
4577 
4578 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4579 		u32 adv;
4580 
4581 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4582 			err = tg3_readphy(tp, MII_CTRL1000, &val);
4583 			if (err)
4584 				goto done;
4585 
4586 			adv = mii_ctrl1000_to_ethtool_adv_t(val);
4587 		} else {
4588 			err = tg3_readphy(tp, MII_ADVERTISE, &val);
4589 			if (err)
4590 				goto done;
4591 
4592 			adv = tg3_decode_flowctrl_1000X(val);
4593 			tp->link_config.flowctrl = adv;
4594 
4595 			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4596 			adv = mii_adv_to_ethtool_adv_x(val);
4597 		}
4598 
4599 		tp->link_config.advertising |= adv;
4600 	}
4601 
4602 done:
4603 	return err;
4604 }
4605 
4606 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4607 {
4608 	int err;
4609 
4610 	/* Turn off tap power management. */
4611 	/* Set Extended packet length bit */
4612 	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4613 
4614 	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4615 	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4616 	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4617 	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4618 	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4619 
4620 	udelay(40);
4621 
4622 	return err;
4623 }
4624 
4625 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4626 {
4627 	struct ethtool_eee eee;
4628 
4629 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4630 		return true;
4631 
4632 	tg3_eee_pull_config(tp, &eee);
4633 
4634 	if (tp->eee.eee_enabled) {
4635 		if (tp->eee.advertised != eee.advertised ||
4636 		    tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4637 		    tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4638 			return false;
4639 	} else {
4640 		/* EEE is disabled but we're advertising */
4641 		if (eee.advertised)
4642 			return false;
4643 	}
4644 
4645 	return true;
4646 }
4647 
4648 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4649 {
4650 	u32 advmsk, tgtadv, advertising;
4651 
4652 	advertising = tp->link_config.advertising;
4653 	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4654 
4655 	advmsk = ADVERTISE_ALL;
4656 	if (tp->link_config.active_duplex == DUPLEX_FULL) {
4657 		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4658 		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4659 	}
4660 
4661 	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4662 		return false;
4663 
4664 	if ((*lcladv & advmsk) != tgtadv)
4665 		return false;
4666 
4667 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4668 		u32 tg3_ctrl;
4669 
4670 		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4671 
4672 		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4673 			return false;
4674 
4675 		if (tgtadv &&
4676 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4677 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4678 			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4679 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4680 				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4681 		} else {
4682 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4683 		}
4684 
4685 		if (tg3_ctrl != tgtadv)
4686 			return false;
4687 	}
4688 
4689 	return true;
4690 }
4691 
4692 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4693 {
4694 	u32 lpeth = 0;
4695 
4696 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4697 		u32 val;
4698 
4699 		if (tg3_readphy(tp, MII_STAT1000, &val))
4700 			return false;
4701 
4702 		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4703 	}
4704 
4705 	if (tg3_readphy(tp, MII_LPA, rmtadv))
4706 		return false;
4707 
4708 	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4709 	tp->link_config.rmt_adv = lpeth;
4710 
4711 	return true;
4712 }
4713 
4714 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4715 {
4716 	if (curr_link_up != tp->link_up) {
4717 		if (curr_link_up) {
4718 			netif_carrier_on(tp->dev);
4719 		} else {
4720 			netif_carrier_off(tp->dev);
4721 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4722 				tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4723 		}
4724 
4725 		tg3_link_report(tp);
4726 		return true;
4727 	}
4728 
4729 	return false;
4730 }
4731 
4732 static void tg3_clear_mac_status(struct tg3 *tp)
4733 {
4734 	tw32(MAC_EVENT, 0);
4735 
4736 	tw32_f(MAC_STATUS,
4737 	       MAC_STATUS_SYNC_CHANGED |
4738 	       MAC_STATUS_CFG_CHANGED |
4739 	       MAC_STATUS_MI_COMPLETION |
4740 	       MAC_STATUS_LNKSTATE_CHANGED);
4741 	udelay(40);
4742 }
4743 
4744 static void tg3_setup_eee(struct tg3 *tp)
4745 {
4746 	u32 val;
4747 
4748 	val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4749 	      TG3_CPMU_EEE_LNKIDL_UART_IDL;
4750 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4751 		val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4752 
4753 	tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4754 
4755 	tw32_f(TG3_CPMU_EEE_CTRL,
4756 	       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4757 
4758 	val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4759 	      (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4760 	      TG3_CPMU_EEEMD_LPI_IN_RX |
4761 	      TG3_CPMU_EEEMD_EEE_ENABLE;
4762 
4763 	if (tg3_asic_rev(tp) != ASIC_REV_5717)
4764 		val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4765 
4766 	if (tg3_flag(tp, ENABLE_APE))
4767 		val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4768 
4769 	tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4770 
4771 	tw32_f(TG3_CPMU_EEE_DBTMR1,
4772 	       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4773 	       (tp->eee.tx_lpi_timer & 0xffff));
4774 
4775 	tw32_f(TG3_CPMU_EEE_DBTMR2,
4776 	       TG3_CPMU_DBTMR2_APE_TX_2047US |
4777 	       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4778 }
4779 
4780 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4781 {
4782 	bool current_link_up;
4783 	u32 bmsr, val;
4784 	u32 lcl_adv, rmt_adv;
4785 	u16 current_speed;
4786 	u8 current_duplex;
4787 	int i, err;
4788 
4789 	tg3_clear_mac_status(tp);
4790 
4791 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4792 		tw32_f(MAC_MI_MODE,
4793 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4794 		udelay(80);
4795 	}
4796 
4797 	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4798 
4799 	/* Some third-party PHYs need to be reset on link going
4800 	 * down.
4801 	 */
4802 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4803 	     tg3_asic_rev(tp) == ASIC_REV_5704 ||
4804 	     tg3_asic_rev(tp) == ASIC_REV_5705) &&
4805 	    tp->link_up) {
4806 		tg3_readphy(tp, MII_BMSR, &bmsr);
4807 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4808 		    !(bmsr & BMSR_LSTATUS))
4809 			force_reset = true;
4810 	}
4811 	if (force_reset)
4812 		tg3_phy_reset(tp);
4813 
4814 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4815 		tg3_readphy(tp, MII_BMSR, &bmsr);
4816 		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4817 		    !tg3_flag(tp, INIT_COMPLETE))
4818 			bmsr = 0;
4819 
4820 		if (!(bmsr & BMSR_LSTATUS)) {
4821 			err = tg3_init_5401phy_dsp(tp);
4822 			if (err)
4823 				return err;
4824 
4825 			tg3_readphy(tp, MII_BMSR, &bmsr);
4826 			for (i = 0; i < 1000; i++) {
4827 				udelay(10);
4828 				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4829 				    (bmsr & BMSR_LSTATUS)) {
4830 					udelay(40);
4831 					break;
4832 				}
4833 			}
4834 
4835 			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4836 			    TG3_PHY_REV_BCM5401_B0 &&
4837 			    !(bmsr & BMSR_LSTATUS) &&
4838 			    tp->link_config.active_speed == SPEED_1000) {
4839 				err = tg3_phy_reset(tp);
4840 				if (!err)
4841 					err = tg3_init_5401phy_dsp(tp);
4842 				if (err)
4843 					return err;
4844 			}
4845 		}
4846 	} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4847 		   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4848 		/* 5701 {A0,B0} CRC bug workaround */
4849 		tg3_writephy(tp, 0x15, 0x0a75);
4850 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4851 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4852 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4853 	}
4854 
4855 	/* Clear pending interrupts... */
4856 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4857 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4858 
4859 	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4860 		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4861 	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4862 		tg3_writephy(tp, MII_TG3_IMASK, ~0);
4863 
4864 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4865 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
4866 		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4867 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
4868 				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4869 		else
4870 			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4871 	}
4872 
4873 	current_link_up = false;
4874 	current_speed = SPEED_UNKNOWN;
4875 	current_duplex = DUPLEX_UNKNOWN;
4876 	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4877 	tp->link_config.rmt_adv = 0;
4878 
4879 	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4880 		err = tg3_phy_auxctl_read(tp,
4881 					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4882 					  &val);
4883 		if (!err && !(val & (1 << 10))) {
4884 			tg3_phy_auxctl_write(tp,
4885 					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4886 					     val | (1 << 10));
4887 			goto relink;
4888 		}
4889 	}
4890 
4891 	bmsr = 0;
4892 	for (i = 0; i < 100; i++) {
4893 		tg3_readphy(tp, MII_BMSR, &bmsr);
4894 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4895 		    (bmsr & BMSR_LSTATUS))
4896 			break;
4897 		udelay(40);
4898 	}
4899 
4900 	if (bmsr & BMSR_LSTATUS) {
4901 		u32 aux_stat, bmcr;
4902 
4903 		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4904 		for (i = 0; i < 2000; i++) {
4905 			udelay(10);
4906 			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4907 			    aux_stat)
4908 				break;
4909 		}
4910 
4911 		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4912 					     &current_speed,
4913 					     &current_duplex);
4914 
4915 		bmcr = 0;
4916 		for (i = 0; i < 200; i++) {
4917 			tg3_readphy(tp, MII_BMCR, &bmcr);
4918 			if (tg3_readphy(tp, MII_BMCR, &bmcr))
4919 				continue;
4920 			if (bmcr && bmcr != 0x7fff)
4921 				break;
4922 			udelay(10);
4923 		}
4924 
4925 		lcl_adv = 0;
4926 		rmt_adv = 0;
4927 
4928 		tp->link_config.active_speed = current_speed;
4929 		tp->link_config.active_duplex = current_duplex;
4930 
4931 		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4932 			bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4933 
4934 			if ((bmcr & BMCR_ANENABLE) &&
4935 			    eee_config_ok &&
4936 			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4937 			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4938 				current_link_up = true;
4939 
4940 			/* EEE settings changes take effect only after a phy
4941 			 * reset.  If we have skipped a reset due to Link Flap
4942 			 * Avoidance being enabled, do it now.
4943 			 */
4944 			if (!eee_config_ok &&
4945 			    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4946 			    !force_reset) {
4947 				tg3_setup_eee(tp);
4948 				tg3_phy_reset(tp);
4949 			}
4950 		} else {
4951 			if (!(bmcr & BMCR_ANENABLE) &&
4952 			    tp->link_config.speed == current_speed &&
4953 			    tp->link_config.duplex == current_duplex) {
4954 				current_link_up = true;
4955 			}
4956 		}
4957 
4958 		if (current_link_up &&
4959 		    tp->link_config.active_duplex == DUPLEX_FULL) {
4960 			u32 reg, bit;
4961 
4962 			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4963 				reg = MII_TG3_FET_GEN_STAT;
4964 				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4965 			} else {
4966 				reg = MII_TG3_EXT_STAT;
4967 				bit = MII_TG3_EXT_STAT_MDIX;
4968 			}
4969 
4970 			if (!tg3_readphy(tp, reg, &val) && (val & bit))
4971 				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4972 
4973 			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4974 		}
4975 	}
4976 
4977 relink:
4978 	if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4979 		tg3_phy_copper_begin(tp);
4980 
4981 		if (tg3_flag(tp, ROBOSWITCH)) {
4982 			current_link_up = true;
4983 			/* FIXME: when BCM5325 switch is used use 100 MBit/s */
4984 			current_speed = SPEED_1000;
4985 			current_duplex = DUPLEX_FULL;
4986 			tp->link_config.active_speed = current_speed;
4987 			tp->link_config.active_duplex = current_duplex;
4988 		}
4989 
4990 		tg3_readphy(tp, MII_BMSR, &bmsr);
4991 		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4992 		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4993 			current_link_up = true;
4994 	}
4995 
4996 	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4997 	if (current_link_up) {
4998 		if (tp->link_config.active_speed == SPEED_100 ||
4999 		    tp->link_config.active_speed == SPEED_10)
5000 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5001 		else
5002 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5003 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
5004 		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5005 	else
5006 		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5007 
5008 	/* In order for the 5750 core in BCM4785 chip to work properly
5009 	 * in RGMII mode, the Led Control Register must be set up.
5010 	 */
5011 	if (tg3_flag(tp, RGMII_MODE)) {
5012 		u32 led_ctrl = tr32(MAC_LED_CTRL);
5013 		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5014 
5015 		if (tp->link_config.active_speed == SPEED_10)
5016 			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5017 		else if (tp->link_config.active_speed == SPEED_100)
5018 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5019 				     LED_CTRL_100MBPS_ON);
5020 		else if (tp->link_config.active_speed == SPEED_1000)
5021 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5022 				     LED_CTRL_1000MBPS_ON);
5023 
5024 		tw32(MAC_LED_CTRL, led_ctrl);
5025 		udelay(40);
5026 	}
5027 
5028 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5029 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5030 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5031 
5032 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5033 		if (current_link_up &&
5034 		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5035 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5036 		else
5037 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5038 	}
5039 
5040 	/* ??? Without this setting Netgear GA302T PHY does not
5041 	 * ??? send/receive packets...
5042 	 */
5043 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5044 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5045 		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5046 		tw32_f(MAC_MI_MODE, tp->mi_mode);
5047 		udelay(80);
5048 	}
5049 
5050 	tw32_f(MAC_MODE, tp->mac_mode);
5051 	udelay(40);
5052 
5053 	tg3_phy_eee_adjust(tp, current_link_up);
5054 
5055 	if (tg3_flag(tp, USE_LINKCHG_REG)) {
5056 		/* Polled via timer. */
5057 		tw32_f(MAC_EVENT, 0);
5058 	} else {
5059 		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5060 	}
5061 	udelay(40);
5062 
5063 	if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5064 	    current_link_up &&
5065 	    tp->link_config.active_speed == SPEED_1000 &&
5066 	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5067 		udelay(120);
5068 		tw32_f(MAC_STATUS,
5069 		     (MAC_STATUS_SYNC_CHANGED |
5070 		      MAC_STATUS_CFG_CHANGED));
5071 		udelay(40);
5072 		tg3_write_mem(tp,
5073 			      NIC_SRAM_FIRMWARE_MBOX,
5074 			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5075 	}
5076 
5077 	/* Prevent send BD corruption. */
5078 	if (tg3_flag(tp, CLKREQ_BUG)) {
5079 		if (tp->link_config.active_speed == SPEED_100 ||
5080 		    tp->link_config.active_speed == SPEED_10)
5081 			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5082 						   PCI_EXP_LNKCTL_CLKREQ_EN);
5083 		else
5084 			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5085 						 PCI_EXP_LNKCTL_CLKREQ_EN);
5086 	}
5087 
5088 	tg3_test_and_report_link_chg(tp, current_link_up);
5089 
5090 	return 0;
5091 }
5092 
5093 struct tg3_fiber_aneginfo {
5094 	int state;
5095 #define ANEG_STATE_UNKNOWN		0
5096 #define ANEG_STATE_AN_ENABLE		1
5097 #define ANEG_STATE_RESTART_INIT		2
5098 #define ANEG_STATE_RESTART		3
5099 #define ANEG_STATE_DISABLE_LINK_OK	4
5100 #define ANEG_STATE_ABILITY_DETECT_INIT	5
5101 #define ANEG_STATE_ABILITY_DETECT	6
5102 #define ANEG_STATE_ACK_DETECT_INIT	7
5103 #define ANEG_STATE_ACK_DETECT		8
5104 #define ANEG_STATE_COMPLETE_ACK_INIT	9
5105 #define ANEG_STATE_COMPLETE_ACK		10
5106 #define ANEG_STATE_IDLE_DETECT_INIT	11
5107 #define ANEG_STATE_IDLE_DETECT		12
5108 #define ANEG_STATE_LINK_OK		13
5109 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
5110 #define ANEG_STATE_NEXT_PAGE_WAIT	15
5111 
5112 	u32 flags;
5113 #define MR_AN_ENABLE		0x00000001
5114 #define MR_RESTART_AN		0x00000002
5115 #define MR_AN_COMPLETE		0x00000004
5116 #define MR_PAGE_RX		0x00000008
5117 #define MR_NP_LOADED		0x00000010
5118 #define MR_TOGGLE_TX		0x00000020
5119 #define MR_LP_ADV_FULL_DUPLEX	0x00000040
5120 #define MR_LP_ADV_HALF_DUPLEX	0x00000080
5121 #define MR_LP_ADV_SYM_PAUSE	0x00000100
5122 #define MR_LP_ADV_ASYM_PAUSE	0x00000200
5123 #define MR_LP_ADV_REMOTE_FAULT1	0x00000400
5124 #define MR_LP_ADV_REMOTE_FAULT2	0x00000800
5125 #define MR_LP_ADV_NEXT_PAGE	0x00001000
5126 #define MR_TOGGLE_RX		0x00002000
5127 #define MR_NP_RX		0x00004000
5128 
5129 #define MR_LINK_OK		0x80000000
5130 
5131 	unsigned long link_time, cur_time;
5132 
5133 	u32 ability_match_cfg;
5134 	int ability_match_count;
5135 
5136 	char ability_match, idle_match, ack_match;
5137 
5138 	u32 txconfig, rxconfig;
5139 #define ANEG_CFG_NP		0x00000080
5140 #define ANEG_CFG_ACK		0x00000040
5141 #define ANEG_CFG_RF2		0x00000020
5142 #define ANEG_CFG_RF1		0x00000010
5143 #define ANEG_CFG_PS2		0x00000001
5144 #define ANEG_CFG_PS1		0x00008000
5145 #define ANEG_CFG_HD		0x00004000
5146 #define ANEG_CFG_FD		0x00002000
5147 #define ANEG_CFG_INVAL		0x00001f06
5148 
5149 };
5150 #define ANEG_OK		0
5151 #define ANEG_DONE	1
5152 #define ANEG_TIMER_ENAB	2
5153 #define ANEG_FAILED	-1
5154 
5155 #define ANEG_STATE_SETTLE_TIME	10000
5156 
5157 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5158 				   struct tg3_fiber_aneginfo *ap)
5159 {
5160 	u16 flowctrl;
5161 	unsigned long delta;
5162 	u32 rx_cfg_reg;
5163 	int ret;
5164 
5165 	if (ap->state == ANEG_STATE_UNKNOWN) {
5166 		ap->rxconfig = 0;
5167 		ap->link_time = 0;
5168 		ap->cur_time = 0;
5169 		ap->ability_match_cfg = 0;
5170 		ap->ability_match_count = 0;
5171 		ap->ability_match = 0;
5172 		ap->idle_match = 0;
5173 		ap->ack_match = 0;
5174 	}
5175 	ap->cur_time++;
5176 
5177 	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5178 		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5179 
5180 		if (rx_cfg_reg != ap->ability_match_cfg) {
5181 			ap->ability_match_cfg = rx_cfg_reg;
5182 			ap->ability_match = 0;
5183 			ap->ability_match_count = 0;
5184 		} else {
5185 			if (++ap->ability_match_count > 1) {
5186 				ap->ability_match = 1;
5187 				ap->ability_match_cfg = rx_cfg_reg;
5188 			}
5189 		}
5190 		if (rx_cfg_reg & ANEG_CFG_ACK)
5191 			ap->ack_match = 1;
5192 		else
5193 			ap->ack_match = 0;
5194 
5195 		ap->idle_match = 0;
5196 	} else {
5197 		ap->idle_match = 1;
5198 		ap->ability_match_cfg = 0;
5199 		ap->ability_match_count = 0;
5200 		ap->ability_match = 0;
5201 		ap->ack_match = 0;
5202 
5203 		rx_cfg_reg = 0;
5204 	}
5205 
5206 	ap->rxconfig = rx_cfg_reg;
5207 	ret = ANEG_OK;
5208 
5209 	switch (ap->state) {
5210 	case ANEG_STATE_UNKNOWN:
5211 		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5212 			ap->state = ANEG_STATE_AN_ENABLE;
5213 
5214 		/* fall through */
5215 	case ANEG_STATE_AN_ENABLE:
5216 		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5217 		if (ap->flags & MR_AN_ENABLE) {
5218 			ap->link_time = 0;
5219 			ap->cur_time = 0;
5220 			ap->ability_match_cfg = 0;
5221 			ap->ability_match_count = 0;
5222 			ap->ability_match = 0;
5223 			ap->idle_match = 0;
5224 			ap->ack_match = 0;
5225 
5226 			ap->state = ANEG_STATE_RESTART_INIT;
5227 		} else {
5228 			ap->state = ANEG_STATE_DISABLE_LINK_OK;
5229 		}
5230 		break;
5231 
5232 	case ANEG_STATE_RESTART_INIT:
5233 		ap->link_time = ap->cur_time;
5234 		ap->flags &= ~(MR_NP_LOADED);
5235 		ap->txconfig = 0;
5236 		tw32(MAC_TX_AUTO_NEG, 0);
5237 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5238 		tw32_f(MAC_MODE, tp->mac_mode);
5239 		udelay(40);
5240 
5241 		ret = ANEG_TIMER_ENAB;
5242 		ap->state = ANEG_STATE_RESTART;
5243 
5244 		/* fall through */
5245 	case ANEG_STATE_RESTART:
5246 		delta = ap->cur_time - ap->link_time;
5247 		if (delta > ANEG_STATE_SETTLE_TIME)
5248 			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5249 		else
5250 			ret = ANEG_TIMER_ENAB;
5251 		break;
5252 
5253 	case ANEG_STATE_DISABLE_LINK_OK:
5254 		ret = ANEG_DONE;
5255 		break;
5256 
5257 	case ANEG_STATE_ABILITY_DETECT_INIT:
5258 		ap->flags &= ~(MR_TOGGLE_TX);
5259 		ap->txconfig = ANEG_CFG_FD;
5260 		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5261 		if (flowctrl & ADVERTISE_1000XPAUSE)
5262 			ap->txconfig |= ANEG_CFG_PS1;
5263 		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5264 			ap->txconfig |= ANEG_CFG_PS2;
5265 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5266 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5267 		tw32_f(MAC_MODE, tp->mac_mode);
5268 		udelay(40);
5269 
5270 		ap->state = ANEG_STATE_ABILITY_DETECT;
5271 		break;
5272 
5273 	case ANEG_STATE_ABILITY_DETECT:
5274 		if (ap->ability_match != 0 && ap->rxconfig != 0)
5275 			ap->state = ANEG_STATE_ACK_DETECT_INIT;
5276 		break;
5277 
5278 	case ANEG_STATE_ACK_DETECT_INIT:
5279 		ap->txconfig |= ANEG_CFG_ACK;
5280 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5281 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5282 		tw32_f(MAC_MODE, tp->mac_mode);
5283 		udelay(40);
5284 
5285 		ap->state = ANEG_STATE_ACK_DETECT;
5286 
5287 		/* fall through */
5288 	case ANEG_STATE_ACK_DETECT:
5289 		if (ap->ack_match != 0) {
5290 			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5291 			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5292 				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5293 			} else {
5294 				ap->state = ANEG_STATE_AN_ENABLE;
5295 			}
5296 		} else if (ap->ability_match != 0 &&
5297 			   ap->rxconfig == 0) {
5298 			ap->state = ANEG_STATE_AN_ENABLE;
5299 		}
5300 		break;
5301 
5302 	case ANEG_STATE_COMPLETE_ACK_INIT:
5303 		if (ap->rxconfig & ANEG_CFG_INVAL) {
5304 			ret = ANEG_FAILED;
5305 			break;
5306 		}
5307 		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5308 			       MR_LP_ADV_HALF_DUPLEX |
5309 			       MR_LP_ADV_SYM_PAUSE |
5310 			       MR_LP_ADV_ASYM_PAUSE |
5311 			       MR_LP_ADV_REMOTE_FAULT1 |
5312 			       MR_LP_ADV_REMOTE_FAULT2 |
5313 			       MR_LP_ADV_NEXT_PAGE |
5314 			       MR_TOGGLE_RX |
5315 			       MR_NP_RX);
5316 		if (ap->rxconfig & ANEG_CFG_FD)
5317 			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5318 		if (ap->rxconfig & ANEG_CFG_HD)
5319 			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5320 		if (ap->rxconfig & ANEG_CFG_PS1)
5321 			ap->flags |= MR_LP_ADV_SYM_PAUSE;
5322 		if (ap->rxconfig & ANEG_CFG_PS2)
5323 			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5324 		if (ap->rxconfig & ANEG_CFG_RF1)
5325 			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5326 		if (ap->rxconfig & ANEG_CFG_RF2)
5327 			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5328 		if (ap->rxconfig & ANEG_CFG_NP)
5329 			ap->flags |= MR_LP_ADV_NEXT_PAGE;
5330 
5331 		ap->link_time = ap->cur_time;
5332 
5333 		ap->flags ^= (MR_TOGGLE_TX);
5334 		if (ap->rxconfig & 0x0008)
5335 			ap->flags |= MR_TOGGLE_RX;
5336 		if (ap->rxconfig & ANEG_CFG_NP)
5337 			ap->flags |= MR_NP_RX;
5338 		ap->flags |= MR_PAGE_RX;
5339 
5340 		ap->state = ANEG_STATE_COMPLETE_ACK;
5341 		ret = ANEG_TIMER_ENAB;
5342 		break;
5343 
5344 	case ANEG_STATE_COMPLETE_ACK:
5345 		if (ap->ability_match != 0 &&
5346 		    ap->rxconfig == 0) {
5347 			ap->state = ANEG_STATE_AN_ENABLE;
5348 			break;
5349 		}
5350 		delta = ap->cur_time - ap->link_time;
5351 		if (delta > ANEG_STATE_SETTLE_TIME) {
5352 			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5353 				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5354 			} else {
5355 				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5356 				    !(ap->flags & MR_NP_RX)) {
5357 					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5358 				} else {
5359 					ret = ANEG_FAILED;
5360 				}
5361 			}
5362 		}
5363 		break;
5364 
5365 	case ANEG_STATE_IDLE_DETECT_INIT:
5366 		ap->link_time = ap->cur_time;
5367 		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5368 		tw32_f(MAC_MODE, tp->mac_mode);
5369 		udelay(40);
5370 
5371 		ap->state = ANEG_STATE_IDLE_DETECT;
5372 		ret = ANEG_TIMER_ENAB;
5373 		break;
5374 
5375 	case ANEG_STATE_IDLE_DETECT:
5376 		if (ap->ability_match != 0 &&
5377 		    ap->rxconfig == 0) {
5378 			ap->state = ANEG_STATE_AN_ENABLE;
5379 			break;
5380 		}
5381 		delta = ap->cur_time - ap->link_time;
5382 		if (delta > ANEG_STATE_SETTLE_TIME) {
5383 			/* XXX another gem from the Broadcom driver :( */
5384 			ap->state = ANEG_STATE_LINK_OK;
5385 		}
5386 		break;
5387 
5388 	case ANEG_STATE_LINK_OK:
5389 		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5390 		ret = ANEG_DONE;
5391 		break;
5392 
5393 	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5394 		/* ??? unimplemented */
5395 		break;
5396 
5397 	case ANEG_STATE_NEXT_PAGE_WAIT:
5398 		/* ??? unimplemented */
5399 		break;
5400 
5401 	default:
5402 		ret = ANEG_FAILED;
5403 		break;
5404 	}
5405 
5406 	return ret;
5407 }
5408 
5409 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5410 {
5411 	int res = 0;
5412 	struct tg3_fiber_aneginfo aninfo;
5413 	int status = ANEG_FAILED;
5414 	unsigned int tick;
5415 	u32 tmp;
5416 
5417 	tw32_f(MAC_TX_AUTO_NEG, 0);
5418 
5419 	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5420 	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5421 	udelay(40);
5422 
5423 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5424 	udelay(40);
5425 
5426 	memset(&aninfo, 0, sizeof(aninfo));
5427 	aninfo.flags |= MR_AN_ENABLE;
5428 	aninfo.state = ANEG_STATE_UNKNOWN;
5429 	aninfo.cur_time = 0;
5430 	tick = 0;
5431 	while (++tick < 195000) {
5432 		status = tg3_fiber_aneg_smachine(tp, &aninfo);
5433 		if (status == ANEG_DONE || status == ANEG_FAILED)
5434 			break;
5435 
5436 		udelay(1);
5437 	}
5438 
5439 	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5440 	tw32_f(MAC_MODE, tp->mac_mode);
5441 	udelay(40);
5442 
5443 	*txflags = aninfo.txconfig;
5444 	*rxflags = aninfo.flags;
5445 
5446 	if (status == ANEG_DONE &&
5447 	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5448 			     MR_LP_ADV_FULL_DUPLEX)))
5449 		res = 1;
5450 
5451 	return res;
5452 }
5453 
5454 static void tg3_init_bcm8002(struct tg3 *tp)
5455 {
5456 	u32 mac_status = tr32(MAC_STATUS);
5457 	int i;
5458 
5459 	/* Reset when initting first time or we have a link. */
5460 	if (tg3_flag(tp, INIT_COMPLETE) &&
5461 	    !(mac_status & MAC_STATUS_PCS_SYNCED))
5462 		return;
5463 
5464 	/* Set PLL lock range. */
5465 	tg3_writephy(tp, 0x16, 0x8007);
5466 
5467 	/* SW reset */
5468 	tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5469 
5470 	/* Wait for reset to complete. */
5471 	/* XXX schedule_timeout() ... */
5472 	for (i = 0; i < 500; i++)
5473 		udelay(10);
5474 
5475 	/* Config mode; select PMA/Ch 1 regs. */
5476 	tg3_writephy(tp, 0x10, 0x8411);
5477 
5478 	/* Enable auto-lock and comdet, select txclk for tx. */
5479 	tg3_writephy(tp, 0x11, 0x0a10);
5480 
5481 	tg3_writephy(tp, 0x18, 0x00a0);
5482 	tg3_writephy(tp, 0x16, 0x41ff);
5483 
5484 	/* Assert and deassert POR. */
5485 	tg3_writephy(tp, 0x13, 0x0400);
5486 	udelay(40);
5487 	tg3_writephy(tp, 0x13, 0x0000);
5488 
5489 	tg3_writephy(tp, 0x11, 0x0a50);
5490 	udelay(40);
5491 	tg3_writephy(tp, 0x11, 0x0a10);
5492 
5493 	/* Wait for signal to stabilize */
5494 	/* XXX schedule_timeout() ... */
5495 	for (i = 0; i < 15000; i++)
5496 		udelay(10);
5497 
5498 	/* Deselect the channel register so we can read the PHYID
5499 	 * later.
5500 	 */
5501 	tg3_writephy(tp, 0x10, 0x8011);
5502 }
5503 
5504 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5505 {
5506 	u16 flowctrl;
5507 	bool current_link_up;
5508 	u32 sg_dig_ctrl, sg_dig_status;
5509 	u32 serdes_cfg, expected_sg_dig_ctrl;
5510 	int workaround, port_a;
5511 
5512 	serdes_cfg = 0;
5513 	expected_sg_dig_ctrl = 0;
5514 	workaround = 0;
5515 	port_a = 1;
5516 	current_link_up = false;
5517 
5518 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5519 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5520 		workaround = 1;
5521 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5522 			port_a = 0;
5523 
5524 		/* preserve bits 0-11,13,14 for signal pre-emphasis */
5525 		/* preserve bits 20-23 for voltage regulator */
5526 		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5527 	}
5528 
5529 	sg_dig_ctrl = tr32(SG_DIG_CTRL);
5530 
5531 	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5532 		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5533 			if (workaround) {
5534 				u32 val = serdes_cfg;
5535 
5536 				if (port_a)
5537 					val |= 0xc010000;
5538 				else
5539 					val |= 0x4010000;
5540 				tw32_f(MAC_SERDES_CFG, val);
5541 			}
5542 
5543 			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5544 		}
5545 		if (mac_status & MAC_STATUS_PCS_SYNCED) {
5546 			tg3_setup_flow_control(tp, 0, 0);
5547 			current_link_up = true;
5548 		}
5549 		goto out;
5550 	}
5551 
5552 	/* Want auto-negotiation.  */
5553 	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5554 
5555 	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5556 	if (flowctrl & ADVERTISE_1000XPAUSE)
5557 		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5558 	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5559 		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5560 
5561 	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5562 		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5563 		    tp->serdes_counter &&
5564 		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
5565 				    MAC_STATUS_RCVD_CFG)) ==
5566 		     MAC_STATUS_PCS_SYNCED)) {
5567 			tp->serdes_counter--;
5568 			current_link_up = true;
5569 			goto out;
5570 		}
5571 restart_autoneg:
5572 		if (workaround)
5573 			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5574 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5575 		udelay(5);
5576 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5577 
5578 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5579 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5580 	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5581 				 MAC_STATUS_SIGNAL_DET)) {
5582 		sg_dig_status = tr32(SG_DIG_STATUS);
5583 		mac_status = tr32(MAC_STATUS);
5584 
5585 		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5586 		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
5587 			u32 local_adv = 0, remote_adv = 0;
5588 
5589 			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5590 				local_adv |= ADVERTISE_1000XPAUSE;
5591 			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5592 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5593 
5594 			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5595 				remote_adv |= LPA_1000XPAUSE;
5596 			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5597 				remote_adv |= LPA_1000XPAUSE_ASYM;
5598 
5599 			tp->link_config.rmt_adv =
5600 					   mii_adv_to_ethtool_adv_x(remote_adv);
5601 
5602 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5603 			current_link_up = true;
5604 			tp->serdes_counter = 0;
5605 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5606 		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5607 			if (tp->serdes_counter)
5608 				tp->serdes_counter--;
5609 			else {
5610 				if (workaround) {
5611 					u32 val = serdes_cfg;
5612 
5613 					if (port_a)
5614 						val |= 0xc010000;
5615 					else
5616 						val |= 0x4010000;
5617 
5618 					tw32_f(MAC_SERDES_CFG, val);
5619 				}
5620 
5621 				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5622 				udelay(40);
5623 
5624 				/* Link parallel detection - link is up */
5625 				/* only if we have PCS_SYNC and not */
5626 				/* receiving config code words */
5627 				mac_status = tr32(MAC_STATUS);
5628 				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5629 				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
5630 					tg3_setup_flow_control(tp, 0, 0);
5631 					current_link_up = true;
5632 					tp->phy_flags |=
5633 						TG3_PHYFLG_PARALLEL_DETECT;
5634 					tp->serdes_counter =
5635 						SERDES_PARALLEL_DET_TIMEOUT;
5636 				} else
5637 					goto restart_autoneg;
5638 			}
5639 		}
5640 	} else {
5641 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5642 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5643 	}
5644 
5645 out:
5646 	return current_link_up;
5647 }
5648 
5649 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5650 {
5651 	bool current_link_up = false;
5652 
5653 	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5654 		goto out;
5655 
5656 	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5657 		u32 txflags, rxflags;
5658 		int i;
5659 
5660 		if (fiber_autoneg(tp, &txflags, &rxflags)) {
5661 			u32 local_adv = 0, remote_adv = 0;
5662 
5663 			if (txflags & ANEG_CFG_PS1)
5664 				local_adv |= ADVERTISE_1000XPAUSE;
5665 			if (txflags & ANEG_CFG_PS2)
5666 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5667 
5668 			if (rxflags & MR_LP_ADV_SYM_PAUSE)
5669 				remote_adv |= LPA_1000XPAUSE;
5670 			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5671 				remote_adv |= LPA_1000XPAUSE_ASYM;
5672 
5673 			tp->link_config.rmt_adv =
5674 					   mii_adv_to_ethtool_adv_x(remote_adv);
5675 
5676 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5677 
5678 			current_link_up = true;
5679 		}
5680 		for (i = 0; i < 30; i++) {
5681 			udelay(20);
5682 			tw32_f(MAC_STATUS,
5683 			       (MAC_STATUS_SYNC_CHANGED |
5684 				MAC_STATUS_CFG_CHANGED));
5685 			udelay(40);
5686 			if ((tr32(MAC_STATUS) &
5687 			     (MAC_STATUS_SYNC_CHANGED |
5688 			      MAC_STATUS_CFG_CHANGED)) == 0)
5689 				break;
5690 		}
5691 
5692 		mac_status = tr32(MAC_STATUS);
5693 		if (!current_link_up &&
5694 		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
5695 		    !(mac_status & MAC_STATUS_RCVD_CFG))
5696 			current_link_up = true;
5697 	} else {
5698 		tg3_setup_flow_control(tp, 0, 0);
5699 
5700 		/* Forcing 1000FD link up. */
5701 		current_link_up = true;
5702 
5703 		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5704 		udelay(40);
5705 
5706 		tw32_f(MAC_MODE, tp->mac_mode);
5707 		udelay(40);
5708 	}
5709 
5710 out:
5711 	return current_link_up;
5712 }
5713 
5714 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5715 {
5716 	u32 orig_pause_cfg;
5717 	u16 orig_active_speed;
5718 	u8 orig_active_duplex;
5719 	u32 mac_status;
5720 	bool current_link_up;
5721 	int i;
5722 
5723 	orig_pause_cfg = tp->link_config.active_flowctrl;
5724 	orig_active_speed = tp->link_config.active_speed;
5725 	orig_active_duplex = tp->link_config.active_duplex;
5726 
5727 	if (!tg3_flag(tp, HW_AUTONEG) &&
5728 	    tp->link_up &&
5729 	    tg3_flag(tp, INIT_COMPLETE)) {
5730 		mac_status = tr32(MAC_STATUS);
5731 		mac_status &= (MAC_STATUS_PCS_SYNCED |
5732 			       MAC_STATUS_SIGNAL_DET |
5733 			       MAC_STATUS_CFG_CHANGED |
5734 			       MAC_STATUS_RCVD_CFG);
5735 		if (mac_status == (MAC_STATUS_PCS_SYNCED |
5736 				   MAC_STATUS_SIGNAL_DET)) {
5737 			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5738 					    MAC_STATUS_CFG_CHANGED));
5739 			return 0;
5740 		}
5741 	}
5742 
5743 	tw32_f(MAC_TX_AUTO_NEG, 0);
5744 
5745 	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5746 	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5747 	tw32_f(MAC_MODE, tp->mac_mode);
5748 	udelay(40);
5749 
5750 	if (tp->phy_id == TG3_PHY_ID_BCM8002)
5751 		tg3_init_bcm8002(tp);
5752 
5753 	/* Enable link change event even when serdes polling.  */
5754 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5755 	udelay(40);
5756 
5757 	current_link_up = false;
5758 	tp->link_config.rmt_adv = 0;
5759 	mac_status = tr32(MAC_STATUS);
5760 
5761 	if (tg3_flag(tp, HW_AUTONEG))
5762 		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5763 	else
5764 		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5765 
5766 	tp->napi[0].hw_status->status =
5767 		(SD_STATUS_UPDATED |
5768 		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5769 
5770 	for (i = 0; i < 100; i++) {
5771 		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5772 				    MAC_STATUS_CFG_CHANGED));
5773 		udelay(5);
5774 		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5775 					 MAC_STATUS_CFG_CHANGED |
5776 					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5777 			break;
5778 	}
5779 
5780 	mac_status = tr32(MAC_STATUS);
5781 	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5782 		current_link_up = false;
5783 		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5784 		    tp->serdes_counter == 0) {
5785 			tw32_f(MAC_MODE, (tp->mac_mode |
5786 					  MAC_MODE_SEND_CONFIGS));
5787 			udelay(1);
5788 			tw32_f(MAC_MODE, tp->mac_mode);
5789 		}
5790 	}
5791 
5792 	if (current_link_up) {
5793 		tp->link_config.active_speed = SPEED_1000;
5794 		tp->link_config.active_duplex = DUPLEX_FULL;
5795 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5796 				    LED_CTRL_LNKLED_OVERRIDE |
5797 				    LED_CTRL_1000MBPS_ON));
5798 	} else {
5799 		tp->link_config.active_speed = SPEED_UNKNOWN;
5800 		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5801 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5802 				    LED_CTRL_LNKLED_OVERRIDE |
5803 				    LED_CTRL_TRAFFIC_OVERRIDE));
5804 	}
5805 
5806 	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5807 		u32 now_pause_cfg = tp->link_config.active_flowctrl;
5808 		if (orig_pause_cfg != now_pause_cfg ||
5809 		    orig_active_speed != tp->link_config.active_speed ||
5810 		    orig_active_duplex != tp->link_config.active_duplex)
5811 			tg3_link_report(tp);
5812 	}
5813 
5814 	return 0;
5815 }
5816 
5817 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5818 {
5819 	int err = 0;
5820 	u32 bmsr, bmcr;
5821 	u16 current_speed = SPEED_UNKNOWN;
5822 	u8 current_duplex = DUPLEX_UNKNOWN;
5823 	bool current_link_up = false;
5824 	u32 local_adv, remote_adv, sgsr;
5825 
5826 	if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5827 	     tg3_asic_rev(tp) == ASIC_REV_5720) &&
5828 	     !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5829 	     (sgsr & SERDES_TG3_SGMII_MODE)) {
5830 
5831 		if (force_reset)
5832 			tg3_phy_reset(tp);
5833 
5834 		tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5835 
5836 		if (!(sgsr & SERDES_TG3_LINK_UP)) {
5837 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5838 		} else {
5839 			current_link_up = true;
5840 			if (sgsr & SERDES_TG3_SPEED_1000) {
5841 				current_speed = SPEED_1000;
5842 				tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5843 			} else if (sgsr & SERDES_TG3_SPEED_100) {
5844 				current_speed = SPEED_100;
5845 				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5846 			} else {
5847 				current_speed = SPEED_10;
5848 				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5849 			}
5850 
5851 			if (sgsr & SERDES_TG3_FULL_DUPLEX)
5852 				current_duplex = DUPLEX_FULL;
5853 			else
5854 				current_duplex = DUPLEX_HALF;
5855 		}
5856 
5857 		tw32_f(MAC_MODE, tp->mac_mode);
5858 		udelay(40);
5859 
5860 		tg3_clear_mac_status(tp);
5861 
5862 		goto fiber_setup_done;
5863 	}
5864 
5865 	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5866 	tw32_f(MAC_MODE, tp->mac_mode);
5867 	udelay(40);
5868 
5869 	tg3_clear_mac_status(tp);
5870 
5871 	if (force_reset)
5872 		tg3_phy_reset(tp);
5873 
5874 	tp->link_config.rmt_adv = 0;
5875 
5876 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5877 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5878 	if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5879 		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5880 			bmsr |= BMSR_LSTATUS;
5881 		else
5882 			bmsr &= ~BMSR_LSTATUS;
5883 	}
5884 
5885 	err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5886 
5887 	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5888 	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5889 		/* do nothing, just check for link up at the end */
5890 	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5891 		u32 adv, newadv;
5892 
5893 		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5894 		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5895 				 ADVERTISE_1000XPAUSE |
5896 				 ADVERTISE_1000XPSE_ASYM |
5897 				 ADVERTISE_SLCT);
5898 
5899 		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5900 		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5901 
5902 		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5903 			tg3_writephy(tp, MII_ADVERTISE, newadv);
5904 			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5905 			tg3_writephy(tp, MII_BMCR, bmcr);
5906 
5907 			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5908 			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5909 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5910 
5911 			return err;
5912 		}
5913 	} else {
5914 		u32 new_bmcr;
5915 
5916 		bmcr &= ~BMCR_SPEED1000;
5917 		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5918 
5919 		if (tp->link_config.duplex == DUPLEX_FULL)
5920 			new_bmcr |= BMCR_FULLDPLX;
5921 
5922 		if (new_bmcr != bmcr) {
5923 			/* BMCR_SPEED1000 is a reserved bit that needs
5924 			 * to be set on write.
5925 			 */
5926 			new_bmcr |= BMCR_SPEED1000;
5927 
5928 			/* Force a linkdown */
5929 			if (tp->link_up) {
5930 				u32 adv;
5931 
5932 				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5933 				adv &= ~(ADVERTISE_1000XFULL |
5934 					 ADVERTISE_1000XHALF |
5935 					 ADVERTISE_SLCT);
5936 				tg3_writephy(tp, MII_ADVERTISE, adv);
5937 				tg3_writephy(tp, MII_BMCR, bmcr |
5938 							   BMCR_ANRESTART |
5939 							   BMCR_ANENABLE);
5940 				udelay(10);
5941 				tg3_carrier_off(tp);
5942 			}
5943 			tg3_writephy(tp, MII_BMCR, new_bmcr);
5944 			bmcr = new_bmcr;
5945 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5946 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5947 			if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5948 				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5949 					bmsr |= BMSR_LSTATUS;
5950 				else
5951 					bmsr &= ~BMSR_LSTATUS;
5952 			}
5953 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5954 		}
5955 	}
5956 
5957 	if (bmsr & BMSR_LSTATUS) {
5958 		current_speed = SPEED_1000;
5959 		current_link_up = true;
5960 		if (bmcr & BMCR_FULLDPLX)
5961 			current_duplex = DUPLEX_FULL;
5962 		else
5963 			current_duplex = DUPLEX_HALF;
5964 
5965 		local_adv = 0;
5966 		remote_adv = 0;
5967 
5968 		if (bmcr & BMCR_ANENABLE) {
5969 			u32 common;
5970 
5971 			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5972 			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5973 			common = local_adv & remote_adv;
5974 			if (common & (ADVERTISE_1000XHALF |
5975 				      ADVERTISE_1000XFULL)) {
5976 				if (common & ADVERTISE_1000XFULL)
5977 					current_duplex = DUPLEX_FULL;
5978 				else
5979 					current_duplex = DUPLEX_HALF;
5980 
5981 				tp->link_config.rmt_adv =
5982 					   mii_adv_to_ethtool_adv_x(remote_adv);
5983 			} else if (!tg3_flag(tp, 5780_CLASS)) {
5984 				/* Link is up via parallel detect */
5985 			} else {
5986 				current_link_up = false;
5987 			}
5988 		}
5989 	}
5990 
5991 fiber_setup_done:
5992 	if (current_link_up && current_duplex == DUPLEX_FULL)
5993 		tg3_setup_flow_control(tp, local_adv, remote_adv);
5994 
5995 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5996 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5997 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5998 
5999 	tw32_f(MAC_MODE, tp->mac_mode);
6000 	udelay(40);
6001 
6002 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
6003 
6004 	tp->link_config.active_speed = current_speed;
6005 	tp->link_config.active_duplex = current_duplex;
6006 
6007 	tg3_test_and_report_link_chg(tp, current_link_up);
6008 	return err;
6009 }
6010 
6011 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6012 {
6013 	if (tp->serdes_counter) {
6014 		/* Give autoneg time to complete. */
6015 		tp->serdes_counter--;
6016 		return;
6017 	}
6018 
6019 	if (!tp->link_up &&
6020 	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6021 		u32 bmcr;
6022 
6023 		tg3_readphy(tp, MII_BMCR, &bmcr);
6024 		if (bmcr & BMCR_ANENABLE) {
6025 			u32 phy1, phy2;
6026 
6027 			/* Select shadow register 0x1f */
6028 			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6029 			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6030 
6031 			/* Select expansion interrupt status register */
6032 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6033 					 MII_TG3_DSP_EXP1_INT_STAT);
6034 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6035 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6036 
6037 			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6038 				/* We have signal detect and not receiving
6039 				 * config code words, link is up by parallel
6040 				 * detection.
6041 				 */
6042 
6043 				bmcr &= ~BMCR_ANENABLE;
6044 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6045 				tg3_writephy(tp, MII_BMCR, bmcr);
6046 				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6047 			}
6048 		}
6049 	} else if (tp->link_up &&
6050 		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6051 		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6052 		u32 phy2;
6053 
6054 		/* Select expansion interrupt status register */
6055 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6056 				 MII_TG3_DSP_EXP1_INT_STAT);
6057 		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6058 		if (phy2 & 0x20) {
6059 			u32 bmcr;
6060 
6061 			/* Config code words received, turn on autoneg. */
6062 			tg3_readphy(tp, MII_BMCR, &bmcr);
6063 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6064 
6065 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6066 
6067 		}
6068 	}
6069 }
6070 
6071 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6072 {
6073 	u32 val;
6074 	int err;
6075 
6076 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6077 		err = tg3_setup_fiber_phy(tp, force_reset);
6078 	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6079 		err = tg3_setup_fiber_mii_phy(tp, force_reset);
6080 	else
6081 		err = tg3_setup_copper_phy(tp, force_reset);
6082 
6083 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6084 		u32 scale;
6085 
6086 		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6087 		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6088 			scale = 65;
6089 		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6090 			scale = 6;
6091 		else
6092 			scale = 12;
6093 
6094 		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6095 		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6096 		tw32(GRC_MISC_CFG, val);
6097 	}
6098 
6099 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6100 	      (6 << TX_LENGTHS_IPG_SHIFT);
6101 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6102 	    tg3_asic_rev(tp) == ASIC_REV_5762)
6103 		val |= tr32(MAC_TX_LENGTHS) &
6104 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
6105 			TX_LENGTHS_CNT_DWN_VAL_MSK);
6106 
6107 	if (tp->link_config.active_speed == SPEED_1000 &&
6108 	    tp->link_config.active_duplex == DUPLEX_HALF)
6109 		tw32(MAC_TX_LENGTHS, val |
6110 		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6111 	else
6112 		tw32(MAC_TX_LENGTHS, val |
6113 		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6114 
6115 	if (!tg3_flag(tp, 5705_PLUS)) {
6116 		if (tp->link_up) {
6117 			tw32(HOSTCC_STAT_COAL_TICKS,
6118 			     tp->coal.stats_block_coalesce_usecs);
6119 		} else {
6120 			tw32(HOSTCC_STAT_COAL_TICKS, 0);
6121 		}
6122 	}
6123 
6124 	if (tg3_flag(tp, ASPM_WORKAROUND)) {
6125 		val = tr32(PCIE_PWR_MGMT_THRESH);
6126 		if (!tp->link_up)
6127 			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6128 			      tp->pwrmgmt_thresh;
6129 		else
6130 			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6131 		tw32(PCIE_PWR_MGMT_THRESH, val);
6132 	}
6133 
6134 	return err;
6135 }
6136 
6137 /* tp->lock must be held */
6138 static u64 tg3_refclk_read(struct tg3 *tp)
6139 {
6140 	u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6141 	return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6142 }
6143 
6144 /* tp->lock must be held */
6145 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6146 {
6147 	u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6148 
6149 	tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6150 	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6151 	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6152 	tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6153 }
6154 
6155 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6156 static inline void tg3_full_unlock(struct tg3 *tp);
6157 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6158 {
6159 	struct tg3 *tp = netdev_priv(dev);
6160 
6161 	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6162 				SOF_TIMESTAMPING_RX_SOFTWARE |
6163 				SOF_TIMESTAMPING_SOFTWARE;
6164 
6165 	if (tg3_flag(tp, PTP_CAPABLE)) {
6166 		info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6167 					SOF_TIMESTAMPING_RX_HARDWARE |
6168 					SOF_TIMESTAMPING_RAW_HARDWARE;
6169 	}
6170 
6171 	if (tp->ptp_clock)
6172 		info->phc_index = ptp_clock_index(tp->ptp_clock);
6173 	else
6174 		info->phc_index = -1;
6175 
6176 	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6177 
6178 	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6179 			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6180 			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6181 			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6182 	return 0;
6183 }
6184 
6185 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6186 {
6187 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6188 	bool neg_adj = false;
6189 	u32 correction = 0;
6190 
6191 	if (ppb < 0) {
6192 		neg_adj = true;
6193 		ppb = -ppb;
6194 	}
6195 
6196 	/* Frequency adjustment is performed using hardware with a 24 bit
6197 	 * accumulator and a programmable correction value. On each clk, the
6198 	 * correction value gets added to the accumulator and when it
6199 	 * overflows, the time counter is incremented/decremented.
6200 	 *
6201 	 * So conversion from ppb to correction value is
6202 	 *		ppb * (1 << 24) / 1000000000
6203 	 */
6204 	correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6205 		     TG3_EAV_REF_CLK_CORRECT_MASK;
6206 
6207 	tg3_full_lock(tp, 0);
6208 
6209 	if (correction)
6210 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6211 		     TG3_EAV_REF_CLK_CORRECT_EN |
6212 		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6213 	else
6214 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6215 
6216 	tg3_full_unlock(tp);
6217 
6218 	return 0;
6219 }
6220 
6221 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6222 {
6223 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6224 
6225 	tg3_full_lock(tp, 0);
6226 	tp->ptp_adjust += delta;
6227 	tg3_full_unlock(tp);
6228 
6229 	return 0;
6230 }
6231 
6232 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
6233 {
6234 	u64 ns;
6235 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6236 
6237 	tg3_full_lock(tp, 0);
6238 	ns = tg3_refclk_read(tp);
6239 	ns += tp->ptp_adjust;
6240 	tg3_full_unlock(tp);
6241 
6242 	*ts = ns_to_timespec64(ns);
6243 
6244 	return 0;
6245 }
6246 
6247 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6248 			   const struct timespec64 *ts)
6249 {
6250 	u64 ns;
6251 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6252 
6253 	ns = timespec64_to_ns(ts);
6254 
6255 	tg3_full_lock(tp, 0);
6256 	tg3_refclk_write(tp, ns);
6257 	tp->ptp_adjust = 0;
6258 	tg3_full_unlock(tp);
6259 
6260 	return 0;
6261 }
6262 
6263 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6264 			  struct ptp_clock_request *rq, int on)
6265 {
6266 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6267 	u32 clock_ctl;
6268 	int rval = 0;
6269 
6270 	switch (rq->type) {
6271 	case PTP_CLK_REQ_PEROUT:
6272 		if (rq->perout.index != 0)
6273 			return -EINVAL;
6274 
6275 		tg3_full_lock(tp, 0);
6276 		clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6277 		clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6278 
6279 		if (on) {
6280 			u64 nsec;
6281 
6282 			nsec = rq->perout.start.sec * 1000000000ULL +
6283 			       rq->perout.start.nsec;
6284 
6285 			if (rq->perout.period.sec || rq->perout.period.nsec) {
6286 				netdev_warn(tp->dev,
6287 					    "Device supports only a one-shot timesync output, period must be 0\n");
6288 				rval = -EINVAL;
6289 				goto err_out;
6290 			}
6291 
6292 			if (nsec & (1ULL << 63)) {
6293 				netdev_warn(tp->dev,
6294 					    "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6295 				rval = -EINVAL;
6296 				goto err_out;
6297 			}
6298 
6299 			tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6300 			tw32(TG3_EAV_WATCHDOG0_MSB,
6301 			     TG3_EAV_WATCHDOG0_EN |
6302 			     ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6303 
6304 			tw32(TG3_EAV_REF_CLCK_CTL,
6305 			     clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6306 		} else {
6307 			tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6308 			tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6309 		}
6310 
6311 err_out:
6312 		tg3_full_unlock(tp);
6313 		return rval;
6314 
6315 	default:
6316 		break;
6317 	}
6318 
6319 	return -EOPNOTSUPP;
6320 }
6321 
6322 static const struct ptp_clock_info tg3_ptp_caps = {
6323 	.owner		= THIS_MODULE,
6324 	.name		= "tg3 clock",
6325 	.max_adj	= 250000000,
6326 	.n_alarm	= 0,
6327 	.n_ext_ts	= 0,
6328 	.n_per_out	= 1,
6329 	.n_pins		= 0,
6330 	.pps		= 0,
6331 	.adjfreq	= tg3_ptp_adjfreq,
6332 	.adjtime	= tg3_ptp_adjtime,
6333 	.gettime64	= tg3_ptp_gettime,
6334 	.settime64	= tg3_ptp_settime,
6335 	.enable		= tg3_ptp_enable,
6336 };
6337 
6338 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6339 				     struct skb_shared_hwtstamps *timestamp)
6340 {
6341 	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6342 	timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6343 					   tp->ptp_adjust);
6344 }
6345 
6346 /* tp->lock must be held */
6347 static void tg3_ptp_init(struct tg3 *tp)
6348 {
6349 	if (!tg3_flag(tp, PTP_CAPABLE))
6350 		return;
6351 
6352 	/* Initialize the hardware clock to the system time. */
6353 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6354 	tp->ptp_adjust = 0;
6355 	tp->ptp_info = tg3_ptp_caps;
6356 }
6357 
6358 /* tp->lock must be held */
6359 static void tg3_ptp_resume(struct tg3 *tp)
6360 {
6361 	if (!tg3_flag(tp, PTP_CAPABLE))
6362 		return;
6363 
6364 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6365 	tp->ptp_adjust = 0;
6366 }
6367 
6368 static void tg3_ptp_fini(struct tg3 *tp)
6369 {
6370 	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6371 		return;
6372 
6373 	ptp_clock_unregister(tp->ptp_clock);
6374 	tp->ptp_clock = NULL;
6375 	tp->ptp_adjust = 0;
6376 }
6377 
6378 static inline int tg3_irq_sync(struct tg3 *tp)
6379 {
6380 	return tp->irq_sync;
6381 }
6382 
6383 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6384 {
6385 	int i;
6386 
6387 	dst = (u32 *)((u8 *)dst + off);
6388 	for (i = 0; i < len; i += sizeof(u32))
6389 		*dst++ = tr32(off + i);
6390 }
6391 
6392 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6393 {
6394 	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6395 	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6396 	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6397 	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6398 	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6399 	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6400 	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6401 	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6402 	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6403 	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6404 	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6405 	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6406 	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6407 	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6408 	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6409 	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6410 	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6411 	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6412 	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6413 
6414 	if (tg3_flag(tp, SUPPORT_MSIX))
6415 		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6416 
6417 	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6418 	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6419 	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6420 	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6421 	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6422 	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6423 	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6424 	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6425 
6426 	if (!tg3_flag(tp, 5705_PLUS)) {
6427 		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6428 		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6429 		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6430 	}
6431 
6432 	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6433 	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6434 	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6435 	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6436 	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6437 
6438 	if (tg3_flag(tp, NVRAM))
6439 		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6440 }
6441 
6442 static void tg3_dump_state(struct tg3 *tp)
6443 {
6444 	int i;
6445 	u32 *regs;
6446 
6447 	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6448 	if (!regs)
6449 		return;
6450 
6451 	if (tg3_flag(tp, PCI_EXPRESS)) {
6452 		/* Read up to but not including private PCI registers */
6453 		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6454 			regs[i / sizeof(u32)] = tr32(i);
6455 	} else
6456 		tg3_dump_legacy_regs(tp, regs);
6457 
6458 	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6459 		if (!regs[i + 0] && !regs[i + 1] &&
6460 		    !regs[i + 2] && !regs[i + 3])
6461 			continue;
6462 
6463 		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6464 			   i * 4,
6465 			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6466 	}
6467 
6468 	kfree(regs);
6469 
6470 	for (i = 0; i < tp->irq_cnt; i++) {
6471 		struct tg3_napi *tnapi = &tp->napi[i];
6472 
6473 		/* SW status block */
6474 		netdev_err(tp->dev,
6475 			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6476 			   i,
6477 			   tnapi->hw_status->status,
6478 			   tnapi->hw_status->status_tag,
6479 			   tnapi->hw_status->rx_jumbo_consumer,
6480 			   tnapi->hw_status->rx_consumer,
6481 			   tnapi->hw_status->rx_mini_consumer,
6482 			   tnapi->hw_status->idx[0].rx_producer,
6483 			   tnapi->hw_status->idx[0].tx_consumer);
6484 
6485 		netdev_err(tp->dev,
6486 		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6487 			   i,
6488 			   tnapi->last_tag, tnapi->last_irq_tag,
6489 			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6490 			   tnapi->rx_rcb_ptr,
6491 			   tnapi->prodring.rx_std_prod_idx,
6492 			   tnapi->prodring.rx_std_cons_idx,
6493 			   tnapi->prodring.rx_jmb_prod_idx,
6494 			   tnapi->prodring.rx_jmb_cons_idx);
6495 	}
6496 }
6497 
6498 /* This is called whenever we suspect that the system chipset is re-
6499  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6500  * is bogus tx completions. We try to recover by setting the
6501  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6502  * in the workqueue.
6503  */
6504 static void tg3_tx_recover(struct tg3 *tp)
6505 {
6506 	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6507 	       tp->write32_tx_mbox == tg3_write_indirect_mbox);
6508 
6509 	netdev_warn(tp->dev,
6510 		    "The system may be re-ordering memory-mapped I/O "
6511 		    "cycles to the network device, attempting to recover. "
6512 		    "Please report the problem to the driver maintainer "
6513 		    "and include system chipset information.\n");
6514 
6515 	tg3_flag_set(tp, TX_RECOVERY_PENDING);
6516 }
6517 
6518 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6519 {
6520 	/* Tell compiler to fetch tx indices from memory. */
6521 	barrier();
6522 	return tnapi->tx_pending -
6523 	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6524 }
6525 
6526 /* Tigon3 never reports partial packet sends.  So we do not
6527  * need special logic to handle SKBs that have not had all
6528  * of their frags sent yet, like SunGEM does.
6529  */
6530 static void tg3_tx(struct tg3_napi *tnapi)
6531 {
6532 	struct tg3 *tp = tnapi->tp;
6533 	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6534 	u32 sw_idx = tnapi->tx_cons;
6535 	struct netdev_queue *txq;
6536 	int index = tnapi - tp->napi;
6537 	unsigned int pkts_compl = 0, bytes_compl = 0;
6538 
6539 	if (tg3_flag(tp, ENABLE_TSS))
6540 		index--;
6541 
6542 	txq = netdev_get_tx_queue(tp->dev, index);
6543 
6544 	while (sw_idx != hw_idx) {
6545 		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6546 		struct sk_buff *skb = ri->skb;
6547 		int i, tx_bug = 0;
6548 
6549 		if (unlikely(skb == NULL)) {
6550 			tg3_tx_recover(tp);
6551 			return;
6552 		}
6553 
6554 		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6555 			struct skb_shared_hwtstamps timestamp;
6556 			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6557 			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6558 
6559 			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6560 
6561 			skb_tstamp_tx(skb, &timestamp);
6562 		}
6563 
6564 		pci_unmap_single(tp->pdev,
6565 				 dma_unmap_addr(ri, mapping),
6566 				 skb_headlen(skb),
6567 				 PCI_DMA_TODEVICE);
6568 
6569 		ri->skb = NULL;
6570 
6571 		while (ri->fragmented) {
6572 			ri->fragmented = false;
6573 			sw_idx = NEXT_TX(sw_idx);
6574 			ri = &tnapi->tx_buffers[sw_idx];
6575 		}
6576 
6577 		sw_idx = NEXT_TX(sw_idx);
6578 
6579 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6580 			ri = &tnapi->tx_buffers[sw_idx];
6581 			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6582 				tx_bug = 1;
6583 
6584 			pci_unmap_page(tp->pdev,
6585 				       dma_unmap_addr(ri, mapping),
6586 				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
6587 				       PCI_DMA_TODEVICE);
6588 
6589 			while (ri->fragmented) {
6590 				ri->fragmented = false;
6591 				sw_idx = NEXT_TX(sw_idx);
6592 				ri = &tnapi->tx_buffers[sw_idx];
6593 			}
6594 
6595 			sw_idx = NEXT_TX(sw_idx);
6596 		}
6597 
6598 		pkts_compl++;
6599 		bytes_compl += skb->len;
6600 
6601 		dev_consume_skb_any(skb);
6602 
6603 		if (unlikely(tx_bug)) {
6604 			tg3_tx_recover(tp);
6605 			return;
6606 		}
6607 	}
6608 
6609 	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6610 
6611 	tnapi->tx_cons = sw_idx;
6612 
6613 	/* Need to make the tx_cons update visible to tg3_start_xmit()
6614 	 * before checking for netif_queue_stopped().  Without the
6615 	 * memory barrier, there is a small possibility that tg3_start_xmit()
6616 	 * will miss it and cause the queue to be stopped forever.
6617 	 */
6618 	smp_mb();
6619 
6620 	if (unlikely(netif_tx_queue_stopped(txq) &&
6621 		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6622 		__netif_tx_lock(txq, smp_processor_id());
6623 		if (netif_tx_queue_stopped(txq) &&
6624 		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6625 			netif_tx_wake_queue(txq);
6626 		__netif_tx_unlock(txq);
6627 	}
6628 }
6629 
6630 static void tg3_frag_free(bool is_frag, void *data)
6631 {
6632 	if (is_frag)
6633 		skb_free_frag(data);
6634 	else
6635 		kfree(data);
6636 }
6637 
6638 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6639 {
6640 	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6641 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6642 
6643 	if (!ri->data)
6644 		return;
6645 
6646 	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6647 			 map_sz, PCI_DMA_FROMDEVICE);
6648 	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6649 	ri->data = NULL;
6650 }
6651 
6652 
6653 /* Returns size of skb allocated or < 0 on error.
6654  *
6655  * We only need to fill in the address because the other members
6656  * of the RX descriptor are invariant, see tg3_init_rings.
6657  *
6658  * Note the purposeful assymetry of cpu vs. chip accesses.  For
6659  * posting buffers we only dirty the first cache line of the RX
6660  * descriptor (containing the address).  Whereas for the RX status
6661  * buffers the cpu only reads the last cacheline of the RX descriptor
6662  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6663  */
6664 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6665 			     u32 opaque_key, u32 dest_idx_unmasked,
6666 			     unsigned int *frag_size)
6667 {
6668 	struct tg3_rx_buffer_desc *desc;
6669 	struct ring_info *map;
6670 	u8 *data;
6671 	dma_addr_t mapping;
6672 	int skb_size, data_size, dest_idx;
6673 
6674 	switch (opaque_key) {
6675 	case RXD_OPAQUE_RING_STD:
6676 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6677 		desc = &tpr->rx_std[dest_idx];
6678 		map = &tpr->rx_std_buffers[dest_idx];
6679 		data_size = tp->rx_pkt_map_sz;
6680 		break;
6681 
6682 	case RXD_OPAQUE_RING_JUMBO:
6683 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6684 		desc = &tpr->rx_jmb[dest_idx].std;
6685 		map = &tpr->rx_jmb_buffers[dest_idx];
6686 		data_size = TG3_RX_JMB_MAP_SZ;
6687 		break;
6688 
6689 	default:
6690 		return -EINVAL;
6691 	}
6692 
6693 	/* Do not overwrite any of the map or rp information
6694 	 * until we are sure we can commit to a new buffer.
6695 	 *
6696 	 * Callers depend upon this behavior and assume that
6697 	 * we leave everything unchanged if we fail.
6698 	 */
6699 	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6700 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6701 	if (skb_size <= PAGE_SIZE) {
6702 		data = netdev_alloc_frag(skb_size);
6703 		*frag_size = skb_size;
6704 	} else {
6705 		data = kmalloc(skb_size, GFP_ATOMIC);
6706 		*frag_size = 0;
6707 	}
6708 	if (!data)
6709 		return -ENOMEM;
6710 
6711 	mapping = pci_map_single(tp->pdev,
6712 				 data + TG3_RX_OFFSET(tp),
6713 				 data_size,
6714 				 PCI_DMA_FROMDEVICE);
6715 	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6716 		tg3_frag_free(skb_size <= PAGE_SIZE, data);
6717 		return -EIO;
6718 	}
6719 
6720 	map->data = data;
6721 	dma_unmap_addr_set(map, mapping, mapping);
6722 
6723 	desc->addr_hi = ((u64)mapping >> 32);
6724 	desc->addr_lo = ((u64)mapping & 0xffffffff);
6725 
6726 	return data_size;
6727 }
6728 
6729 /* We only need to move over in the address because the other
6730  * members of the RX descriptor are invariant.  See notes above
6731  * tg3_alloc_rx_data for full details.
6732  */
6733 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6734 			   struct tg3_rx_prodring_set *dpr,
6735 			   u32 opaque_key, int src_idx,
6736 			   u32 dest_idx_unmasked)
6737 {
6738 	struct tg3 *tp = tnapi->tp;
6739 	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6740 	struct ring_info *src_map, *dest_map;
6741 	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6742 	int dest_idx;
6743 
6744 	switch (opaque_key) {
6745 	case RXD_OPAQUE_RING_STD:
6746 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6747 		dest_desc = &dpr->rx_std[dest_idx];
6748 		dest_map = &dpr->rx_std_buffers[dest_idx];
6749 		src_desc = &spr->rx_std[src_idx];
6750 		src_map = &spr->rx_std_buffers[src_idx];
6751 		break;
6752 
6753 	case RXD_OPAQUE_RING_JUMBO:
6754 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6755 		dest_desc = &dpr->rx_jmb[dest_idx].std;
6756 		dest_map = &dpr->rx_jmb_buffers[dest_idx];
6757 		src_desc = &spr->rx_jmb[src_idx].std;
6758 		src_map = &spr->rx_jmb_buffers[src_idx];
6759 		break;
6760 
6761 	default:
6762 		return;
6763 	}
6764 
6765 	dest_map->data = src_map->data;
6766 	dma_unmap_addr_set(dest_map, mapping,
6767 			   dma_unmap_addr(src_map, mapping));
6768 	dest_desc->addr_hi = src_desc->addr_hi;
6769 	dest_desc->addr_lo = src_desc->addr_lo;
6770 
6771 	/* Ensure that the update to the skb happens after the physical
6772 	 * addresses have been transferred to the new BD location.
6773 	 */
6774 	smp_wmb();
6775 
6776 	src_map->data = NULL;
6777 }
6778 
6779 /* The RX ring scheme is composed of multiple rings which post fresh
6780  * buffers to the chip, and one special ring the chip uses to report
6781  * status back to the host.
6782  *
6783  * The special ring reports the status of received packets to the
6784  * host.  The chip does not write into the original descriptor the
6785  * RX buffer was obtained from.  The chip simply takes the original
6786  * descriptor as provided by the host, updates the status and length
6787  * field, then writes this into the next status ring entry.
6788  *
6789  * Each ring the host uses to post buffers to the chip is described
6790  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
6791  * it is first placed into the on-chip ram.  When the packet's length
6792  * is known, it walks down the TG3_BDINFO entries to select the ring.
6793  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6794  * which is within the range of the new packet's length is chosen.
6795  *
6796  * The "separate ring for rx status" scheme may sound queer, but it makes
6797  * sense from a cache coherency perspective.  If only the host writes
6798  * to the buffer post rings, and only the chip writes to the rx status
6799  * rings, then cache lines never move beyond shared-modified state.
6800  * If both the host and chip were to write into the same ring, cache line
6801  * eviction could occur since both entities want it in an exclusive state.
6802  */
6803 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6804 {
6805 	struct tg3 *tp = tnapi->tp;
6806 	u32 work_mask, rx_std_posted = 0;
6807 	u32 std_prod_idx, jmb_prod_idx;
6808 	u32 sw_idx = tnapi->rx_rcb_ptr;
6809 	u16 hw_idx;
6810 	int received;
6811 	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6812 
6813 	hw_idx = *(tnapi->rx_rcb_prod_idx);
6814 	/*
6815 	 * We need to order the read of hw_idx and the read of
6816 	 * the opaque cookie.
6817 	 */
6818 	rmb();
6819 	work_mask = 0;
6820 	received = 0;
6821 	std_prod_idx = tpr->rx_std_prod_idx;
6822 	jmb_prod_idx = tpr->rx_jmb_prod_idx;
6823 	while (sw_idx != hw_idx && budget > 0) {
6824 		struct ring_info *ri;
6825 		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6826 		unsigned int len;
6827 		struct sk_buff *skb;
6828 		dma_addr_t dma_addr;
6829 		u32 opaque_key, desc_idx, *post_ptr;
6830 		u8 *data;
6831 		u64 tstamp = 0;
6832 
6833 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6834 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6835 		if (opaque_key == RXD_OPAQUE_RING_STD) {
6836 			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6837 			dma_addr = dma_unmap_addr(ri, mapping);
6838 			data = ri->data;
6839 			post_ptr = &std_prod_idx;
6840 			rx_std_posted++;
6841 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6842 			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6843 			dma_addr = dma_unmap_addr(ri, mapping);
6844 			data = ri->data;
6845 			post_ptr = &jmb_prod_idx;
6846 		} else
6847 			goto next_pkt_nopost;
6848 
6849 		work_mask |= opaque_key;
6850 
6851 		if (desc->err_vlan & RXD_ERR_MASK) {
6852 		drop_it:
6853 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6854 				       desc_idx, *post_ptr);
6855 		drop_it_no_recycle:
6856 			/* Other statistics kept track of by card. */
6857 			tp->rx_dropped++;
6858 			goto next_pkt;
6859 		}
6860 
6861 		prefetch(data + TG3_RX_OFFSET(tp));
6862 		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6863 		      ETH_FCS_LEN;
6864 
6865 		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6866 		     RXD_FLAG_PTPSTAT_PTPV1 ||
6867 		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6868 		     RXD_FLAG_PTPSTAT_PTPV2) {
6869 			tstamp = tr32(TG3_RX_TSTAMP_LSB);
6870 			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6871 		}
6872 
6873 		if (len > TG3_RX_COPY_THRESH(tp)) {
6874 			int skb_size;
6875 			unsigned int frag_size;
6876 
6877 			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6878 						    *post_ptr, &frag_size);
6879 			if (skb_size < 0)
6880 				goto drop_it;
6881 
6882 			pci_unmap_single(tp->pdev, dma_addr, skb_size,
6883 					 PCI_DMA_FROMDEVICE);
6884 
6885 			/* Ensure that the update to the data happens
6886 			 * after the usage of the old DMA mapping.
6887 			 */
6888 			smp_wmb();
6889 
6890 			ri->data = NULL;
6891 
6892 			skb = build_skb(data, frag_size);
6893 			if (!skb) {
6894 				tg3_frag_free(frag_size != 0, data);
6895 				goto drop_it_no_recycle;
6896 			}
6897 			skb_reserve(skb, TG3_RX_OFFSET(tp));
6898 		} else {
6899 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6900 				       desc_idx, *post_ptr);
6901 
6902 			skb = netdev_alloc_skb(tp->dev,
6903 					       len + TG3_RAW_IP_ALIGN);
6904 			if (skb == NULL)
6905 				goto drop_it_no_recycle;
6906 
6907 			skb_reserve(skb, TG3_RAW_IP_ALIGN);
6908 			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6909 			memcpy(skb->data,
6910 			       data + TG3_RX_OFFSET(tp),
6911 			       len);
6912 			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6913 		}
6914 
6915 		skb_put(skb, len);
6916 		if (tstamp)
6917 			tg3_hwclock_to_timestamp(tp, tstamp,
6918 						 skb_hwtstamps(skb));
6919 
6920 		if ((tp->dev->features & NETIF_F_RXCSUM) &&
6921 		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6922 		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6923 		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
6924 			skb->ip_summed = CHECKSUM_UNNECESSARY;
6925 		else
6926 			skb_checksum_none_assert(skb);
6927 
6928 		skb->protocol = eth_type_trans(skb, tp->dev);
6929 
6930 		if (len > (tp->dev->mtu + ETH_HLEN) &&
6931 		    skb->protocol != htons(ETH_P_8021Q) &&
6932 		    skb->protocol != htons(ETH_P_8021AD)) {
6933 			dev_kfree_skb_any(skb);
6934 			goto drop_it_no_recycle;
6935 		}
6936 
6937 		if (desc->type_flags & RXD_FLAG_VLAN &&
6938 		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6939 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6940 					       desc->err_vlan & RXD_VLAN_MASK);
6941 
6942 		napi_gro_receive(&tnapi->napi, skb);
6943 
6944 		received++;
6945 		budget--;
6946 
6947 next_pkt:
6948 		(*post_ptr)++;
6949 
6950 		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6951 			tpr->rx_std_prod_idx = std_prod_idx &
6952 					       tp->rx_std_ring_mask;
6953 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6954 				     tpr->rx_std_prod_idx);
6955 			work_mask &= ~RXD_OPAQUE_RING_STD;
6956 			rx_std_posted = 0;
6957 		}
6958 next_pkt_nopost:
6959 		sw_idx++;
6960 		sw_idx &= tp->rx_ret_ring_mask;
6961 
6962 		/* Refresh hw_idx to see if there is new work */
6963 		if (sw_idx == hw_idx) {
6964 			hw_idx = *(tnapi->rx_rcb_prod_idx);
6965 			rmb();
6966 		}
6967 	}
6968 
6969 	/* ACK the status ring. */
6970 	tnapi->rx_rcb_ptr = sw_idx;
6971 	tw32_rx_mbox(tnapi->consmbox, sw_idx);
6972 
6973 	/* Refill RX ring(s). */
6974 	if (!tg3_flag(tp, ENABLE_RSS)) {
6975 		/* Sync BD data before updating mailbox */
6976 		wmb();
6977 
6978 		if (work_mask & RXD_OPAQUE_RING_STD) {
6979 			tpr->rx_std_prod_idx = std_prod_idx &
6980 					       tp->rx_std_ring_mask;
6981 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6982 				     tpr->rx_std_prod_idx);
6983 		}
6984 		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6985 			tpr->rx_jmb_prod_idx = jmb_prod_idx &
6986 					       tp->rx_jmb_ring_mask;
6987 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6988 				     tpr->rx_jmb_prod_idx);
6989 		}
6990 		mmiowb();
6991 	} else if (work_mask) {
6992 		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6993 		 * updated before the producer indices can be updated.
6994 		 */
6995 		smp_wmb();
6996 
6997 		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6998 		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6999 
7000 		if (tnapi != &tp->napi[1]) {
7001 			tp->rx_refill = true;
7002 			napi_schedule(&tp->napi[1].napi);
7003 		}
7004 	}
7005 
7006 	return received;
7007 }
7008 
7009 static void tg3_poll_link(struct tg3 *tp)
7010 {
7011 	/* handle link change and other phy events */
7012 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7013 		struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7014 
7015 		if (sblk->status & SD_STATUS_LINK_CHG) {
7016 			sblk->status = SD_STATUS_UPDATED |
7017 				       (sblk->status & ~SD_STATUS_LINK_CHG);
7018 			spin_lock(&tp->lock);
7019 			if (tg3_flag(tp, USE_PHYLIB)) {
7020 				tw32_f(MAC_STATUS,
7021 				     (MAC_STATUS_SYNC_CHANGED |
7022 				      MAC_STATUS_CFG_CHANGED |
7023 				      MAC_STATUS_MI_COMPLETION |
7024 				      MAC_STATUS_LNKSTATE_CHANGED));
7025 				udelay(40);
7026 			} else
7027 				tg3_setup_phy(tp, false);
7028 			spin_unlock(&tp->lock);
7029 		}
7030 	}
7031 }
7032 
7033 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7034 				struct tg3_rx_prodring_set *dpr,
7035 				struct tg3_rx_prodring_set *spr)
7036 {
7037 	u32 si, di, cpycnt, src_prod_idx;
7038 	int i, err = 0;
7039 
7040 	while (1) {
7041 		src_prod_idx = spr->rx_std_prod_idx;
7042 
7043 		/* Make sure updates to the rx_std_buffers[] entries and the
7044 		 * standard producer index are seen in the correct order.
7045 		 */
7046 		smp_rmb();
7047 
7048 		if (spr->rx_std_cons_idx == src_prod_idx)
7049 			break;
7050 
7051 		if (spr->rx_std_cons_idx < src_prod_idx)
7052 			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7053 		else
7054 			cpycnt = tp->rx_std_ring_mask + 1 -
7055 				 spr->rx_std_cons_idx;
7056 
7057 		cpycnt = min(cpycnt,
7058 			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7059 
7060 		si = spr->rx_std_cons_idx;
7061 		di = dpr->rx_std_prod_idx;
7062 
7063 		for (i = di; i < di + cpycnt; i++) {
7064 			if (dpr->rx_std_buffers[i].data) {
7065 				cpycnt = i - di;
7066 				err = -ENOSPC;
7067 				break;
7068 			}
7069 		}
7070 
7071 		if (!cpycnt)
7072 			break;
7073 
7074 		/* Ensure that updates to the rx_std_buffers ring and the
7075 		 * shadowed hardware producer ring from tg3_recycle_skb() are
7076 		 * ordered correctly WRT the skb check above.
7077 		 */
7078 		smp_rmb();
7079 
7080 		memcpy(&dpr->rx_std_buffers[di],
7081 		       &spr->rx_std_buffers[si],
7082 		       cpycnt * sizeof(struct ring_info));
7083 
7084 		for (i = 0; i < cpycnt; i++, di++, si++) {
7085 			struct tg3_rx_buffer_desc *sbd, *dbd;
7086 			sbd = &spr->rx_std[si];
7087 			dbd = &dpr->rx_std[di];
7088 			dbd->addr_hi = sbd->addr_hi;
7089 			dbd->addr_lo = sbd->addr_lo;
7090 		}
7091 
7092 		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7093 				       tp->rx_std_ring_mask;
7094 		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7095 				       tp->rx_std_ring_mask;
7096 	}
7097 
7098 	while (1) {
7099 		src_prod_idx = spr->rx_jmb_prod_idx;
7100 
7101 		/* Make sure updates to the rx_jmb_buffers[] entries and
7102 		 * the jumbo producer index are seen in the correct order.
7103 		 */
7104 		smp_rmb();
7105 
7106 		if (spr->rx_jmb_cons_idx == src_prod_idx)
7107 			break;
7108 
7109 		if (spr->rx_jmb_cons_idx < src_prod_idx)
7110 			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7111 		else
7112 			cpycnt = tp->rx_jmb_ring_mask + 1 -
7113 				 spr->rx_jmb_cons_idx;
7114 
7115 		cpycnt = min(cpycnt,
7116 			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7117 
7118 		si = spr->rx_jmb_cons_idx;
7119 		di = dpr->rx_jmb_prod_idx;
7120 
7121 		for (i = di; i < di + cpycnt; i++) {
7122 			if (dpr->rx_jmb_buffers[i].data) {
7123 				cpycnt = i - di;
7124 				err = -ENOSPC;
7125 				break;
7126 			}
7127 		}
7128 
7129 		if (!cpycnt)
7130 			break;
7131 
7132 		/* Ensure that updates to the rx_jmb_buffers ring and the
7133 		 * shadowed hardware producer ring from tg3_recycle_skb() are
7134 		 * ordered correctly WRT the skb check above.
7135 		 */
7136 		smp_rmb();
7137 
7138 		memcpy(&dpr->rx_jmb_buffers[di],
7139 		       &spr->rx_jmb_buffers[si],
7140 		       cpycnt * sizeof(struct ring_info));
7141 
7142 		for (i = 0; i < cpycnt; i++, di++, si++) {
7143 			struct tg3_rx_buffer_desc *sbd, *dbd;
7144 			sbd = &spr->rx_jmb[si].std;
7145 			dbd = &dpr->rx_jmb[di].std;
7146 			dbd->addr_hi = sbd->addr_hi;
7147 			dbd->addr_lo = sbd->addr_lo;
7148 		}
7149 
7150 		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7151 				       tp->rx_jmb_ring_mask;
7152 		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7153 				       tp->rx_jmb_ring_mask;
7154 	}
7155 
7156 	return err;
7157 }
7158 
7159 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7160 {
7161 	struct tg3 *tp = tnapi->tp;
7162 
7163 	/* run TX completion thread */
7164 	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7165 		tg3_tx(tnapi);
7166 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7167 			return work_done;
7168 	}
7169 
7170 	if (!tnapi->rx_rcb_prod_idx)
7171 		return work_done;
7172 
7173 	/* run RX thread, within the bounds set by NAPI.
7174 	 * All RX "locking" is done by ensuring outside
7175 	 * code synchronizes with tg3->napi.poll()
7176 	 */
7177 	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7178 		work_done += tg3_rx(tnapi, budget - work_done);
7179 
7180 	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7181 		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7182 		int i, err = 0;
7183 		u32 std_prod_idx = dpr->rx_std_prod_idx;
7184 		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7185 
7186 		tp->rx_refill = false;
7187 		for (i = 1; i <= tp->rxq_cnt; i++)
7188 			err |= tg3_rx_prodring_xfer(tp, dpr,
7189 						    &tp->napi[i].prodring);
7190 
7191 		wmb();
7192 
7193 		if (std_prod_idx != dpr->rx_std_prod_idx)
7194 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7195 				     dpr->rx_std_prod_idx);
7196 
7197 		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7198 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7199 				     dpr->rx_jmb_prod_idx);
7200 
7201 		mmiowb();
7202 
7203 		if (err)
7204 			tw32_f(HOSTCC_MODE, tp->coal_now);
7205 	}
7206 
7207 	return work_done;
7208 }
7209 
7210 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7211 {
7212 	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7213 		schedule_work(&tp->reset_task);
7214 }
7215 
7216 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7217 {
7218 	cancel_work_sync(&tp->reset_task);
7219 	tg3_flag_clear(tp, RESET_TASK_PENDING);
7220 	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7221 }
7222 
7223 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7224 {
7225 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7226 	struct tg3 *tp = tnapi->tp;
7227 	int work_done = 0;
7228 	struct tg3_hw_status *sblk = tnapi->hw_status;
7229 
7230 	while (1) {
7231 		work_done = tg3_poll_work(tnapi, work_done, budget);
7232 
7233 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7234 			goto tx_recovery;
7235 
7236 		if (unlikely(work_done >= budget))
7237 			break;
7238 
7239 		/* tp->last_tag is used in tg3_int_reenable() below
7240 		 * to tell the hw how much work has been processed,
7241 		 * so we must read it before checking for more work.
7242 		 */
7243 		tnapi->last_tag = sblk->status_tag;
7244 		tnapi->last_irq_tag = tnapi->last_tag;
7245 		rmb();
7246 
7247 		/* check for RX/TX work to do */
7248 		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7249 			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7250 
7251 			/* This test here is not race free, but will reduce
7252 			 * the number of interrupts by looping again.
7253 			 */
7254 			if (tnapi == &tp->napi[1] && tp->rx_refill)
7255 				continue;
7256 
7257 			napi_complete_done(napi, work_done);
7258 			/* Reenable interrupts. */
7259 			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7260 
7261 			/* This test here is synchronized by napi_schedule()
7262 			 * and napi_complete() to close the race condition.
7263 			 */
7264 			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7265 				tw32(HOSTCC_MODE, tp->coalesce_mode |
7266 						  HOSTCC_MODE_ENABLE |
7267 						  tnapi->coal_now);
7268 			}
7269 			mmiowb();
7270 			break;
7271 		}
7272 	}
7273 
7274 	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7275 	return work_done;
7276 
7277 tx_recovery:
7278 	/* work_done is guaranteed to be less than budget. */
7279 	napi_complete(napi);
7280 	tg3_reset_task_schedule(tp);
7281 	return work_done;
7282 }
7283 
7284 static void tg3_process_error(struct tg3 *tp)
7285 {
7286 	u32 val;
7287 	bool real_error = false;
7288 
7289 	if (tg3_flag(tp, ERROR_PROCESSED))
7290 		return;
7291 
7292 	/* Check Flow Attention register */
7293 	val = tr32(HOSTCC_FLOW_ATTN);
7294 	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7295 		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7296 		real_error = true;
7297 	}
7298 
7299 	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7300 		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7301 		real_error = true;
7302 	}
7303 
7304 	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7305 		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7306 		real_error = true;
7307 	}
7308 
7309 	if (!real_error)
7310 		return;
7311 
7312 	tg3_dump_state(tp);
7313 
7314 	tg3_flag_set(tp, ERROR_PROCESSED);
7315 	tg3_reset_task_schedule(tp);
7316 }
7317 
7318 static int tg3_poll(struct napi_struct *napi, int budget)
7319 {
7320 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7321 	struct tg3 *tp = tnapi->tp;
7322 	int work_done = 0;
7323 	struct tg3_hw_status *sblk = tnapi->hw_status;
7324 
7325 	while (1) {
7326 		if (sblk->status & SD_STATUS_ERROR)
7327 			tg3_process_error(tp);
7328 
7329 		tg3_poll_link(tp);
7330 
7331 		work_done = tg3_poll_work(tnapi, work_done, budget);
7332 
7333 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7334 			goto tx_recovery;
7335 
7336 		if (unlikely(work_done >= budget))
7337 			break;
7338 
7339 		if (tg3_flag(tp, TAGGED_STATUS)) {
7340 			/* tp->last_tag is used in tg3_int_reenable() below
7341 			 * to tell the hw how much work has been processed,
7342 			 * so we must read it before checking for more work.
7343 			 */
7344 			tnapi->last_tag = sblk->status_tag;
7345 			tnapi->last_irq_tag = tnapi->last_tag;
7346 			rmb();
7347 		} else
7348 			sblk->status &= ~SD_STATUS_UPDATED;
7349 
7350 		if (likely(!tg3_has_work(tnapi))) {
7351 			napi_complete_done(napi, work_done);
7352 			tg3_int_reenable(tnapi);
7353 			break;
7354 		}
7355 	}
7356 
7357 	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7358 	return work_done;
7359 
7360 tx_recovery:
7361 	/* work_done is guaranteed to be less than budget. */
7362 	napi_complete(napi);
7363 	tg3_reset_task_schedule(tp);
7364 	return work_done;
7365 }
7366 
7367 static void tg3_napi_disable(struct tg3 *tp)
7368 {
7369 	int i;
7370 
7371 	for (i = tp->irq_cnt - 1; i >= 0; i--)
7372 		napi_disable(&tp->napi[i].napi);
7373 }
7374 
7375 static void tg3_napi_enable(struct tg3 *tp)
7376 {
7377 	int i;
7378 
7379 	for (i = 0; i < tp->irq_cnt; i++)
7380 		napi_enable(&tp->napi[i].napi);
7381 }
7382 
7383 static void tg3_napi_init(struct tg3 *tp)
7384 {
7385 	int i;
7386 
7387 	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7388 	for (i = 1; i < tp->irq_cnt; i++)
7389 		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7390 }
7391 
7392 static void tg3_napi_fini(struct tg3 *tp)
7393 {
7394 	int i;
7395 
7396 	for (i = 0; i < tp->irq_cnt; i++)
7397 		netif_napi_del(&tp->napi[i].napi);
7398 }
7399 
7400 static inline void tg3_netif_stop(struct tg3 *tp)
7401 {
7402 	netif_trans_update(tp->dev);	/* prevent tx timeout */
7403 	tg3_napi_disable(tp);
7404 	netif_carrier_off(tp->dev);
7405 	netif_tx_disable(tp->dev);
7406 }
7407 
7408 /* tp->lock must be held */
7409 static inline void tg3_netif_start(struct tg3 *tp)
7410 {
7411 	tg3_ptp_resume(tp);
7412 
7413 	/* NOTE: unconditional netif_tx_wake_all_queues is only
7414 	 * appropriate so long as all callers are assured to
7415 	 * have free tx slots (such as after tg3_init_hw)
7416 	 */
7417 	netif_tx_wake_all_queues(tp->dev);
7418 
7419 	if (tp->link_up)
7420 		netif_carrier_on(tp->dev);
7421 
7422 	tg3_napi_enable(tp);
7423 	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7424 	tg3_enable_ints(tp);
7425 }
7426 
7427 static void tg3_irq_quiesce(struct tg3 *tp)
7428 	__releases(tp->lock)
7429 	__acquires(tp->lock)
7430 {
7431 	int i;
7432 
7433 	BUG_ON(tp->irq_sync);
7434 
7435 	tp->irq_sync = 1;
7436 	smp_mb();
7437 
7438 	spin_unlock_bh(&tp->lock);
7439 
7440 	for (i = 0; i < tp->irq_cnt; i++)
7441 		synchronize_irq(tp->napi[i].irq_vec);
7442 
7443 	spin_lock_bh(&tp->lock);
7444 }
7445 
7446 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7447  * If irq_sync is non-zero, then the IRQ handler must be synchronized
7448  * with as well.  Most of the time, this is not necessary except when
7449  * shutting down the device.
7450  */
7451 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7452 {
7453 	spin_lock_bh(&tp->lock);
7454 	if (irq_sync)
7455 		tg3_irq_quiesce(tp);
7456 }
7457 
7458 static inline void tg3_full_unlock(struct tg3 *tp)
7459 {
7460 	spin_unlock_bh(&tp->lock);
7461 }
7462 
7463 /* One-shot MSI handler - Chip automatically disables interrupt
7464  * after sending MSI so driver doesn't have to do it.
7465  */
7466 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7467 {
7468 	struct tg3_napi *tnapi = dev_id;
7469 	struct tg3 *tp = tnapi->tp;
7470 
7471 	prefetch(tnapi->hw_status);
7472 	if (tnapi->rx_rcb)
7473 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7474 
7475 	if (likely(!tg3_irq_sync(tp)))
7476 		napi_schedule(&tnapi->napi);
7477 
7478 	return IRQ_HANDLED;
7479 }
7480 
7481 /* MSI ISR - No need to check for interrupt sharing and no need to
7482  * flush status block and interrupt mailbox. PCI ordering rules
7483  * guarantee that MSI will arrive after the status block.
7484  */
7485 static irqreturn_t tg3_msi(int irq, void *dev_id)
7486 {
7487 	struct tg3_napi *tnapi = dev_id;
7488 	struct tg3 *tp = tnapi->tp;
7489 
7490 	prefetch(tnapi->hw_status);
7491 	if (tnapi->rx_rcb)
7492 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7493 	/*
7494 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7495 	 * chip-internal interrupt pending events.
7496 	 * Writing non-zero to intr-mbox-0 additional tells the
7497 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7498 	 * event coalescing.
7499 	 */
7500 	tw32_mailbox(tnapi->int_mbox, 0x00000001);
7501 	if (likely(!tg3_irq_sync(tp)))
7502 		napi_schedule(&tnapi->napi);
7503 
7504 	return IRQ_RETVAL(1);
7505 }
7506 
7507 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7508 {
7509 	struct tg3_napi *tnapi = dev_id;
7510 	struct tg3 *tp = tnapi->tp;
7511 	struct tg3_hw_status *sblk = tnapi->hw_status;
7512 	unsigned int handled = 1;
7513 
7514 	/* In INTx mode, it is possible for the interrupt to arrive at
7515 	 * the CPU before the status block posted prior to the interrupt.
7516 	 * Reading the PCI State register will confirm whether the
7517 	 * interrupt is ours and will flush the status block.
7518 	 */
7519 	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7520 		if (tg3_flag(tp, CHIP_RESETTING) ||
7521 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7522 			handled = 0;
7523 			goto out;
7524 		}
7525 	}
7526 
7527 	/*
7528 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7529 	 * chip-internal interrupt pending events.
7530 	 * Writing non-zero to intr-mbox-0 additional tells the
7531 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7532 	 * event coalescing.
7533 	 *
7534 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7535 	 * spurious interrupts.  The flush impacts performance but
7536 	 * excessive spurious interrupts can be worse in some cases.
7537 	 */
7538 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7539 	if (tg3_irq_sync(tp))
7540 		goto out;
7541 	sblk->status &= ~SD_STATUS_UPDATED;
7542 	if (likely(tg3_has_work(tnapi))) {
7543 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7544 		napi_schedule(&tnapi->napi);
7545 	} else {
7546 		/* No work, shared interrupt perhaps?  re-enable
7547 		 * interrupts, and flush that PCI write
7548 		 */
7549 		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7550 			       0x00000000);
7551 	}
7552 out:
7553 	return IRQ_RETVAL(handled);
7554 }
7555 
7556 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7557 {
7558 	struct tg3_napi *tnapi = dev_id;
7559 	struct tg3 *tp = tnapi->tp;
7560 	struct tg3_hw_status *sblk = tnapi->hw_status;
7561 	unsigned int handled = 1;
7562 
7563 	/* In INTx mode, it is possible for the interrupt to arrive at
7564 	 * the CPU before the status block posted prior to the interrupt.
7565 	 * Reading the PCI State register will confirm whether the
7566 	 * interrupt is ours and will flush the status block.
7567 	 */
7568 	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7569 		if (tg3_flag(tp, CHIP_RESETTING) ||
7570 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7571 			handled = 0;
7572 			goto out;
7573 		}
7574 	}
7575 
7576 	/*
7577 	 * writing any value to intr-mbox-0 clears PCI INTA# and
7578 	 * chip-internal interrupt pending events.
7579 	 * writing non-zero to intr-mbox-0 additional tells the
7580 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7581 	 * event coalescing.
7582 	 *
7583 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7584 	 * spurious interrupts.  The flush impacts performance but
7585 	 * excessive spurious interrupts can be worse in some cases.
7586 	 */
7587 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7588 
7589 	/*
7590 	 * In a shared interrupt configuration, sometimes other devices'
7591 	 * interrupts will scream.  We record the current status tag here
7592 	 * so that the above check can report that the screaming interrupts
7593 	 * are unhandled.  Eventually they will be silenced.
7594 	 */
7595 	tnapi->last_irq_tag = sblk->status_tag;
7596 
7597 	if (tg3_irq_sync(tp))
7598 		goto out;
7599 
7600 	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7601 
7602 	napi_schedule(&tnapi->napi);
7603 
7604 out:
7605 	return IRQ_RETVAL(handled);
7606 }
7607 
7608 /* ISR for interrupt test */
7609 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7610 {
7611 	struct tg3_napi *tnapi = dev_id;
7612 	struct tg3 *tp = tnapi->tp;
7613 	struct tg3_hw_status *sblk = tnapi->hw_status;
7614 
7615 	if ((sblk->status & SD_STATUS_UPDATED) ||
7616 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7617 		tg3_disable_ints(tp);
7618 		return IRQ_RETVAL(1);
7619 	}
7620 	return IRQ_RETVAL(0);
7621 }
7622 
7623 #ifdef CONFIG_NET_POLL_CONTROLLER
7624 static void tg3_poll_controller(struct net_device *dev)
7625 {
7626 	int i;
7627 	struct tg3 *tp = netdev_priv(dev);
7628 
7629 	if (tg3_irq_sync(tp))
7630 		return;
7631 
7632 	for (i = 0; i < tp->irq_cnt; i++)
7633 		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7634 }
7635 #endif
7636 
7637 static void tg3_tx_timeout(struct net_device *dev)
7638 {
7639 	struct tg3 *tp = netdev_priv(dev);
7640 
7641 	if (netif_msg_tx_err(tp)) {
7642 		netdev_err(dev, "transmit timed out, resetting\n");
7643 		tg3_dump_state(tp);
7644 	}
7645 
7646 	tg3_reset_task_schedule(tp);
7647 }
7648 
7649 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7650 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7651 {
7652 	u32 base = (u32) mapping & 0xffffffff;
7653 
7654 	return base + len + 8 < base;
7655 }
7656 
7657 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7658  * of any 4GB boundaries: 4G, 8G, etc
7659  */
7660 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7661 					   u32 len, u32 mss)
7662 {
7663 	if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7664 		u32 base = (u32) mapping & 0xffffffff;
7665 
7666 		return ((base + len + (mss & 0x3fff)) < base);
7667 	}
7668 	return 0;
7669 }
7670 
7671 /* Test for DMA addresses > 40-bit */
7672 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7673 					  int len)
7674 {
7675 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7676 	if (tg3_flag(tp, 40BIT_DMA_BUG))
7677 		return ((u64) mapping + len) > DMA_BIT_MASK(40);
7678 	return 0;
7679 #else
7680 	return 0;
7681 #endif
7682 }
7683 
7684 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7685 				 dma_addr_t mapping, u32 len, u32 flags,
7686 				 u32 mss, u32 vlan)
7687 {
7688 	txbd->addr_hi = ((u64) mapping >> 32);
7689 	txbd->addr_lo = ((u64) mapping & 0xffffffff);
7690 	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7691 	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7692 }
7693 
7694 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7695 			    dma_addr_t map, u32 len, u32 flags,
7696 			    u32 mss, u32 vlan)
7697 {
7698 	struct tg3 *tp = tnapi->tp;
7699 	bool hwbug = false;
7700 
7701 	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7702 		hwbug = true;
7703 
7704 	if (tg3_4g_overflow_test(map, len))
7705 		hwbug = true;
7706 
7707 	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7708 		hwbug = true;
7709 
7710 	if (tg3_40bit_overflow_test(tp, map, len))
7711 		hwbug = true;
7712 
7713 	if (tp->dma_limit) {
7714 		u32 prvidx = *entry;
7715 		u32 tmp_flag = flags & ~TXD_FLAG_END;
7716 		while (len > tp->dma_limit && *budget) {
7717 			u32 frag_len = tp->dma_limit;
7718 			len -= tp->dma_limit;
7719 
7720 			/* Avoid the 8byte DMA problem */
7721 			if (len <= 8) {
7722 				len += tp->dma_limit / 2;
7723 				frag_len = tp->dma_limit / 2;
7724 			}
7725 
7726 			tnapi->tx_buffers[*entry].fragmented = true;
7727 
7728 			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7729 				      frag_len, tmp_flag, mss, vlan);
7730 			*budget -= 1;
7731 			prvidx = *entry;
7732 			*entry = NEXT_TX(*entry);
7733 
7734 			map += frag_len;
7735 		}
7736 
7737 		if (len) {
7738 			if (*budget) {
7739 				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7740 					      len, flags, mss, vlan);
7741 				*budget -= 1;
7742 				*entry = NEXT_TX(*entry);
7743 			} else {
7744 				hwbug = true;
7745 				tnapi->tx_buffers[prvidx].fragmented = false;
7746 			}
7747 		}
7748 	} else {
7749 		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7750 			      len, flags, mss, vlan);
7751 		*entry = NEXT_TX(*entry);
7752 	}
7753 
7754 	return hwbug;
7755 }
7756 
7757 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7758 {
7759 	int i;
7760 	struct sk_buff *skb;
7761 	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7762 
7763 	skb = txb->skb;
7764 	txb->skb = NULL;
7765 
7766 	pci_unmap_single(tnapi->tp->pdev,
7767 			 dma_unmap_addr(txb, mapping),
7768 			 skb_headlen(skb),
7769 			 PCI_DMA_TODEVICE);
7770 
7771 	while (txb->fragmented) {
7772 		txb->fragmented = false;
7773 		entry = NEXT_TX(entry);
7774 		txb = &tnapi->tx_buffers[entry];
7775 	}
7776 
7777 	for (i = 0; i <= last; i++) {
7778 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7779 
7780 		entry = NEXT_TX(entry);
7781 		txb = &tnapi->tx_buffers[entry];
7782 
7783 		pci_unmap_page(tnapi->tp->pdev,
7784 			       dma_unmap_addr(txb, mapping),
7785 			       skb_frag_size(frag), PCI_DMA_TODEVICE);
7786 
7787 		while (txb->fragmented) {
7788 			txb->fragmented = false;
7789 			entry = NEXT_TX(entry);
7790 			txb = &tnapi->tx_buffers[entry];
7791 		}
7792 	}
7793 }
7794 
7795 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7796 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7797 				       struct sk_buff **pskb,
7798 				       u32 *entry, u32 *budget,
7799 				       u32 base_flags, u32 mss, u32 vlan)
7800 {
7801 	struct tg3 *tp = tnapi->tp;
7802 	struct sk_buff *new_skb, *skb = *pskb;
7803 	dma_addr_t new_addr = 0;
7804 	int ret = 0;
7805 
7806 	if (tg3_asic_rev(tp) != ASIC_REV_5701)
7807 		new_skb = skb_copy(skb, GFP_ATOMIC);
7808 	else {
7809 		int more_headroom = 4 - ((unsigned long)skb->data & 3);
7810 
7811 		new_skb = skb_copy_expand(skb,
7812 					  skb_headroom(skb) + more_headroom,
7813 					  skb_tailroom(skb), GFP_ATOMIC);
7814 	}
7815 
7816 	if (!new_skb) {
7817 		ret = -1;
7818 	} else {
7819 		/* New SKB is guaranteed to be linear. */
7820 		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7821 					  PCI_DMA_TODEVICE);
7822 		/* Make sure the mapping succeeded */
7823 		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7824 			dev_kfree_skb_any(new_skb);
7825 			ret = -1;
7826 		} else {
7827 			u32 save_entry = *entry;
7828 
7829 			base_flags |= TXD_FLAG_END;
7830 
7831 			tnapi->tx_buffers[*entry].skb = new_skb;
7832 			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7833 					   mapping, new_addr);
7834 
7835 			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7836 					    new_skb->len, base_flags,
7837 					    mss, vlan)) {
7838 				tg3_tx_skb_unmap(tnapi, save_entry, -1);
7839 				dev_kfree_skb_any(new_skb);
7840 				ret = -1;
7841 			}
7842 		}
7843 	}
7844 
7845 	dev_consume_skb_any(skb);
7846 	*pskb = new_skb;
7847 	return ret;
7848 }
7849 
7850 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7851 {
7852 	/* Check if we will never have enough descriptors,
7853 	 * as gso_segs can be more than current ring size
7854 	 */
7855 	return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7856 }
7857 
7858 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7859 
7860 /* Use GSO to workaround all TSO packets that meet HW bug conditions
7861  * indicated in tg3_tx_frag_set()
7862  */
7863 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7864 		       struct netdev_queue *txq, struct sk_buff *skb)
7865 {
7866 	struct sk_buff *segs, *nskb;
7867 	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7868 
7869 	/* Estimate the number of fragments in the worst case */
7870 	if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7871 		netif_tx_stop_queue(txq);
7872 
7873 		/* netif_tx_stop_queue() must be done before checking
7874 		 * checking tx index in tg3_tx_avail() below, because in
7875 		 * tg3_tx(), we update tx index before checking for
7876 		 * netif_tx_queue_stopped().
7877 		 */
7878 		smp_mb();
7879 		if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7880 			return NETDEV_TX_BUSY;
7881 
7882 		netif_tx_wake_queue(txq);
7883 	}
7884 
7885 	segs = skb_gso_segment(skb, tp->dev->features &
7886 				    ~(NETIF_F_TSO | NETIF_F_TSO6));
7887 	if (IS_ERR(segs) || !segs)
7888 		goto tg3_tso_bug_end;
7889 
7890 	do {
7891 		nskb = segs;
7892 		segs = segs->next;
7893 		nskb->next = NULL;
7894 		tg3_start_xmit(nskb, tp->dev);
7895 	} while (segs);
7896 
7897 tg3_tso_bug_end:
7898 	dev_consume_skb_any(skb);
7899 
7900 	return NETDEV_TX_OK;
7901 }
7902 
7903 /* hard_start_xmit for all devices */
7904 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7905 {
7906 	struct tg3 *tp = netdev_priv(dev);
7907 	u32 len, entry, base_flags, mss, vlan = 0;
7908 	u32 budget;
7909 	int i = -1, would_hit_hwbug;
7910 	dma_addr_t mapping;
7911 	struct tg3_napi *tnapi;
7912 	struct netdev_queue *txq;
7913 	unsigned int last;
7914 	struct iphdr *iph = NULL;
7915 	struct tcphdr *tcph = NULL;
7916 	__sum16 tcp_csum = 0, ip_csum = 0;
7917 	__be16 ip_tot_len = 0;
7918 
7919 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7920 	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7921 	if (tg3_flag(tp, ENABLE_TSS))
7922 		tnapi++;
7923 
7924 	budget = tg3_tx_avail(tnapi);
7925 
7926 	/* We are running in BH disabled context with netif_tx_lock
7927 	 * and TX reclaim runs via tp->napi.poll inside of a software
7928 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
7929 	 * no IRQ context deadlocks to worry about either.  Rejoice!
7930 	 */
7931 	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7932 		if (!netif_tx_queue_stopped(txq)) {
7933 			netif_tx_stop_queue(txq);
7934 
7935 			/* This is a hard error, log it. */
7936 			netdev_err(dev,
7937 				   "BUG! Tx Ring full when queue awake!\n");
7938 		}
7939 		return NETDEV_TX_BUSY;
7940 	}
7941 
7942 	entry = tnapi->tx_prod;
7943 	base_flags = 0;
7944 
7945 	mss = skb_shinfo(skb)->gso_size;
7946 	if (mss) {
7947 		u32 tcp_opt_len, hdr_len;
7948 
7949 		if (skb_cow_head(skb, 0))
7950 			goto drop;
7951 
7952 		iph = ip_hdr(skb);
7953 		tcp_opt_len = tcp_optlen(skb);
7954 
7955 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7956 
7957 		/* HW/FW can not correctly segment packets that have been
7958 		 * vlan encapsulated.
7959 		 */
7960 		if (skb->protocol == htons(ETH_P_8021Q) ||
7961 		    skb->protocol == htons(ETH_P_8021AD)) {
7962 			if (tg3_tso_bug_gso_check(tnapi, skb))
7963 				return tg3_tso_bug(tp, tnapi, txq, skb);
7964 			goto drop;
7965 		}
7966 
7967 		if (!skb_is_gso_v6(skb)) {
7968 			if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7969 			    tg3_flag(tp, TSO_BUG)) {
7970 				if (tg3_tso_bug_gso_check(tnapi, skb))
7971 					return tg3_tso_bug(tp, tnapi, txq, skb);
7972 				goto drop;
7973 			}
7974 			ip_csum = iph->check;
7975 			ip_tot_len = iph->tot_len;
7976 			iph->check = 0;
7977 			iph->tot_len = htons(mss + hdr_len);
7978 		}
7979 
7980 		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7981 			       TXD_FLAG_CPU_POST_DMA);
7982 
7983 		tcph = tcp_hdr(skb);
7984 		tcp_csum = tcph->check;
7985 
7986 		if (tg3_flag(tp, HW_TSO_1) ||
7987 		    tg3_flag(tp, HW_TSO_2) ||
7988 		    tg3_flag(tp, HW_TSO_3)) {
7989 			tcph->check = 0;
7990 			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7991 		} else {
7992 			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7993 							 0, IPPROTO_TCP, 0);
7994 		}
7995 
7996 		if (tg3_flag(tp, HW_TSO_3)) {
7997 			mss |= (hdr_len & 0xc) << 12;
7998 			if (hdr_len & 0x10)
7999 				base_flags |= 0x00000010;
8000 			base_flags |= (hdr_len & 0x3e0) << 5;
8001 		} else if (tg3_flag(tp, HW_TSO_2))
8002 			mss |= hdr_len << 9;
8003 		else if (tg3_flag(tp, HW_TSO_1) ||
8004 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
8005 			if (tcp_opt_len || iph->ihl > 5) {
8006 				int tsflags;
8007 
8008 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8009 				mss |= (tsflags << 11);
8010 			}
8011 		} else {
8012 			if (tcp_opt_len || iph->ihl > 5) {
8013 				int tsflags;
8014 
8015 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8016 				base_flags |= tsflags << 12;
8017 			}
8018 		}
8019 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8020 		/* HW/FW can not correctly checksum packets that have been
8021 		 * vlan encapsulated.
8022 		 */
8023 		if (skb->protocol == htons(ETH_P_8021Q) ||
8024 		    skb->protocol == htons(ETH_P_8021AD)) {
8025 			if (skb_checksum_help(skb))
8026 				goto drop;
8027 		} else  {
8028 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
8029 		}
8030 	}
8031 
8032 	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8033 	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
8034 		base_flags |= TXD_FLAG_JMB_PKT;
8035 
8036 	if (skb_vlan_tag_present(skb)) {
8037 		base_flags |= TXD_FLAG_VLAN;
8038 		vlan = skb_vlan_tag_get(skb);
8039 	}
8040 
8041 	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8042 	    tg3_flag(tp, TX_TSTAMP_EN)) {
8043 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8044 		base_flags |= TXD_FLAG_HWTSTAMP;
8045 	}
8046 
8047 	len = skb_headlen(skb);
8048 
8049 	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8050 	if (pci_dma_mapping_error(tp->pdev, mapping))
8051 		goto drop;
8052 
8053 
8054 	tnapi->tx_buffers[entry].skb = skb;
8055 	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8056 
8057 	would_hit_hwbug = 0;
8058 
8059 	if (tg3_flag(tp, 5701_DMA_BUG))
8060 		would_hit_hwbug = 1;
8061 
8062 	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8063 			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8064 			    mss, vlan)) {
8065 		would_hit_hwbug = 1;
8066 	} else if (skb_shinfo(skb)->nr_frags > 0) {
8067 		u32 tmp_mss = mss;
8068 
8069 		if (!tg3_flag(tp, HW_TSO_1) &&
8070 		    !tg3_flag(tp, HW_TSO_2) &&
8071 		    !tg3_flag(tp, HW_TSO_3))
8072 			tmp_mss = 0;
8073 
8074 		/* Now loop through additional data
8075 		 * fragments, and queue them.
8076 		 */
8077 		last = skb_shinfo(skb)->nr_frags - 1;
8078 		for (i = 0; i <= last; i++) {
8079 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8080 
8081 			len = skb_frag_size(frag);
8082 			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8083 						   len, DMA_TO_DEVICE);
8084 
8085 			tnapi->tx_buffers[entry].skb = NULL;
8086 			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8087 					   mapping);
8088 			if (dma_mapping_error(&tp->pdev->dev, mapping))
8089 				goto dma_error;
8090 
8091 			if (!budget ||
8092 			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8093 					    len, base_flags |
8094 					    ((i == last) ? TXD_FLAG_END : 0),
8095 					    tmp_mss, vlan)) {
8096 				would_hit_hwbug = 1;
8097 				break;
8098 			}
8099 		}
8100 	}
8101 
8102 	if (would_hit_hwbug) {
8103 		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8104 
8105 		if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8106 			/* If it's a TSO packet, do GSO instead of
8107 			 * allocating and copying to a large linear SKB
8108 			 */
8109 			if (ip_tot_len) {
8110 				iph->check = ip_csum;
8111 				iph->tot_len = ip_tot_len;
8112 			}
8113 			tcph->check = tcp_csum;
8114 			return tg3_tso_bug(tp, tnapi, txq, skb);
8115 		}
8116 
8117 		/* If the workaround fails due to memory/mapping
8118 		 * failure, silently drop this packet.
8119 		 */
8120 		entry = tnapi->tx_prod;
8121 		budget = tg3_tx_avail(tnapi);
8122 		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8123 						base_flags, mss, vlan))
8124 			goto drop_nofree;
8125 	}
8126 
8127 	skb_tx_timestamp(skb);
8128 	netdev_tx_sent_queue(txq, skb->len);
8129 
8130 	/* Sync BD data before updating mailbox */
8131 	wmb();
8132 
8133 	tnapi->tx_prod = entry;
8134 	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8135 		netif_tx_stop_queue(txq);
8136 
8137 		/* netif_tx_stop_queue() must be done before checking
8138 		 * checking tx index in tg3_tx_avail() below, because in
8139 		 * tg3_tx(), we update tx index before checking for
8140 		 * netif_tx_queue_stopped().
8141 		 */
8142 		smp_mb();
8143 		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8144 			netif_tx_wake_queue(txq);
8145 	}
8146 
8147 	if (!skb->xmit_more || netif_xmit_stopped(txq)) {
8148 		/* Packets are ready, update Tx producer idx on card. */
8149 		tw32_tx_mbox(tnapi->prodmbox, entry);
8150 		mmiowb();
8151 	}
8152 
8153 	return NETDEV_TX_OK;
8154 
8155 dma_error:
8156 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8157 	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8158 drop:
8159 	dev_kfree_skb_any(skb);
8160 drop_nofree:
8161 	tp->tx_dropped++;
8162 	return NETDEV_TX_OK;
8163 }
8164 
8165 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8166 {
8167 	if (enable) {
8168 		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8169 				  MAC_MODE_PORT_MODE_MASK);
8170 
8171 		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8172 
8173 		if (!tg3_flag(tp, 5705_PLUS))
8174 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8175 
8176 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8177 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8178 		else
8179 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8180 	} else {
8181 		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8182 
8183 		if (tg3_flag(tp, 5705_PLUS) ||
8184 		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8185 		    tg3_asic_rev(tp) == ASIC_REV_5700)
8186 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8187 	}
8188 
8189 	tw32(MAC_MODE, tp->mac_mode);
8190 	udelay(40);
8191 }
8192 
8193 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8194 {
8195 	u32 val, bmcr, mac_mode, ptest = 0;
8196 
8197 	tg3_phy_toggle_apd(tp, false);
8198 	tg3_phy_toggle_automdix(tp, false);
8199 
8200 	if (extlpbk && tg3_phy_set_extloopbk(tp))
8201 		return -EIO;
8202 
8203 	bmcr = BMCR_FULLDPLX;
8204 	switch (speed) {
8205 	case SPEED_10:
8206 		break;
8207 	case SPEED_100:
8208 		bmcr |= BMCR_SPEED100;
8209 		break;
8210 	case SPEED_1000:
8211 	default:
8212 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8213 			speed = SPEED_100;
8214 			bmcr |= BMCR_SPEED100;
8215 		} else {
8216 			speed = SPEED_1000;
8217 			bmcr |= BMCR_SPEED1000;
8218 		}
8219 	}
8220 
8221 	if (extlpbk) {
8222 		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8223 			tg3_readphy(tp, MII_CTRL1000, &val);
8224 			val |= CTL1000_AS_MASTER |
8225 			       CTL1000_ENABLE_MASTER;
8226 			tg3_writephy(tp, MII_CTRL1000, val);
8227 		} else {
8228 			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8229 				MII_TG3_FET_PTEST_TRIM_2;
8230 			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8231 		}
8232 	} else
8233 		bmcr |= BMCR_LOOPBACK;
8234 
8235 	tg3_writephy(tp, MII_BMCR, bmcr);
8236 
8237 	/* The write needs to be flushed for the FETs */
8238 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8239 		tg3_readphy(tp, MII_BMCR, &bmcr);
8240 
8241 	udelay(40);
8242 
8243 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8244 	    tg3_asic_rev(tp) == ASIC_REV_5785) {
8245 		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8246 			     MII_TG3_FET_PTEST_FRC_TX_LINK |
8247 			     MII_TG3_FET_PTEST_FRC_TX_LOCK);
8248 
8249 		/* The write needs to be flushed for the AC131 */
8250 		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8251 	}
8252 
8253 	/* Reset to prevent losing 1st rx packet intermittently */
8254 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8255 	    tg3_flag(tp, 5780_CLASS)) {
8256 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8257 		udelay(10);
8258 		tw32_f(MAC_RX_MODE, tp->rx_mode);
8259 	}
8260 
8261 	mac_mode = tp->mac_mode &
8262 		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8263 	if (speed == SPEED_1000)
8264 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
8265 	else
8266 		mac_mode |= MAC_MODE_PORT_MODE_MII;
8267 
8268 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8269 		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8270 
8271 		if (masked_phy_id == TG3_PHY_ID_BCM5401)
8272 			mac_mode &= ~MAC_MODE_LINK_POLARITY;
8273 		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8274 			mac_mode |= MAC_MODE_LINK_POLARITY;
8275 
8276 		tg3_writephy(tp, MII_TG3_EXT_CTRL,
8277 			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8278 	}
8279 
8280 	tw32(MAC_MODE, mac_mode);
8281 	udelay(40);
8282 
8283 	return 0;
8284 }
8285 
8286 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8287 {
8288 	struct tg3 *tp = netdev_priv(dev);
8289 
8290 	if (features & NETIF_F_LOOPBACK) {
8291 		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8292 			return;
8293 
8294 		spin_lock_bh(&tp->lock);
8295 		tg3_mac_loopback(tp, true);
8296 		netif_carrier_on(tp->dev);
8297 		spin_unlock_bh(&tp->lock);
8298 		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8299 	} else {
8300 		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8301 			return;
8302 
8303 		spin_lock_bh(&tp->lock);
8304 		tg3_mac_loopback(tp, false);
8305 		/* Force link status check */
8306 		tg3_setup_phy(tp, true);
8307 		spin_unlock_bh(&tp->lock);
8308 		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8309 	}
8310 }
8311 
8312 static netdev_features_t tg3_fix_features(struct net_device *dev,
8313 	netdev_features_t features)
8314 {
8315 	struct tg3 *tp = netdev_priv(dev);
8316 
8317 	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8318 		features &= ~NETIF_F_ALL_TSO;
8319 
8320 	return features;
8321 }
8322 
8323 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8324 {
8325 	netdev_features_t changed = dev->features ^ features;
8326 
8327 	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8328 		tg3_set_loopback(dev, features);
8329 
8330 	return 0;
8331 }
8332 
8333 static void tg3_rx_prodring_free(struct tg3 *tp,
8334 				 struct tg3_rx_prodring_set *tpr)
8335 {
8336 	int i;
8337 
8338 	if (tpr != &tp->napi[0].prodring) {
8339 		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8340 		     i = (i + 1) & tp->rx_std_ring_mask)
8341 			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8342 					tp->rx_pkt_map_sz);
8343 
8344 		if (tg3_flag(tp, JUMBO_CAPABLE)) {
8345 			for (i = tpr->rx_jmb_cons_idx;
8346 			     i != tpr->rx_jmb_prod_idx;
8347 			     i = (i + 1) & tp->rx_jmb_ring_mask) {
8348 				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8349 						TG3_RX_JMB_MAP_SZ);
8350 			}
8351 		}
8352 
8353 		return;
8354 	}
8355 
8356 	for (i = 0; i <= tp->rx_std_ring_mask; i++)
8357 		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8358 				tp->rx_pkt_map_sz);
8359 
8360 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8361 		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8362 			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8363 					TG3_RX_JMB_MAP_SZ);
8364 	}
8365 }
8366 
8367 /* Initialize rx rings for packet processing.
8368  *
8369  * The chip has been shut down and the driver detached from
8370  * the networking, so no interrupts or new tx packets will
8371  * end up in the driver.  tp->{tx,}lock are held and thus
8372  * we may not sleep.
8373  */
8374 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8375 				 struct tg3_rx_prodring_set *tpr)
8376 {
8377 	u32 i, rx_pkt_dma_sz;
8378 
8379 	tpr->rx_std_cons_idx = 0;
8380 	tpr->rx_std_prod_idx = 0;
8381 	tpr->rx_jmb_cons_idx = 0;
8382 	tpr->rx_jmb_prod_idx = 0;
8383 
8384 	if (tpr != &tp->napi[0].prodring) {
8385 		memset(&tpr->rx_std_buffers[0], 0,
8386 		       TG3_RX_STD_BUFF_RING_SIZE(tp));
8387 		if (tpr->rx_jmb_buffers)
8388 			memset(&tpr->rx_jmb_buffers[0], 0,
8389 			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
8390 		goto done;
8391 	}
8392 
8393 	/* Zero out all descriptors. */
8394 	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8395 
8396 	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8397 	if (tg3_flag(tp, 5780_CLASS) &&
8398 	    tp->dev->mtu > ETH_DATA_LEN)
8399 		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8400 	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8401 
8402 	/* Initialize invariants of the rings, we only set this
8403 	 * stuff once.  This works because the card does not
8404 	 * write into the rx buffer posting rings.
8405 	 */
8406 	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8407 		struct tg3_rx_buffer_desc *rxd;
8408 
8409 		rxd = &tpr->rx_std[i];
8410 		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8411 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8412 		rxd->opaque = (RXD_OPAQUE_RING_STD |
8413 			       (i << RXD_OPAQUE_INDEX_SHIFT));
8414 	}
8415 
8416 	/* Now allocate fresh SKBs for each rx ring. */
8417 	for (i = 0; i < tp->rx_pending; i++) {
8418 		unsigned int frag_size;
8419 
8420 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8421 				      &frag_size) < 0) {
8422 			netdev_warn(tp->dev,
8423 				    "Using a smaller RX standard ring. Only "
8424 				    "%d out of %d buffers were allocated "
8425 				    "successfully\n", i, tp->rx_pending);
8426 			if (i == 0)
8427 				goto initfail;
8428 			tp->rx_pending = i;
8429 			break;
8430 		}
8431 	}
8432 
8433 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8434 		goto done;
8435 
8436 	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8437 
8438 	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8439 		goto done;
8440 
8441 	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8442 		struct tg3_rx_buffer_desc *rxd;
8443 
8444 		rxd = &tpr->rx_jmb[i].std;
8445 		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8446 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8447 				  RXD_FLAG_JUMBO;
8448 		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8449 		       (i << RXD_OPAQUE_INDEX_SHIFT));
8450 	}
8451 
8452 	for (i = 0; i < tp->rx_jumbo_pending; i++) {
8453 		unsigned int frag_size;
8454 
8455 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8456 				      &frag_size) < 0) {
8457 			netdev_warn(tp->dev,
8458 				    "Using a smaller RX jumbo ring. Only %d "
8459 				    "out of %d buffers were allocated "
8460 				    "successfully\n", i, tp->rx_jumbo_pending);
8461 			if (i == 0)
8462 				goto initfail;
8463 			tp->rx_jumbo_pending = i;
8464 			break;
8465 		}
8466 	}
8467 
8468 done:
8469 	return 0;
8470 
8471 initfail:
8472 	tg3_rx_prodring_free(tp, tpr);
8473 	return -ENOMEM;
8474 }
8475 
8476 static void tg3_rx_prodring_fini(struct tg3 *tp,
8477 				 struct tg3_rx_prodring_set *tpr)
8478 {
8479 	kfree(tpr->rx_std_buffers);
8480 	tpr->rx_std_buffers = NULL;
8481 	kfree(tpr->rx_jmb_buffers);
8482 	tpr->rx_jmb_buffers = NULL;
8483 	if (tpr->rx_std) {
8484 		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8485 				  tpr->rx_std, tpr->rx_std_mapping);
8486 		tpr->rx_std = NULL;
8487 	}
8488 	if (tpr->rx_jmb) {
8489 		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8490 				  tpr->rx_jmb, tpr->rx_jmb_mapping);
8491 		tpr->rx_jmb = NULL;
8492 	}
8493 }
8494 
8495 static int tg3_rx_prodring_init(struct tg3 *tp,
8496 				struct tg3_rx_prodring_set *tpr)
8497 {
8498 	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8499 				      GFP_KERNEL);
8500 	if (!tpr->rx_std_buffers)
8501 		return -ENOMEM;
8502 
8503 	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8504 					 TG3_RX_STD_RING_BYTES(tp),
8505 					 &tpr->rx_std_mapping,
8506 					 GFP_KERNEL);
8507 	if (!tpr->rx_std)
8508 		goto err_out;
8509 
8510 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8511 		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8512 					      GFP_KERNEL);
8513 		if (!tpr->rx_jmb_buffers)
8514 			goto err_out;
8515 
8516 		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8517 						 TG3_RX_JMB_RING_BYTES(tp),
8518 						 &tpr->rx_jmb_mapping,
8519 						 GFP_KERNEL);
8520 		if (!tpr->rx_jmb)
8521 			goto err_out;
8522 	}
8523 
8524 	return 0;
8525 
8526 err_out:
8527 	tg3_rx_prodring_fini(tp, tpr);
8528 	return -ENOMEM;
8529 }
8530 
8531 /* Free up pending packets in all rx/tx rings.
8532  *
8533  * The chip has been shut down and the driver detached from
8534  * the networking, so no interrupts or new tx packets will
8535  * end up in the driver.  tp->{tx,}lock is not held and we are not
8536  * in an interrupt context and thus may sleep.
8537  */
8538 static void tg3_free_rings(struct tg3 *tp)
8539 {
8540 	int i, j;
8541 
8542 	for (j = 0; j < tp->irq_cnt; j++) {
8543 		struct tg3_napi *tnapi = &tp->napi[j];
8544 
8545 		tg3_rx_prodring_free(tp, &tnapi->prodring);
8546 
8547 		if (!tnapi->tx_buffers)
8548 			continue;
8549 
8550 		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8551 			struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8552 
8553 			if (!skb)
8554 				continue;
8555 
8556 			tg3_tx_skb_unmap(tnapi, i,
8557 					 skb_shinfo(skb)->nr_frags - 1);
8558 
8559 			dev_consume_skb_any(skb);
8560 		}
8561 		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8562 	}
8563 }
8564 
8565 /* Initialize tx/rx rings for packet processing.
8566  *
8567  * The chip has been shut down and the driver detached from
8568  * the networking, so no interrupts or new tx packets will
8569  * end up in the driver.  tp->{tx,}lock are held and thus
8570  * we may not sleep.
8571  */
8572 static int tg3_init_rings(struct tg3 *tp)
8573 {
8574 	int i;
8575 
8576 	/* Free up all the SKBs. */
8577 	tg3_free_rings(tp);
8578 
8579 	for (i = 0; i < tp->irq_cnt; i++) {
8580 		struct tg3_napi *tnapi = &tp->napi[i];
8581 
8582 		tnapi->last_tag = 0;
8583 		tnapi->last_irq_tag = 0;
8584 		tnapi->hw_status->status = 0;
8585 		tnapi->hw_status->status_tag = 0;
8586 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8587 
8588 		tnapi->tx_prod = 0;
8589 		tnapi->tx_cons = 0;
8590 		if (tnapi->tx_ring)
8591 			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8592 
8593 		tnapi->rx_rcb_ptr = 0;
8594 		if (tnapi->rx_rcb)
8595 			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8596 
8597 		if (tnapi->prodring.rx_std &&
8598 		    tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8599 			tg3_free_rings(tp);
8600 			return -ENOMEM;
8601 		}
8602 	}
8603 
8604 	return 0;
8605 }
8606 
8607 static void tg3_mem_tx_release(struct tg3 *tp)
8608 {
8609 	int i;
8610 
8611 	for (i = 0; i < tp->irq_max; i++) {
8612 		struct tg3_napi *tnapi = &tp->napi[i];
8613 
8614 		if (tnapi->tx_ring) {
8615 			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8616 				tnapi->tx_ring, tnapi->tx_desc_mapping);
8617 			tnapi->tx_ring = NULL;
8618 		}
8619 
8620 		kfree(tnapi->tx_buffers);
8621 		tnapi->tx_buffers = NULL;
8622 	}
8623 }
8624 
8625 static int tg3_mem_tx_acquire(struct tg3 *tp)
8626 {
8627 	int i;
8628 	struct tg3_napi *tnapi = &tp->napi[0];
8629 
8630 	/* If multivector TSS is enabled, vector 0 does not handle
8631 	 * tx interrupts.  Don't allocate any resources for it.
8632 	 */
8633 	if (tg3_flag(tp, ENABLE_TSS))
8634 		tnapi++;
8635 
8636 	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8637 		tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8638 					    sizeof(struct tg3_tx_ring_info),
8639 					    GFP_KERNEL);
8640 		if (!tnapi->tx_buffers)
8641 			goto err_out;
8642 
8643 		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8644 						    TG3_TX_RING_BYTES,
8645 						    &tnapi->tx_desc_mapping,
8646 						    GFP_KERNEL);
8647 		if (!tnapi->tx_ring)
8648 			goto err_out;
8649 	}
8650 
8651 	return 0;
8652 
8653 err_out:
8654 	tg3_mem_tx_release(tp);
8655 	return -ENOMEM;
8656 }
8657 
8658 static void tg3_mem_rx_release(struct tg3 *tp)
8659 {
8660 	int i;
8661 
8662 	for (i = 0; i < tp->irq_max; i++) {
8663 		struct tg3_napi *tnapi = &tp->napi[i];
8664 
8665 		tg3_rx_prodring_fini(tp, &tnapi->prodring);
8666 
8667 		if (!tnapi->rx_rcb)
8668 			continue;
8669 
8670 		dma_free_coherent(&tp->pdev->dev,
8671 				  TG3_RX_RCB_RING_BYTES(tp),
8672 				  tnapi->rx_rcb,
8673 				  tnapi->rx_rcb_mapping);
8674 		tnapi->rx_rcb = NULL;
8675 	}
8676 }
8677 
8678 static int tg3_mem_rx_acquire(struct tg3 *tp)
8679 {
8680 	unsigned int i, limit;
8681 
8682 	limit = tp->rxq_cnt;
8683 
8684 	/* If RSS is enabled, we need a (dummy) producer ring
8685 	 * set on vector zero.  This is the true hw prodring.
8686 	 */
8687 	if (tg3_flag(tp, ENABLE_RSS))
8688 		limit++;
8689 
8690 	for (i = 0; i < limit; i++) {
8691 		struct tg3_napi *tnapi = &tp->napi[i];
8692 
8693 		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8694 			goto err_out;
8695 
8696 		/* If multivector RSS is enabled, vector 0
8697 		 * does not handle rx or tx interrupts.
8698 		 * Don't allocate any resources for it.
8699 		 */
8700 		if (!i && tg3_flag(tp, ENABLE_RSS))
8701 			continue;
8702 
8703 		tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
8704 						    TG3_RX_RCB_RING_BYTES(tp),
8705 						    &tnapi->rx_rcb_mapping,
8706 						    GFP_KERNEL);
8707 		if (!tnapi->rx_rcb)
8708 			goto err_out;
8709 	}
8710 
8711 	return 0;
8712 
8713 err_out:
8714 	tg3_mem_rx_release(tp);
8715 	return -ENOMEM;
8716 }
8717 
8718 /*
8719  * Must not be invoked with interrupt sources disabled and
8720  * the hardware shutdown down.
8721  */
8722 static void tg3_free_consistent(struct tg3 *tp)
8723 {
8724 	int i;
8725 
8726 	for (i = 0; i < tp->irq_cnt; i++) {
8727 		struct tg3_napi *tnapi = &tp->napi[i];
8728 
8729 		if (tnapi->hw_status) {
8730 			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8731 					  tnapi->hw_status,
8732 					  tnapi->status_mapping);
8733 			tnapi->hw_status = NULL;
8734 		}
8735 	}
8736 
8737 	tg3_mem_rx_release(tp);
8738 	tg3_mem_tx_release(tp);
8739 
8740 	/* tp->hw_stats can be referenced safely:
8741 	 *     1. under rtnl_lock
8742 	 *     2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8743 	 */
8744 	if (tp->hw_stats) {
8745 		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8746 				  tp->hw_stats, tp->stats_mapping);
8747 		tp->hw_stats = NULL;
8748 	}
8749 }
8750 
8751 /*
8752  * Must not be invoked with interrupt sources disabled and
8753  * the hardware shutdown down.  Can sleep.
8754  */
8755 static int tg3_alloc_consistent(struct tg3 *tp)
8756 {
8757 	int i;
8758 
8759 	tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
8760 					   sizeof(struct tg3_hw_stats),
8761 					   &tp->stats_mapping, GFP_KERNEL);
8762 	if (!tp->hw_stats)
8763 		goto err_out;
8764 
8765 	for (i = 0; i < tp->irq_cnt; i++) {
8766 		struct tg3_napi *tnapi = &tp->napi[i];
8767 		struct tg3_hw_status *sblk;
8768 
8769 		tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
8770 						       TG3_HW_STATUS_SIZE,
8771 						       &tnapi->status_mapping,
8772 						       GFP_KERNEL);
8773 		if (!tnapi->hw_status)
8774 			goto err_out;
8775 
8776 		sblk = tnapi->hw_status;
8777 
8778 		if (tg3_flag(tp, ENABLE_RSS)) {
8779 			u16 *prodptr = NULL;
8780 
8781 			/*
8782 			 * When RSS is enabled, the status block format changes
8783 			 * slightly.  The "rx_jumbo_consumer", "reserved",
8784 			 * and "rx_mini_consumer" members get mapped to the
8785 			 * other three rx return ring producer indexes.
8786 			 */
8787 			switch (i) {
8788 			case 1:
8789 				prodptr = &sblk->idx[0].rx_producer;
8790 				break;
8791 			case 2:
8792 				prodptr = &sblk->rx_jumbo_consumer;
8793 				break;
8794 			case 3:
8795 				prodptr = &sblk->reserved;
8796 				break;
8797 			case 4:
8798 				prodptr = &sblk->rx_mini_consumer;
8799 				break;
8800 			}
8801 			tnapi->rx_rcb_prod_idx = prodptr;
8802 		} else {
8803 			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8804 		}
8805 	}
8806 
8807 	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8808 		goto err_out;
8809 
8810 	return 0;
8811 
8812 err_out:
8813 	tg3_free_consistent(tp);
8814 	return -ENOMEM;
8815 }
8816 
8817 #define MAX_WAIT_CNT 1000
8818 
8819 /* To stop a block, clear the enable bit and poll till it
8820  * clears.  tp->lock is held.
8821  */
8822 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8823 {
8824 	unsigned int i;
8825 	u32 val;
8826 
8827 	if (tg3_flag(tp, 5705_PLUS)) {
8828 		switch (ofs) {
8829 		case RCVLSC_MODE:
8830 		case DMAC_MODE:
8831 		case MBFREE_MODE:
8832 		case BUFMGR_MODE:
8833 		case MEMARB_MODE:
8834 			/* We can't enable/disable these bits of the
8835 			 * 5705/5750, just say success.
8836 			 */
8837 			return 0;
8838 
8839 		default:
8840 			break;
8841 		}
8842 	}
8843 
8844 	val = tr32(ofs);
8845 	val &= ~enable_bit;
8846 	tw32_f(ofs, val);
8847 
8848 	for (i = 0; i < MAX_WAIT_CNT; i++) {
8849 		if (pci_channel_offline(tp->pdev)) {
8850 			dev_err(&tp->pdev->dev,
8851 				"tg3_stop_block device offline, "
8852 				"ofs=%lx enable_bit=%x\n",
8853 				ofs, enable_bit);
8854 			return -ENODEV;
8855 		}
8856 
8857 		udelay(100);
8858 		val = tr32(ofs);
8859 		if ((val & enable_bit) == 0)
8860 			break;
8861 	}
8862 
8863 	if (i == MAX_WAIT_CNT && !silent) {
8864 		dev_err(&tp->pdev->dev,
8865 			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8866 			ofs, enable_bit);
8867 		return -ENODEV;
8868 	}
8869 
8870 	return 0;
8871 }
8872 
8873 /* tp->lock is held. */
8874 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8875 {
8876 	int i, err;
8877 
8878 	tg3_disable_ints(tp);
8879 
8880 	if (pci_channel_offline(tp->pdev)) {
8881 		tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8882 		tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8883 		err = -ENODEV;
8884 		goto err_no_dev;
8885 	}
8886 
8887 	tp->rx_mode &= ~RX_MODE_ENABLE;
8888 	tw32_f(MAC_RX_MODE, tp->rx_mode);
8889 	udelay(10);
8890 
8891 	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8892 	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8893 	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8894 	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8895 	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8896 	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8897 
8898 	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8899 	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8900 	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8901 	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8902 	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8903 	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8904 	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8905 
8906 	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8907 	tw32_f(MAC_MODE, tp->mac_mode);
8908 	udelay(40);
8909 
8910 	tp->tx_mode &= ~TX_MODE_ENABLE;
8911 	tw32_f(MAC_TX_MODE, tp->tx_mode);
8912 
8913 	for (i = 0; i < MAX_WAIT_CNT; i++) {
8914 		udelay(100);
8915 		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8916 			break;
8917 	}
8918 	if (i >= MAX_WAIT_CNT) {
8919 		dev_err(&tp->pdev->dev,
8920 			"%s timed out, TX_MODE_ENABLE will not clear "
8921 			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8922 		err |= -ENODEV;
8923 	}
8924 
8925 	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8926 	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8927 	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8928 
8929 	tw32(FTQ_RESET, 0xffffffff);
8930 	tw32(FTQ_RESET, 0x00000000);
8931 
8932 	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8933 	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8934 
8935 err_no_dev:
8936 	for (i = 0; i < tp->irq_cnt; i++) {
8937 		struct tg3_napi *tnapi = &tp->napi[i];
8938 		if (tnapi->hw_status)
8939 			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8940 	}
8941 
8942 	return err;
8943 }
8944 
8945 /* Save PCI command register before chip reset */
8946 static void tg3_save_pci_state(struct tg3 *tp)
8947 {
8948 	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8949 }
8950 
8951 /* Restore PCI state after chip reset */
8952 static void tg3_restore_pci_state(struct tg3 *tp)
8953 {
8954 	u32 val;
8955 
8956 	/* Re-enable indirect register accesses. */
8957 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8958 			       tp->misc_host_ctrl);
8959 
8960 	/* Set MAX PCI retry to zero. */
8961 	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8962 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8963 	    tg3_flag(tp, PCIX_MODE))
8964 		val |= PCISTATE_RETRY_SAME_DMA;
8965 	/* Allow reads and writes to the APE register and memory space. */
8966 	if (tg3_flag(tp, ENABLE_APE))
8967 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8968 		       PCISTATE_ALLOW_APE_SHMEM_WR |
8969 		       PCISTATE_ALLOW_APE_PSPACE_WR;
8970 	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8971 
8972 	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8973 
8974 	if (!tg3_flag(tp, PCI_EXPRESS)) {
8975 		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8976 				      tp->pci_cacheline_sz);
8977 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8978 				      tp->pci_lat_timer);
8979 	}
8980 
8981 	/* Make sure PCI-X relaxed ordering bit is clear. */
8982 	if (tg3_flag(tp, PCIX_MODE)) {
8983 		u16 pcix_cmd;
8984 
8985 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8986 				     &pcix_cmd);
8987 		pcix_cmd &= ~PCI_X_CMD_ERO;
8988 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8989 				      pcix_cmd);
8990 	}
8991 
8992 	if (tg3_flag(tp, 5780_CLASS)) {
8993 
8994 		/* Chip reset on 5780 will reset MSI enable bit,
8995 		 * so need to restore it.
8996 		 */
8997 		if (tg3_flag(tp, USING_MSI)) {
8998 			u16 ctrl;
8999 
9000 			pci_read_config_word(tp->pdev,
9001 					     tp->msi_cap + PCI_MSI_FLAGS,
9002 					     &ctrl);
9003 			pci_write_config_word(tp->pdev,
9004 					      tp->msi_cap + PCI_MSI_FLAGS,
9005 					      ctrl | PCI_MSI_FLAGS_ENABLE);
9006 			val = tr32(MSGINT_MODE);
9007 			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
9008 		}
9009 	}
9010 }
9011 
9012 static void tg3_override_clk(struct tg3 *tp)
9013 {
9014 	u32 val;
9015 
9016 	switch (tg3_asic_rev(tp)) {
9017 	case ASIC_REV_5717:
9018 		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9019 		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9020 		     TG3_CPMU_MAC_ORIDE_ENABLE);
9021 		break;
9022 
9023 	case ASIC_REV_5719:
9024 	case ASIC_REV_5720:
9025 		tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9026 		break;
9027 
9028 	default:
9029 		return;
9030 	}
9031 }
9032 
9033 static void tg3_restore_clk(struct tg3 *tp)
9034 {
9035 	u32 val;
9036 
9037 	switch (tg3_asic_rev(tp)) {
9038 	case ASIC_REV_5717:
9039 		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9040 		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9041 		     val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9042 		break;
9043 
9044 	case ASIC_REV_5719:
9045 	case ASIC_REV_5720:
9046 		val = tr32(TG3_CPMU_CLCK_ORIDE);
9047 		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9048 		break;
9049 
9050 	default:
9051 		return;
9052 	}
9053 }
9054 
9055 /* tp->lock is held. */
9056 static int tg3_chip_reset(struct tg3 *tp)
9057 	__releases(tp->lock)
9058 	__acquires(tp->lock)
9059 {
9060 	u32 val;
9061 	void (*write_op)(struct tg3 *, u32, u32);
9062 	int i, err;
9063 
9064 	if (!pci_device_is_present(tp->pdev))
9065 		return -ENODEV;
9066 
9067 	tg3_nvram_lock(tp);
9068 
9069 	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9070 
9071 	/* No matching tg3_nvram_unlock() after this because
9072 	 * chip reset below will undo the nvram lock.
9073 	 */
9074 	tp->nvram_lock_cnt = 0;
9075 
9076 	/* GRC_MISC_CFG core clock reset will clear the memory
9077 	 * enable bit in PCI register 4 and the MSI enable bit
9078 	 * on some chips, so we save relevant registers here.
9079 	 */
9080 	tg3_save_pci_state(tp);
9081 
9082 	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9083 	    tg3_flag(tp, 5755_PLUS))
9084 		tw32(GRC_FASTBOOT_PC, 0);
9085 
9086 	/*
9087 	 * We must avoid the readl() that normally takes place.
9088 	 * It locks machines, causes machine checks, and other
9089 	 * fun things.  So, temporarily disable the 5701
9090 	 * hardware workaround, while we do the reset.
9091 	 */
9092 	write_op = tp->write32;
9093 	if (write_op == tg3_write_flush_reg32)
9094 		tp->write32 = tg3_write32;
9095 
9096 	/* Prevent the irq handler from reading or writing PCI registers
9097 	 * during chip reset when the memory enable bit in the PCI command
9098 	 * register may be cleared.  The chip does not generate interrupt
9099 	 * at this time, but the irq handler may still be called due to irq
9100 	 * sharing or irqpoll.
9101 	 */
9102 	tg3_flag_set(tp, CHIP_RESETTING);
9103 	for (i = 0; i < tp->irq_cnt; i++) {
9104 		struct tg3_napi *tnapi = &tp->napi[i];
9105 		if (tnapi->hw_status) {
9106 			tnapi->hw_status->status = 0;
9107 			tnapi->hw_status->status_tag = 0;
9108 		}
9109 		tnapi->last_tag = 0;
9110 		tnapi->last_irq_tag = 0;
9111 	}
9112 	smp_mb();
9113 
9114 	tg3_full_unlock(tp);
9115 
9116 	for (i = 0; i < tp->irq_cnt; i++)
9117 		synchronize_irq(tp->napi[i].irq_vec);
9118 
9119 	tg3_full_lock(tp, 0);
9120 
9121 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9122 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9123 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9124 	}
9125 
9126 	/* do the reset */
9127 	val = GRC_MISC_CFG_CORECLK_RESET;
9128 
9129 	if (tg3_flag(tp, PCI_EXPRESS)) {
9130 		/* Force PCIe 1.0a mode */
9131 		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9132 		    !tg3_flag(tp, 57765_PLUS) &&
9133 		    tr32(TG3_PCIE_PHY_TSTCTL) ==
9134 		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9135 			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9136 
9137 		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9138 			tw32(GRC_MISC_CFG, (1 << 29));
9139 			val |= (1 << 29);
9140 		}
9141 	}
9142 
9143 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9144 		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9145 		tw32(GRC_VCPU_EXT_CTRL,
9146 		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9147 	}
9148 
9149 	/* Set the clock to the highest frequency to avoid timeouts. With link
9150 	 * aware mode, the clock speed could be slow and bootcode does not
9151 	 * complete within the expected time. Override the clock to allow the
9152 	 * bootcode to finish sooner and then restore it.
9153 	 */
9154 	tg3_override_clk(tp);
9155 
9156 	/* Manage gphy power for all CPMU absent PCIe devices. */
9157 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9158 		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9159 
9160 	tw32(GRC_MISC_CFG, val);
9161 
9162 	/* restore 5701 hardware bug workaround write method */
9163 	tp->write32 = write_op;
9164 
9165 	/* Unfortunately, we have to delay before the PCI read back.
9166 	 * Some 575X chips even will not respond to a PCI cfg access
9167 	 * when the reset command is given to the chip.
9168 	 *
9169 	 * How do these hardware designers expect things to work
9170 	 * properly if the PCI write is posted for a long period
9171 	 * of time?  It is always necessary to have some method by
9172 	 * which a register read back can occur to push the write
9173 	 * out which does the reset.
9174 	 *
9175 	 * For most tg3 variants the trick below was working.
9176 	 * Ho hum...
9177 	 */
9178 	udelay(120);
9179 
9180 	/* Flush PCI posted writes.  The normal MMIO registers
9181 	 * are inaccessible at this time so this is the only
9182 	 * way to make this reliably (actually, this is no longer
9183 	 * the case, see above).  I tried to use indirect
9184 	 * register read/write but this upset some 5701 variants.
9185 	 */
9186 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9187 
9188 	udelay(120);
9189 
9190 	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9191 		u16 val16;
9192 
9193 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9194 			int j;
9195 			u32 cfg_val;
9196 
9197 			/* Wait for link training to complete.  */
9198 			for (j = 0; j < 5000; j++)
9199 				udelay(100);
9200 
9201 			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9202 			pci_write_config_dword(tp->pdev, 0xc4,
9203 					       cfg_val | (1 << 15));
9204 		}
9205 
9206 		/* Clear the "no snoop" and "relaxed ordering" bits. */
9207 		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9208 		/*
9209 		 * Older PCIe devices only support the 128 byte
9210 		 * MPS setting.  Enforce the restriction.
9211 		 */
9212 		if (!tg3_flag(tp, CPMU_PRESENT))
9213 			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9214 		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9215 
9216 		/* Clear error status */
9217 		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9218 				      PCI_EXP_DEVSTA_CED |
9219 				      PCI_EXP_DEVSTA_NFED |
9220 				      PCI_EXP_DEVSTA_FED |
9221 				      PCI_EXP_DEVSTA_URD);
9222 	}
9223 
9224 	tg3_restore_pci_state(tp);
9225 
9226 	tg3_flag_clear(tp, CHIP_RESETTING);
9227 	tg3_flag_clear(tp, ERROR_PROCESSED);
9228 
9229 	val = 0;
9230 	if (tg3_flag(tp, 5780_CLASS))
9231 		val = tr32(MEMARB_MODE);
9232 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9233 
9234 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9235 		tg3_stop_fw(tp);
9236 		tw32(0x5000, 0x400);
9237 	}
9238 
9239 	if (tg3_flag(tp, IS_SSB_CORE)) {
9240 		/*
9241 		 * BCM4785: In order to avoid repercussions from using
9242 		 * potentially defective internal ROM, stop the Rx RISC CPU,
9243 		 * which is not required.
9244 		 */
9245 		tg3_stop_fw(tp);
9246 		tg3_halt_cpu(tp, RX_CPU_BASE);
9247 	}
9248 
9249 	err = tg3_poll_fw(tp);
9250 	if (err)
9251 		return err;
9252 
9253 	tw32(GRC_MODE, tp->grc_mode);
9254 
9255 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9256 		val = tr32(0xc4);
9257 
9258 		tw32(0xc4, val | (1 << 15));
9259 	}
9260 
9261 	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9262 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
9263 		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9264 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9265 			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9266 		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9267 	}
9268 
9269 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9270 		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9271 		val = tp->mac_mode;
9272 	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9273 		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9274 		val = tp->mac_mode;
9275 	} else
9276 		val = 0;
9277 
9278 	tw32_f(MAC_MODE, val);
9279 	udelay(40);
9280 
9281 	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9282 
9283 	tg3_mdio_start(tp);
9284 
9285 	if (tg3_flag(tp, PCI_EXPRESS) &&
9286 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9287 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
9288 	    !tg3_flag(tp, 57765_PLUS)) {
9289 		val = tr32(0x7c00);
9290 
9291 		tw32(0x7c00, val | (1 << 25));
9292 	}
9293 
9294 	tg3_restore_clk(tp);
9295 
9296 	/* Increase the core clock speed to fix tx timeout issue for 5762
9297 	 * with 100Mbps link speed.
9298 	 */
9299 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9300 		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9301 		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9302 		     TG3_CPMU_MAC_ORIDE_ENABLE);
9303 	}
9304 
9305 	/* Reprobe ASF enable state.  */
9306 	tg3_flag_clear(tp, ENABLE_ASF);
9307 	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9308 			   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9309 
9310 	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9311 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9312 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9313 		u32 nic_cfg;
9314 
9315 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9316 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9317 			tg3_flag_set(tp, ENABLE_ASF);
9318 			tp->last_event_jiffies = jiffies;
9319 			if (tg3_flag(tp, 5750_PLUS))
9320 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9321 
9322 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9323 			if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9324 				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9325 			if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9326 				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9327 		}
9328 	}
9329 
9330 	return 0;
9331 }
9332 
9333 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9334 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9335 static void __tg3_set_rx_mode(struct net_device *);
9336 
9337 /* tp->lock is held. */
9338 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9339 {
9340 	int err;
9341 
9342 	tg3_stop_fw(tp);
9343 
9344 	tg3_write_sig_pre_reset(tp, kind);
9345 
9346 	tg3_abort_hw(tp, silent);
9347 	err = tg3_chip_reset(tp);
9348 
9349 	__tg3_set_mac_addr(tp, false);
9350 
9351 	tg3_write_sig_legacy(tp, kind);
9352 	tg3_write_sig_post_reset(tp, kind);
9353 
9354 	if (tp->hw_stats) {
9355 		/* Save the stats across chip resets... */
9356 		tg3_get_nstats(tp, &tp->net_stats_prev);
9357 		tg3_get_estats(tp, &tp->estats_prev);
9358 
9359 		/* And make sure the next sample is new data */
9360 		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9361 	}
9362 
9363 	return err;
9364 }
9365 
9366 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9367 {
9368 	struct tg3 *tp = netdev_priv(dev);
9369 	struct sockaddr *addr = p;
9370 	int err = 0;
9371 	bool skip_mac_1 = false;
9372 
9373 	if (!is_valid_ether_addr(addr->sa_data))
9374 		return -EADDRNOTAVAIL;
9375 
9376 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9377 
9378 	if (!netif_running(dev))
9379 		return 0;
9380 
9381 	if (tg3_flag(tp, ENABLE_ASF)) {
9382 		u32 addr0_high, addr0_low, addr1_high, addr1_low;
9383 
9384 		addr0_high = tr32(MAC_ADDR_0_HIGH);
9385 		addr0_low = tr32(MAC_ADDR_0_LOW);
9386 		addr1_high = tr32(MAC_ADDR_1_HIGH);
9387 		addr1_low = tr32(MAC_ADDR_1_LOW);
9388 
9389 		/* Skip MAC addr 1 if ASF is using it. */
9390 		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9391 		    !(addr1_high == 0 && addr1_low == 0))
9392 			skip_mac_1 = true;
9393 	}
9394 	spin_lock_bh(&tp->lock);
9395 	__tg3_set_mac_addr(tp, skip_mac_1);
9396 	__tg3_set_rx_mode(dev);
9397 	spin_unlock_bh(&tp->lock);
9398 
9399 	return err;
9400 }
9401 
9402 /* tp->lock is held. */
9403 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9404 			   dma_addr_t mapping, u32 maxlen_flags,
9405 			   u32 nic_addr)
9406 {
9407 	tg3_write_mem(tp,
9408 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9409 		      ((u64) mapping >> 32));
9410 	tg3_write_mem(tp,
9411 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9412 		      ((u64) mapping & 0xffffffff));
9413 	tg3_write_mem(tp,
9414 		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9415 		       maxlen_flags);
9416 
9417 	if (!tg3_flag(tp, 5705_PLUS))
9418 		tg3_write_mem(tp,
9419 			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9420 			      nic_addr);
9421 }
9422 
9423 
9424 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9425 {
9426 	int i = 0;
9427 
9428 	if (!tg3_flag(tp, ENABLE_TSS)) {
9429 		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9430 		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9431 		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9432 	} else {
9433 		tw32(HOSTCC_TXCOL_TICKS, 0);
9434 		tw32(HOSTCC_TXMAX_FRAMES, 0);
9435 		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9436 
9437 		for (; i < tp->txq_cnt; i++) {
9438 			u32 reg;
9439 
9440 			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9441 			tw32(reg, ec->tx_coalesce_usecs);
9442 			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9443 			tw32(reg, ec->tx_max_coalesced_frames);
9444 			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9445 			tw32(reg, ec->tx_max_coalesced_frames_irq);
9446 		}
9447 	}
9448 
9449 	for (; i < tp->irq_max - 1; i++) {
9450 		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9451 		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9452 		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9453 	}
9454 }
9455 
9456 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9457 {
9458 	int i = 0;
9459 	u32 limit = tp->rxq_cnt;
9460 
9461 	if (!tg3_flag(tp, ENABLE_RSS)) {
9462 		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9463 		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9464 		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9465 		limit--;
9466 	} else {
9467 		tw32(HOSTCC_RXCOL_TICKS, 0);
9468 		tw32(HOSTCC_RXMAX_FRAMES, 0);
9469 		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9470 	}
9471 
9472 	for (; i < limit; i++) {
9473 		u32 reg;
9474 
9475 		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9476 		tw32(reg, ec->rx_coalesce_usecs);
9477 		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9478 		tw32(reg, ec->rx_max_coalesced_frames);
9479 		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9480 		tw32(reg, ec->rx_max_coalesced_frames_irq);
9481 	}
9482 
9483 	for (; i < tp->irq_max - 1; i++) {
9484 		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9485 		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9486 		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9487 	}
9488 }
9489 
9490 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9491 {
9492 	tg3_coal_tx_init(tp, ec);
9493 	tg3_coal_rx_init(tp, ec);
9494 
9495 	if (!tg3_flag(tp, 5705_PLUS)) {
9496 		u32 val = ec->stats_block_coalesce_usecs;
9497 
9498 		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9499 		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9500 
9501 		if (!tp->link_up)
9502 			val = 0;
9503 
9504 		tw32(HOSTCC_STAT_COAL_TICKS, val);
9505 	}
9506 }
9507 
9508 /* tp->lock is held. */
9509 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9510 {
9511 	u32 txrcb, limit;
9512 
9513 	/* Disable all transmit rings but the first. */
9514 	if (!tg3_flag(tp, 5705_PLUS))
9515 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9516 	else if (tg3_flag(tp, 5717_PLUS))
9517 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9518 	else if (tg3_flag(tp, 57765_CLASS) ||
9519 		 tg3_asic_rev(tp) == ASIC_REV_5762)
9520 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9521 	else
9522 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9523 
9524 	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9525 	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9526 		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9527 			      BDINFO_FLAGS_DISABLED);
9528 }
9529 
9530 /* tp->lock is held. */
9531 static void tg3_tx_rcbs_init(struct tg3 *tp)
9532 {
9533 	int i = 0;
9534 	u32 txrcb = NIC_SRAM_SEND_RCB;
9535 
9536 	if (tg3_flag(tp, ENABLE_TSS))
9537 		i++;
9538 
9539 	for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9540 		struct tg3_napi *tnapi = &tp->napi[i];
9541 
9542 		if (!tnapi->tx_ring)
9543 			continue;
9544 
9545 		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9546 			       (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9547 			       NIC_SRAM_TX_BUFFER_DESC);
9548 	}
9549 }
9550 
9551 /* tp->lock is held. */
9552 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9553 {
9554 	u32 rxrcb, limit;
9555 
9556 	/* Disable all receive return rings but the first. */
9557 	if (tg3_flag(tp, 5717_PLUS))
9558 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9559 	else if (!tg3_flag(tp, 5705_PLUS))
9560 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9561 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9562 		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9563 		 tg3_flag(tp, 57765_CLASS))
9564 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9565 	else
9566 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9567 
9568 	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9569 	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9570 		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9571 			      BDINFO_FLAGS_DISABLED);
9572 }
9573 
9574 /* tp->lock is held. */
9575 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9576 {
9577 	int i = 0;
9578 	u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9579 
9580 	if (tg3_flag(tp, ENABLE_RSS))
9581 		i++;
9582 
9583 	for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9584 		struct tg3_napi *tnapi = &tp->napi[i];
9585 
9586 		if (!tnapi->rx_rcb)
9587 			continue;
9588 
9589 		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9590 			       (tp->rx_ret_ring_mask + 1) <<
9591 				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9592 	}
9593 }
9594 
9595 /* tp->lock is held. */
9596 static void tg3_rings_reset(struct tg3 *tp)
9597 {
9598 	int i;
9599 	u32 stblk;
9600 	struct tg3_napi *tnapi = &tp->napi[0];
9601 
9602 	tg3_tx_rcbs_disable(tp);
9603 
9604 	tg3_rx_ret_rcbs_disable(tp);
9605 
9606 	/* Disable interrupts */
9607 	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9608 	tp->napi[0].chk_msi_cnt = 0;
9609 	tp->napi[0].last_rx_cons = 0;
9610 	tp->napi[0].last_tx_cons = 0;
9611 
9612 	/* Zero mailbox registers. */
9613 	if (tg3_flag(tp, SUPPORT_MSIX)) {
9614 		for (i = 1; i < tp->irq_max; i++) {
9615 			tp->napi[i].tx_prod = 0;
9616 			tp->napi[i].tx_cons = 0;
9617 			if (tg3_flag(tp, ENABLE_TSS))
9618 				tw32_mailbox(tp->napi[i].prodmbox, 0);
9619 			tw32_rx_mbox(tp->napi[i].consmbox, 0);
9620 			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9621 			tp->napi[i].chk_msi_cnt = 0;
9622 			tp->napi[i].last_rx_cons = 0;
9623 			tp->napi[i].last_tx_cons = 0;
9624 		}
9625 		if (!tg3_flag(tp, ENABLE_TSS))
9626 			tw32_mailbox(tp->napi[0].prodmbox, 0);
9627 	} else {
9628 		tp->napi[0].tx_prod = 0;
9629 		tp->napi[0].tx_cons = 0;
9630 		tw32_mailbox(tp->napi[0].prodmbox, 0);
9631 		tw32_rx_mbox(tp->napi[0].consmbox, 0);
9632 	}
9633 
9634 	/* Make sure the NIC-based send BD rings are disabled. */
9635 	if (!tg3_flag(tp, 5705_PLUS)) {
9636 		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9637 		for (i = 0; i < 16; i++)
9638 			tw32_tx_mbox(mbox + i * 8, 0);
9639 	}
9640 
9641 	/* Clear status block in ram. */
9642 	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9643 
9644 	/* Set status block DMA address */
9645 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9646 	     ((u64) tnapi->status_mapping >> 32));
9647 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9648 	     ((u64) tnapi->status_mapping & 0xffffffff));
9649 
9650 	stblk = HOSTCC_STATBLCK_RING1;
9651 
9652 	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9653 		u64 mapping = (u64)tnapi->status_mapping;
9654 		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9655 		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9656 		stblk += 8;
9657 
9658 		/* Clear status block in ram. */
9659 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9660 	}
9661 
9662 	tg3_tx_rcbs_init(tp);
9663 	tg3_rx_ret_rcbs_init(tp);
9664 }
9665 
9666 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9667 {
9668 	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9669 
9670 	if (!tg3_flag(tp, 5750_PLUS) ||
9671 	    tg3_flag(tp, 5780_CLASS) ||
9672 	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
9673 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
9674 	    tg3_flag(tp, 57765_PLUS))
9675 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9676 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9677 		 tg3_asic_rev(tp) == ASIC_REV_5787)
9678 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9679 	else
9680 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9681 
9682 	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9683 	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9684 
9685 	val = min(nic_rep_thresh, host_rep_thresh);
9686 	tw32(RCVBDI_STD_THRESH, val);
9687 
9688 	if (tg3_flag(tp, 57765_PLUS))
9689 		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9690 
9691 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9692 		return;
9693 
9694 	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9695 
9696 	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9697 
9698 	val = min(bdcache_maxcnt / 2, host_rep_thresh);
9699 	tw32(RCVBDI_JUMBO_THRESH, val);
9700 
9701 	if (tg3_flag(tp, 57765_PLUS))
9702 		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9703 }
9704 
9705 static inline u32 calc_crc(unsigned char *buf, int len)
9706 {
9707 	u32 reg;
9708 	u32 tmp;
9709 	int j, k;
9710 
9711 	reg = 0xffffffff;
9712 
9713 	for (j = 0; j < len; j++) {
9714 		reg ^= buf[j];
9715 
9716 		for (k = 0; k < 8; k++) {
9717 			tmp = reg & 0x01;
9718 
9719 			reg >>= 1;
9720 
9721 			if (tmp)
9722 				reg ^= CRC32_POLY_LE;
9723 		}
9724 	}
9725 
9726 	return ~reg;
9727 }
9728 
9729 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9730 {
9731 	/* accept or reject all multicast frames */
9732 	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9733 	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9734 	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9735 	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9736 }
9737 
9738 static void __tg3_set_rx_mode(struct net_device *dev)
9739 {
9740 	struct tg3 *tp = netdev_priv(dev);
9741 	u32 rx_mode;
9742 
9743 	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9744 				  RX_MODE_KEEP_VLAN_TAG);
9745 
9746 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9747 	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9748 	 * flag clear.
9749 	 */
9750 	if (!tg3_flag(tp, ENABLE_ASF))
9751 		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9752 #endif
9753 
9754 	if (dev->flags & IFF_PROMISC) {
9755 		/* Promiscuous mode. */
9756 		rx_mode |= RX_MODE_PROMISC;
9757 	} else if (dev->flags & IFF_ALLMULTI) {
9758 		/* Accept all multicast. */
9759 		tg3_set_multi(tp, 1);
9760 	} else if (netdev_mc_empty(dev)) {
9761 		/* Reject all multicast. */
9762 		tg3_set_multi(tp, 0);
9763 	} else {
9764 		/* Accept one or more multicast(s). */
9765 		struct netdev_hw_addr *ha;
9766 		u32 mc_filter[4] = { 0, };
9767 		u32 regidx;
9768 		u32 bit;
9769 		u32 crc;
9770 
9771 		netdev_for_each_mc_addr(ha, dev) {
9772 			crc = calc_crc(ha->addr, ETH_ALEN);
9773 			bit = ~crc & 0x7f;
9774 			regidx = (bit & 0x60) >> 5;
9775 			bit &= 0x1f;
9776 			mc_filter[regidx] |= (1 << bit);
9777 		}
9778 
9779 		tw32(MAC_HASH_REG_0, mc_filter[0]);
9780 		tw32(MAC_HASH_REG_1, mc_filter[1]);
9781 		tw32(MAC_HASH_REG_2, mc_filter[2]);
9782 		tw32(MAC_HASH_REG_3, mc_filter[3]);
9783 	}
9784 
9785 	if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9786 		rx_mode |= RX_MODE_PROMISC;
9787 	} else if (!(dev->flags & IFF_PROMISC)) {
9788 		/* Add all entries into to the mac addr filter list */
9789 		int i = 0;
9790 		struct netdev_hw_addr *ha;
9791 
9792 		netdev_for_each_uc_addr(ha, dev) {
9793 			__tg3_set_one_mac_addr(tp, ha->addr,
9794 					       i + TG3_UCAST_ADDR_IDX(tp));
9795 			i++;
9796 		}
9797 	}
9798 
9799 	if (rx_mode != tp->rx_mode) {
9800 		tp->rx_mode = rx_mode;
9801 		tw32_f(MAC_RX_MODE, rx_mode);
9802 		udelay(10);
9803 	}
9804 }
9805 
9806 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9807 {
9808 	int i;
9809 
9810 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9811 		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9812 }
9813 
9814 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9815 {
9816 	int i;
9817 
9818 	if (!tg3_flag(tp, SUPPORT_MSIX))
9819 		return;
9820 
9821 	if (tp->rxq_cnt == 1) {
9822 		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9823 		return;
9824 	}
9825 
9826 	/* Validate table against current IRQ count */
9827 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9828 		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9829 			break;
9830 	}
9831 
9832 	if (i != TG3_RSS_INDIR_TBL_SIZE)
9833 		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9834 }
9835 
9836 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9837 {
9838 	int i = 0;
9839 	u32 reg = MAC_RSS_INDIR_TBL_0;
9840 
9841 	while (i < TG3_RSS_INDIR_TBL_SIZE) {
9842 		u32 val = tp->rss_ind_tbl[i];
9843 		i++;
9844 		for (; i % 8; i++) {
9845 			val <<= 4;
9846 			val |= tp->rss_ind_tbl[i];
9847 		}
9848 		tw32(reg, val);
9849 		reg += 4;
9850 	}
9851 }
9852 
9853 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9854 {
9855 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
9856 		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9857 	else
9858 		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9859 }
9860 
9861 /* tp->lock is held. */
9862 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9863 {
9864 	u32 val, rdmac_mode;
9865 	int i, err, limit;
9866 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9867 
9868 	tg3_disable_ints(tp);
9869 
9870 	tg3_stop_fw(tp);
9871 
9872 	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9873 
9874 	if (tg3_flag(tp, INIT_COMPLETE))
9875 		tg3_abort_hw(tp, 1);
9876 
9877 	if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9878 	    !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9879 		tg3_phy_pull_config(tp);
9880 		tg3_eee_pull_config(tp, NULL);
9881 		tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9882 	}
9883 
9884 	/* Enable MAC control of LPI */
9885 	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9886 		tg3_setup_eee(tp);
9887 
9888 	if (reset_phy)
9889 		tg3_phy_reset(tp);
9890 
9891 	err = tg3_chip_reset(tp);
9892 	if (err)
9893 		return err;
9894 
9895 	tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9896 
9897 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9898 		val = tr32(TG3_CPMU_CTRL);
9899 		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9900 		tw32(TG3_CPMU_CTRL, val);
9901 
9902 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9903 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9904 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
9905 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9906 
9907 		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9908 		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9909 		val |= CPMU_LNK_AWARE_MACCLK_6_25;
9910 		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9911 
9912 		val = tr32(TG3_CPMU_HST_ACC);
9913 		val &= ~CPMU_HST_ACC_MACCLK_MASK;
9914 		val |= CPMU_HST_ACC_MACCLK_6_25;
9915 		tw32(TG3_CPMU_HST_ACC, val);
9916 	}
9917 
9918 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9919 		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9920 		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9921 		       PCIE_PWR_MGMT_L1_THRESH_4MS;
9922 		tw32(PCIE_PWR_MGMT_THRESH, val);
9923 
9924 		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9925 		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9926 
9927 		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9928 
9929 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9930 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9931 	}
9932 
9933 	if (tg3_flag(tp, L1PLLPD_EN)) {
9934 		u32 grc_mode = tr32(GRC_MODE);
9935 
9936 		/* Access the lower 1K of PL PCIE block registers. */
9937 		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9938 		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9939 
9940 		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9941 		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9942 		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9943 
9944 		tw32(GRC_MODE, grc_mode);
9945 	}
9946 
9947 	if (tg3_flag(tp, 57765_CLASS)) {
9948 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9949 			u32 grc_mode = tr32(GRC_MODE);
9950 
9951 			/* Access the lower 1K of PL PCIE block registers. */
9952 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9953 			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9954 
9955 			val = tr32(TG3_PCIE_TLDLPL_PORT +
9956 				   TG3_PCIE_PL_LO_PHYCTL5);
9957 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9958 			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9959 
9960 			tw32(GRC_MODE, grc_mode);
9961 		}
9962 
9963 		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9964 			u32 grc_mode;
9965 
9966 			/* Fix transmit hangs */
9967 			val = tr32(TG3_CPMU_PADRNG_CTL);
9968 			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9969 			tw32(TG3_CPMU_PADRNG_CTL, val);
9970 
9971 			grc_mode = tr32(GRC_MODE);
9972 
9973 			/* Access the lower 1K of DL PCIE block registers. */
9974 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9975 			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9976 
9977 			val = tr32(TG3_PCIE_TLDLPL_PORT +
9978 				   TG3_PCIE_DL_LO_FTSMAX);
9979 			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9980 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9981 			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9982 
9983 			tw32(GRC_MODE, grc_mode);
9984 		}
9985 
9986 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9987 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9988 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
9989 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9990 	}
9991 
9992 	/* This works around an issue with Athlon chipsets on
9993 	 * B3 tigon3 silicon.  This bit has no effect on any
9994 	 * other revision.  But do not set this on PCI Express
9995 	 * chips and don't even touch the clocks if the CPMU is present.
9996 	 */
9997 	if (!tg3_flag(tp, CPMU_PRESENT)) {
9998 		if (!tg3_flag(tp, PCI_EXPRESS))
9999 			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
10000 		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
10001 	}
10002 
10003 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
10004 	    tg3_flag(tp, PCIX_MODE)) {
10005 		val = tr32(TG3PCI_PCISTATE);
10006 		val |= PCISTATE_RETRY_SAME_DMA;
10007 		tw32(TG3PCI_PCISTATE, val);
10008 	}
10009 
10010 	if (tg3_flag(tp, ENABLE_APE)) {
10011 		/* Allow reads and writes to the
10012 		 * APE register and memory space.
10013 		 */
10014 		val = tr32(TG3PCI_PCISTATE);
10015 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10016 		       PCISTATE_ALLOW_APE_SHMEM_WR |
10017 		       PCISTATE_ALLOW_APE_PSPACE_WR;
10018 		tw32(TG3PCI_PCISTATE, val);
10019 	}
10020 
10021 	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10022 		/* Enable some hw fixes.  */
10023 		val = tr32(TG3PCI_MSI_DATA);
10024 		val |= (1 << 26) | (1 << 28) | (1 << 29);
10025 		tw32(TG3PCI_MSI_DATA, val);
10026 	}
10027 
10028 	/* Descriptor ring init may make accesses to the
10029 	 * NIC SRAM area to setup the TX descriptors, so we
10030 	 * can only do this after the hardware has been
10031 	 * successfully reset.
10032 	 */
10033 	err = tg3_init_rings(tp);
10034 	if (err)
10035 		return err;
10036 
10037 	if (tg3_flag(tp, 57765_PLUS)) {
10038 		val = tr32(TG3PCI_DMA_RW_CTRL) &
10039 		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10040 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10041 			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10042 		if (!tg3_flag(tp, 57765_CLASS) &&
10043 		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
10044 		    tg3_asic_rev(tp) != ASIC_REV_5762)
10045 			val |= DMA_RWCTRL_TAGGED_STAT_WA;
10046 		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10047 	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10048 		   tg3_asic_rev(tp) != ASIC_REV_5761) {
10049 		/* This value is determined during the probe time DMA
10050 		 * engine test, tg3_test_dma.
10051 		 */
10052 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10053 	}
10054 
10055 	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10056 			  GRC_MODE_4X_NIC_SEND_RINGS |
10057 			  GRC_MODE_NO_TX_PHDR_CSUM |
10058 			  GRC_MODE_NO_RX_PHDR_CSUM);
10059 	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10060 
10061 	/* Pseudo-header checksum is done by hardware logic and not
10062 	 * the offload processers, so make the chip do the pseudo-
10063 	 * header checksums on receive.  For transmit it is more
10064 	 * convenient to do the pseudo-header checksum in software
10065 	 * as Linux does that on transmit for us in all cases.
10066 	 */
10067 	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10068 
10069 	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10070 	if (tp->rxptpctl)
10071 		tw32(TG3_RX_PTP_CTL,
10072 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10073 
10074 	if (tg3_flag(tp, PTP_CAPABLE))
10075 		val |= GRC_MODE_TIME_SYNC_ENABLE;
10076 
10077 	tw32(GRC_MODE, tp->grc_mode | val);
10078 
10079 	/* On one of the AMD platform, MRRS is restricted to 4000 because of
10080 	 * south bridge limitation. As a workaround, Driver is setting MRRS
10081 	 * to 2048 instead of default 4096.
10082 	 */
10083 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10084 	    tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10085 		val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10086 		tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10087 	}
10088 
10089 	/* Setup the timer prescalar register.  Clock is always 66Mhz. */
10090 	val = tr32(GRC_MISC_CFG);
10091 	val &= ~0xff;
10092 	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10093 	tw32(GRC_MISC_CFG, val);
10094 
10095 	/* Initialize MBUF/DESC pool. */
10096 	if (tg3_flag(tp, 5750_PLUS)) {
10097 		/* Do nothing.  */
10098 	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10099 		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10100 		if (tg3_asic_rev(tp) == ASIC_REV_5704)
10101 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10102 		else
10103 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10104 		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10105 		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10106 	} else if (tg3_flag(tp, TSO_CAPABLE)) {
10107 		int fw_len;
10108 
10109 		fw_len = tp->fw_len;
10110 		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10111 		tw32(BUFMGR_MB_POOL_ADDR,
10112 		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10113 		tw32(BUFMGR_MB_POOL_SIZE,
10114 		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10115 	}
10116 
10117 	if (tp->dev->mtu <= ETH_DATA_LEN) {
10118 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
10119 		     tp->bufmgr_config.mbuf_read_dma_low_water);
10120 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
10121 		     tp->bufmgr_config.mbuf_mac_rx_low_water);
10122 		tw32(BUFMGR_MB_HIGH_WATER,
10123 		     tp->bufmgr_config.mbuf_high_water);
10124 	} else {
10125 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
10126 		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10127 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
10128 		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10129 		tw32(BUFMGR_MB_HIGH_WATER,
10130 		     tp->bufmgr_config.mbuf_high_water_jumbo);
10131 	}
10132 	tw32(BUFMGR_DMA_LOW_WATER,
10133 	     tp->bufmgr_config.dma_low_water);
10134 	tw32(BUFMGR_DMA_HIGH_WATER,
10135 	     tp->bufmgr_config.dma_high_water);
10136 
10137 	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10138 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
10139 		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10140 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10141 	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
10142 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10143 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10144 		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10145 	tw32(BUFMGR_MODE, val);
10146 	for (i = 0; i < 2000; i++) {
10147 		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10148 			break;
10149 		udelay(10);
10150 	}
10151 	if (i >= 2000) {
10152 		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10153 		return -ENODEV;
10154 	}
10155 
10156 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10157 		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10158 
10159 	tg3_setup_rxbd_thresholds(tp);
10160 
10161 	/* Initialize TG3_BDINFO's at:
10162 	 *  RCVDBDI_STD_BD:	standard eth size rx ring
10163 	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
10164 	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
10165 	 *
10166 	 * like so:
10167 	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
10168 	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
10169 	 *                              ring attribute flags
10170 	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
10171 	 *
10172 	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10173 	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10174 	 *
10175 	 * The size of each ring is fixed in the firmware, but the location is
10176 	 * configurable.
10177 	 */
10178 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10179 	     ((u64) tpr->rx_std_mapping >> 32));
10180 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10181 	     ((u64) tpr->rx_std_mapping & 0xffffffff));
10182 	if (!tg3_flag(tp, 5717_PLUS))
10183 		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10184 		     NIC_SRAM_RX_BUFFER_DESC);
10185 
10186 	/* Disable the mini ring */
10187 	if (!tg3_flag(tp, 5705_PLUS))
10188 		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10189 		     BDINFO_FLAGS_DISABLED);
10190 
10191 	/* Program the jumbo buffer descriptor ring control
10192 	 * blocks on those devices that have them.
10193 	 */
10194 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10195 	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10196 
10197 		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10198 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10199 			     ((u64) tpr->rx_jmb_mapping >> 32));
10200 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10201 			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10202 			val = TG3_RX_JMB_RING_SIZE(tp) <<
10203 			      BDINFO_FLAGS_MAXLEN_SHIFT;
10204 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10205 			     val | BDINFO_FLAGS_USE_EXT_RECV);
10206 			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10207 			    tg3_flag(tp, 57765_CLASS) ||
10208 			    tg3_asic_rev(tp) == ASIC_REV_5762)
10209 				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10210 				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10211 		} else {
10212 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10213 			     BDINFO_FLAGS_DISABLED);
10214 		}
10215 
10216 		if (tg3_flag(tp, 57765_PLUS)) {
10217 			val = TG3_RX_STD_RING_SIZE(tp);
10218 			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10219 			val |= (TG3_RX_STD_DMA_SZ << 2);
10220 		} else
10221 			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10222 	} else
10223 		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10224 
10225 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10226 
10227 	tpr->rx_std_prod_idx = tp->rx_pending;
10228 	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10229 
10230 	tpr->rx_jmb_prod_idx =
10231 		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10232 	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10233 
10234 	tg3_rings_reset(tp);
10235 
10236 	/* Initialize MAC address and backoff seed. */
10237 	__tg3_set_mac_addr(tp, false);
10238 
10239 	/* MTU + ethernet header + FCS + optional VLAN tag */
10240 	tw32(MAC_RX_MTU_SIZE,
10241 	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10242 
10243 	/* The slot time is changed by tg3_setup_phy if we
10244 	 * run at gigabit with half duplex.
10245 	 */
10246 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10247 	      (6 << TX_LENGTHS_IPG_SHIFT) |
10248 	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10249 
10250 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10251 	    tg3_asic_rev(tp) == ASIC_REV_5762)
10252 		val |= tr32(MAC_TX_LENGTHS) &
10253 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
10254 			TX_LENGTHS_CNT_DWN_VAL_MSK);
10255 
10256 	tw32(MAC_TX_LENGTHS, val);
10257 
10258 	/* Receive rules. */
10259 	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10260 	tw32(RCVLPC_CONFIG, 0x0181);
10261 
10262 	/* Calculate RDMAC_MODE setting early, we need it to determine
10263 	 * the RCVLPC_STATE_ENABLE mask.
10264 	 */
10265 	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10266 		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10267 		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10268 		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10269 		      RDMAC_MODE_LNGREAD_ENAB);
10270 
10271 	if (tg3_asic_rev(tp) == ASIC_REV_5717)
10272 		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10273 
10274 	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10275 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10276 	    tg3_asic_rev(tp) == ASIC_REV_57780)
10277 		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10278 			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10279 			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10280 
10281 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10282 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10283 		if (tg3_flag(tp, TSO_CAPABLE) &&
10284 		    tg3_asic_rev(tp) == ASIC_REV_5705) {
10285 			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10286 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10287 			   !tg3_flag(tp, IS_5788)) {
10288 			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10289 		}
10290 	}
10291 
10292 	if (tg3_flag(tp, PCI_EXPRESS))
10293 		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10294 
10295 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10296 		tp->dma_limit = 0;
10297 		if (tp->dev->mtu <= ETH_DATA_LEN) {
10298 			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10299 			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10300 		}
10301 	}
10302 
10303 	if (tg3_flag(tp, HW_TSO_1) ||
10304 	    tg3_flag(tp, HW_TSO_2) ||
10305 	    tg3_flag(tp, HW_TSO_3))
10306 		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10307 
10308 	if (tg3_flag(tp, 57765_PLUS) ||
10309 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10310 	    tg3_asic_rev(tp) == ASIC_REV_57780)
10311 		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10312 
10313 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10314 	    tg3_asic_rev(tp) == ASIC_REV_5762)
10315 		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10316 
10317 	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10318 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
10319 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10320 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
10321 	    tg3_flag(tp, 57765_PLUS)) {
10322 		u32 tgtreg;
10323 
10324 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
10325 			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10326 		else
10327 			tgtreg = TG3_RDMA_RSRVCTRL_REG;
10328 
10329 		val = tr32(tgtreg);
10330 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10331 		    tg3_asic_rev(tp) == ASIC_REV_5762) {
10332 			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10333 				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10334 				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10335 			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10336 			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10337 			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10338 		}
10339 		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10340 	}
10341 
10342 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10343 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
10344 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
10345 		u32 tgtreg;
10346 
10347 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
10348 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10349 		else
10350 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10351 
10352 		val = tr32(tgtreg);
10353 		tw32(tgtreg, val |
10354 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10355 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10356 	}
10357 
10358 	/* Receive/send statistics. */
10359 	if (tg3_flag(tp, 5750_PLUS)) {
10360 		val = tr32(RCVLPC_STATS_ENABLE);
10361 		val &= ~RCVLPC_STATSENAB_DACK_FIX;
10362 		tw32(RCVLPC_STATS_ENABLE, val);
10363 	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10364 		   tg3_flag(tp, TSO_CAPABLE)) {
10365 		val = tr32(RCVLPC_STATS_ENABLE);
10366 		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10367 		tw32(RCVLPC_STATS_ENABLE, val);
10368 	} else {
10369 		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10370 	}
10371 	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10372 	tw32(SNDDATAI_STATSENAB, 0xffffff);
10373 	tw32(SNDDATAI_STATSCTRL,
10374 	     (SNDDATAI_SCTRL_ENABLE |
10375 	      SNDDATAI_SCTRL_FASTUPD));
10376 
10377 	/* Setup host coalescing engine. */
10378 	tw32(HOSTCC_MODE, 0);
10379 	for (i = 0; i < 2000; i++) {
10380 		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10381 			break;
10382 		udelay(10);
10383 	}
10384 
10385 	__tg3_set_coalesce(tp, &tp->coal);
10386 
10387 	if (!tg3_flag(tp, 5705_PLUS)) {
10388 		/* Status/statistics block address.  See tg3_timer,
10389 		 * the tg3_periodic_fetch_stats call there, and
10390 		 * tg3_get_stats to see how this works for 5705/5750 chips.
10391 		 */
10392 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10393 		     ((u64) tp->stats_mapping >> 32));
10394 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10395 		     ((u64) tp->stats_mapping & 0xffffffff));
10396 		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10397 
10398 		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10399 
10400 		/* Clear statistics and status block memory areas */
10401 		for (i = NIC_SRAM_STATS_BLK;
10402 		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10403 		     i += sizeof(u32)) {
10404 			tg3_write_mem(tp, i, 0);
10405 			udelay(40);
10406 		}
10407 	}
10408 
10409 	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10410 
10411 	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10412 	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10413 	if (!tg3_flag(tp, 5705_PLUS))
10414 		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10415 
10416 	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10417 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10418 		/* reset to prevent losing 1st rx packet intermittently */
10419 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10420 		udelay(10);
10421 	}
10422 
10423 	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10424 			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10425 			MAC_MODE_FHDE_ENABLE;
10426 	if (tg3_flag(tp, ENABLE_APE))
10427 		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10428 	if (!tg3_flag(tp, 5705_PLUS) &&
10429 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10430 	    tg3_asic_rev(tp) != ASIC_REV_5700)
10431 		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10432 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10433 	udelay(40);
10434 
10435 	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10436 	 * If TG3_FLAG_IS_NIC is zero, we should read the
10437 	 * register to preserve the GPIO settings for LOMs. The GPIOs,
10438 	 * whether used as inputs or outputs, are set by boot code after
10439 	 * reset.
10440 	 */
10441 	if (!tg3_flag(tp, IS_NIC)) {
10442 		u32 gpio_mask;
10443 
10444 		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10445 			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10446 			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10447 
10448 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
10449 			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10450 				     GRC_LCLCTRL_GPIO_OUTPUT3;
10451 
10452 		if (tg3_asic_rev(tp) == ASIC_REV_5755)
10453 			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10454 
10455 		tp->grc_local_ctrl &= ~gpio_mask;
10456 		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10457 
10458 		/* GPIO1 must be driven high for eeprom write protect */
10459 		if (tg3_flag(tp, EEPROM_WRITE_PROT))
10460 			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10461 					       GRC_LCLCTRL_GPIO_OUTPUT1);
10462 	}
10463 	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10464 	udelay(100);
10465 
10466 	if (tg3_flag(tp, USING_MSIX)) {
10467 		val = tr32(MSGINT_MODE);
10468 		val |= MSGINT_MODE_ENABLE;
10469 		if (tp->irq_cnt > 1)
10470 			val |= MSGINT_MODE_MULTIVEC_EN;
10471 		if (!tg3_flag(tp, 1SHOT_MSI))
10472 			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10473 		tw32(MSGINT_MODE, val);
10474 	}
10475 
10476 	if (!tg3_flag(tp, 5705_PLUS)) {
10477 		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10478 		udelay(40);
10479 	}
10480 
10481 	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10482 	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10483 	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10484 	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10485 	       WDMAC_MODE_LNGREAD_ENAB);
10486 
10487 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10488 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10489 		if (tg3_flag(tp, TSO_CAPABLE) &&
10490 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10491 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10492 			/* nothing */
10493 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10494 			   !tg3_flag(tp, IS_5788)) {
10495 			val |= WDMAC_MODE_RX_ACCEL;
10496 		}
10497 	}
10498 
10499 	/* Enable host coalescing bug fix */
10500 	if (tg3_flag(tp, 5755_PLUS))
10501 		val |= WDMAC_MODE_STATUS_TAG_FIX;
10502 
10503 	if (tg3_asic_rev(tp) == ASIC_REV_5785)
10504 		val |= WDMAC_MODE_BURST_ALL_DATA;
10505 
10506 	tw32_f(WDMAC_MODE, val);
10507 	udelay(40);
10508 
10509 	if (tg3_flag(tp, PCIX_MODE)) {
10510 		u16 pcix_cmd;
10511 
10512 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10513 				     &pcix_cmd);
10514 		if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10515 			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10516 			pcix_cmd |= PCI_X_CMD_READ_2K;
10517 		} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10518 			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10519 			pcix_cmd |= PCI_X_CMD_READ_2K;
10520 		}
10521 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10522 				      pcix_cmd);
10523 	}
10524 
10525 	tw32_f(RDMAC_MODE, rdmac_mode);
10526 	udelay(40);
10527 
10528 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10529 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
10530 		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10531 			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10532 				break;
10533 		}
10534 		if (i < TG3_NUM_RDMA_CHANNELS) {
10535 			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10536 			val |= tg3_lso_rd_dma_workaround_bit(tp);
10537 			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10538 			tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10539 		}
10540 	}
10541 
10542 	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10543 	if (!tg3_flag(tp, 5705_PLUS))
10544 		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10545 
10546 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
10547 		tw32(SNDDATAC_MODE,
10548 		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10549 	else
10550 		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10551 
10552 	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10553 	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10554 	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10555 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
10556 		val |= RCVDBDI_MODE_LRG_RING_SZ;
10557 	tw32(RCVDBDI_MODE, val);
10558 	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10559 	if (tg3_flag(tp, HW_TSO_1) ||
10560 	    tg3_flag(tp, HW_TSO_2) ||
10561 	    tg3_flag(tp, HW_TSO_3))
10562 		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10563 	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10564 	if (tg3_flag(tp, ENABLE_TSS))
10565 		val |= SNDBDI_MODE_MULTI_TXQ_EN;
10566 	tw32(SNDBDI_MODE, val);
10567 	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10568 
10569 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10570 		err = tg3_load_5701_a0_firmware_fix(tp);
10571 		if (err)
10572 			return err;
10573 	}
10574 
10575 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10576 		/* Ignore any errors for the firmware download. If download
10577 		 * fails, the device will operate with EEE disabled
10578 		 */
10579 		tg3_load_57766_firmware(tp);
10580 	}
10581 
10582 	if (tg3_flag(tp, TSO_CAPABLE)) {
10583 		err = tg3_load_tso_firmware(tp);
10584 		if (err)
10585 			return err;
10586 	}
10587 
10588 	tp->tx_mode = TX_MODE_ENABLE;
10589 
10590 	if (tg3_flag(tp, 5755_PLUS) ||
10591 	    tg3_asic_rev(tp) == ASIC_REV_5906)
10592 		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10593 
10594 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10595 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
10596 		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10597 		tp->tx_mode &= ~val;
10598 		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10599 	}
10600 
10601 	tw32_f(MAC_TX_MODE, tp->tx_mode);
10602 	udelay(100);
10603 
10604 	if (tg3_flag(tp, ENABLE_RSS)) {
10605 		u32 rss_key[10];
10606 
10607 		tg3_rss_write_indir_tbl(tp);
10608 
10609 		netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10610 
10611 		for (i = 0; i < 10 ; i++)
10612 			tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10613 	}
10614 
10615 	tp->rx_mode = RX_MODE_ENABLE;
10616 	if (tg3_flag(tp, 5755_PLUS))
10617 		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10618 
10619 	if (tg3_asic_rev(tp) == ASIC_REV_5762)
10620 		tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10621 
10622 	if (tg3_flag(tp, ENABLE_RSS))
10623 		tp->rx_mode |= RX_MODE_RSS_ENABLE |
10624 			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
10625 			       RX_MODE_RSS_IPV6_HASH_EN |
10626 			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
10627 			       RX_MODE_RSS_IPV4_HASH_EN |
10628 			       RX_MODE_RSS_TCP_IPV4_HASH_EN;
10629 
10630 	tw32_f(MAC_RX_MODE, tp->rx_mode);
10631 	udelay(10);
10632 
10633 	tw32(MAC_LED_CTRL, tp->led_ctrl);
10634 
10635 	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10636 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10637 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10638 		udelay(10);
10639 	}
10640 	tw32_f(MAC_RX_MODE, tp->rx_mode);
10641 	udelay(10);
10642 
10643 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10644 		if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10645 		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10646 			/* Set drive transmission level to 1.2V  */
10647 			/* only if the signal pre-emphasis bit is not set  */
10648 			val = tr32(MAC_SERDES_CFG);
10649 			val &= 0xfffff000;
10650 			val |= 0x880;
10651 			tw32(MAC_SERDES_CFG, val);
10652 		}
10653 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10654 			tw32(MAC_SERDES_CFG, 0x616000);
10655 	}
10656 
10657 	/* Prevent chip from dropping frames when flow control
10658 	 * is enabled.
10659 	 */
10660 	if (tg3_flag(tp, 57765_CLASS))
10661 		val = 1;
10662 	else
10663 		val = 2;
10664 	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10665 
10666 	if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10667 	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10668 		/* Use hardware link auto-negotiation */
10669 		tg3_flag_set(tp, HW_AUTONEG);
10670 	}
10671 
10672 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10673 	    tg3_asic_rev(tp) == ASIC_REV_5714) {
10674 		u32 tmp;
10675 
10676 		tmp = tr32(SERDES_RX_CTRL);
10677 		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10678 		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10679 		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10680 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10681 	}
10682 
10683 	if (!tg3_flag(tp, USE_PHYLIB)) {
10684 		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10685 			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10686 
10687 		err = tg3_setup_phy(tp, false);
10688 		if (err)
10689 			return err;
10690 
10691 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10692 		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10693 			u32 tmp;
10694 
10695 			/* Clear CRC stats. */
10696 			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10697 				tg3_writephy(tp, MII_TG3_TEST1,
10698 					     tmp | MII_TG3_TEST1_CRC_EN);
10699 				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10700 			}
10701 		}
10702 	}
10703 
10704 	__tg3_set_rx_mode(tp->dev);
10705 
10706 	/* Initialize receive rules. */
10707 	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10708 	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10709 	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10710 	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10711 
10712 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10713 		limit = 8;
10714 	else
10715 		limit = 16;
10716 	if (tg3_flag(tp, ENABLE_ASF))
10717 		limit -= 4;
10718 	switch (limit) {
10719 	case 16:
10720 		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10721 		/* fall through */
10722 	case 15:
10723 		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10724 		/* fall through */
10725 	case 14:
10726 		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10727 		/* fall through */
10728 	case 13:
10729 		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10730 		/* fall through */
10731 	case 12:
10732 		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10733 		/* fall through */
10734 	case 11:
10735 		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10736 		/* fall through */
10737 	case 10:
10738 		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10739 		/* fall through */
10740 	case 9:
10741 		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10742 		/* fall through */
10743 	case 8:
10744 		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10745 		/* fall through */
10746 	case 7:
10747 		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10748 		/* fall through */
10749 	case 6:
10750 		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10751 		/* fall through */
10752 	case 5:
10753 		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10754 		/* fall through */
10755 	case 4:
10756 		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10757 	case 3:
10758 		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10759 	case 2:
10760 	case 1:
10761 
10762 	default:
10763 		break;
10764 	}
10765 
10766 	if (tg3_flag(tp, ENABLE_APE))
10767 		/* Write our heartbeat update interval to APE. */
10768 		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10769 				APE_HOST_HEARTBEAT_INT_5SEC);
10770 
10771 	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10772 
10773 	return 0;
10774 }
10775 
10776 /* Called at device open time to get the chip ready for
10777  * packet processing.  Invoked with tp->lock held.
10778  */
10779 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10780 {
10781 	/* Chip may have been just powered on. If so, the boot code may still
10782 	 * be running initialization. Wait for it to finish to avoid races in
10783 	 * accessing the hardware.
10784 	 */
10785 	tg3_enable_register_access(tp);
10786 	tg3_poll_fw(tp);
10787 
10788 	tg3_switch_clocks(tp);
10789 
10790 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10791 
10792 	return tg3_reset_hw(tp, reset_phy);
10793 }
10794 
10795 #ifdef CONFIG_TIGON3_HWMON
10796 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10797 {
10798 	int i;
10799 
10800 	for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10801 		u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10802 
10803 		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10804 		off += len;
10805 
10806 		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10807 		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10808 			memset(ocir, 0, TG3_OCIR_LEN);
10809 	}
10810 }
10811 
10812 /* sysfs attributes for hwmon */
10813 static ssize_t tg3_show_temp(struct device *dev,
10814 			     struct device_attribute *devattr, char *buf)
10815 {
10816 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10817 	struct tg3 *tp = dev_get_drvdata(dev);
10818 	u32 temperature;
10819 
10820 	spin_lock_bh(&tp->lock);
10821 	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10822 				sizeof(temperature));
10823 	spin_unlock_bh(&tp->lock);
10824 	return sprintf(buf, "%u\n", temperature * 1000);
10825 }
10826 
10827 
10828 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
10829 			  TG3_TEMP_SENSOR_OFFSET);
10830 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
10831 			  TG3_TEMP_CAUTION_OFFSET);
10832 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
10833 			  TG3_TEMP_MAX_OFFSET);
10834 
10835 static struct attribute *tg3_attrs[] = {
10836 	&sensor_dev_attr_temp1_input.dev_attr.attr,
10837 	&sensor_dev_attr_temp1_crit.dev_attr.attr,
10838 	&sensor_dev_attr_temp1_max.dev_attr.attr,
10839 	NULL
10840 };
10841 ATTRIBUTE_GROUPS(tg3);
10842 
10843 static void tg3_hwmon_close(struct tg3 *tp)
10844 {
10845 	if (tp->hwmon_dev) {
10846 		hwmon_device_unregister(tp->hwmon_dev);
10847 		tp->hwmon_dev = NULL;
10848 	}
10849 }
10850 
10851 static void tg3_hwmon_open(struct tg3 *tp)
10852 {
10853 	int i;
10854 	u32 size = 0;
10855 	struct pci_dev *pdev = tp->pdev;
10856 	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10857 
10858 	tg3_sd_scan_scratchpad(tp, ocirs);
10859 
10860 	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10861 		if (!ocirs[i].src_data_length)
10862 			continue;
10863 
10864 		size += ocirs[i].src_hdr_length;
10865 		size += ocirs[i].src_data_length;
10866 	}
10867 
10868 	if (!size)
10869 		return;
10870 
10871 	tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10872 							  tp, tg3_groups);
10873 	if (IS_ERR(tp->hwmon_dev)) {
10874 		tp->hwmon_dev = NULL;
10875 		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10876 	}
10877 }
10878 #else
10879 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10880 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10881 #endif /* CONFIG_TIGON3_HWMON */
10882 
10883 
10884 #define TG3_STAT_ADD32(PSTAT, REG) \
10885 do {	u32 __val = tr32(REG); \
10886 	(PSTAT)->low += __val; \
10887 	if ((PSTAT)->low < __val) \
10888 		(PSTAT)->high += 1; \
10889 } while (0)
10890 
10891 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10892 {
10893 	struct tg3_hw_stats *sp = tp->hw_stats;
10894 
10895 	if (!tp->link_up)
10896 		return;
10897 
10898 	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10899 	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10900 	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10901 	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10902 	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10903 	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10904 	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10905 	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10906 	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10907 	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10908 	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10909 	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10910 	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10911 	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10912 		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10913 		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10914 		u32 val;
10915 
10916 		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10917 		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10918 		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10919 		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10920 	}
10921 
10922 	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10923 	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10924 	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10925 	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10926 	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10927 	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10928 	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10929 	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10930 	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10931 	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10932 	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10933 	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10934 	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10935 	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10936 
10937 	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10938 	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10939 	    tg3_asic_rev(tp) != ASIC_REV_5762 &&
10940 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10941 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10942 		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10943 	} else {
10944 		u32 val = tr32(HOSTCC_FLOW_ATTN);
10945 		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10946 		if (val) {
10947 			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10948 			sp->rx_discards.low += val;
10949 			if (sp->rx_discards.low < val)
10950 				sp->rx_discards.high += 1;
10951 		}
10952 		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10953 	}
10954 	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10955 }
10956 
10957 static void tg3_chk_missed_msi(struct tg3 *tp)
10958 {
10959 	u32 i;
10960 
10961 	for (i = 0; i < tp->irq_cnt; i++) {
10962 		struct tg3_napi *tnapi = &tp->napi[i];
10963 
10964 		if (tg3_has_work(tnapi)) {
10965 			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10966 			    tnapi->last_tx_cons == tnapi->tx_cons) {
10967 				if (tnapi->chk_msi_cnt < 1) {
10968 					tnapi->chk_msi_cnt++;
10969 					return;
10970 				}
10971 				tg3_msi(0, tnapi);
10972 			}
10973 		}
10974 		tnapi->chk_msi_cnt = 0;
10975 		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10976 		tnapi->last_tx_cons = tnapi->tx_cons;
10977 	}
10978 }
10979 
10980 static void tg3_timer(struct timer_list *t)
10981 {
10982 	struct tg3 *tp = from_timer(tp, t, timer);
10983 
10984 	spin_lock(&tp->lock);
10985 
10986 	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10987 		spin_unlock(&tp->lock);
10988 		goto restart_timer;
10989 	}
10990 
10991 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10992 	    tg3_flag(tp, 57765_CLASS))
10993 		tg3_chk_missed_msi(tp);
10994 
10995 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10996 		/* BCM4785: Flush posted writes from GbE to host memory. */
10997 		tr32(HOSTCC_MODE);
10998 	}
10999 
11000 	if (!tg3_flag(tp, TAGGED_STATUS)) {
11001 		/* All of this garbage is because when using non-tagged
11002 		 * IRQ status the mailbox/status_block protocol the chip
11003 		 * uses with the cpu is race prone.
11004 		 */
11005 		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
11006 			tw32(GRC_LOCAL_CTRL,
11007 			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
11008 		} else {
11009 			tw32(HOSTCC_MODE, tp->coalesce_mode |
11010 			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
11011 		}
11012 
11013 		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11014 			spin_unlock(&tp->lock);
11015 			tg3_reset_task_schedule(tp);
11016 			goto restart_timer;
11017 		}
11018 	}
11019 
11020 	/* This part only runs once per second. */
11021 	if (!--tp->timer_counter) {
11022 		if (tg3_flag(tp, 5705_PLUS))
11023 			tg3_periodic_fetch_stats(tp);
11024 
11025 		if (tp->setlpicnt && !--tp->setlpicnt)
11026 			tg3_phy_eee_enable(tp);
11027 
11028 		if (tg3_flag(tp, USE_LINKCHG_REG)) {
11029 			u32 mac_stat;
11030 			int phy_event;
11031 
11032 			mac_stat = tr32(MAC_STATUS);
11033 
11034 			phy_event = 0;
11035 			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11036 				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11037 					phy_event = 1;
11038 			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11039 				phy_event = 1;
11040 
11041 			if (phy_event)
11042 				tg3_setup_phy(tp, false);
11043 		} else if (tg3_flag(tp, POLL_SERDES)) {
11044 			u32 mac_stat = tr32(MAC_STATUS);
11045 			int need_setup = 0;
11046 
11047 			if (tp->link_up &&
11048 			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11049 				need_setup = 1;
11050 			}
11051 			if (!tp->link_up &&
11052 			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
11053 					 MAC_STATUS_SIGNAL_DET))) {
11054 				need_setup = 1;
11055 			}
11056 			if (need_setup) {
11057 				if (!tp->serdes_counter) {
11058 					tw32_f(MAC_MODE,
11059 					     (tp->mac_mode &
11060 					      ~MAC_MODE_PORT_MODE_MASK));
11061 					udelay(40);
11062 					tw32_f(MAC_MODE, tp->mac_mode);
11063 					udelay(40);
11064 				}
11065 				tg3_setup_phy(tp, false);
11066 			}
11067 		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11068 			   tg3_flag(tp, 5780_CLASS)) {
11069 			tg3_serdes_parallel_detect(tp);
11070 		} else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11071 			u32 cpmu = tr32(TG3_CPMU_STATUS);
11072 			bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11073 					 TG3_CPMU_STATUS_LINK_MASK);
11074 
11075 			if (link_up != tp->link_up)
11076 				tg3_setup_phy(tp, false);
11077 		}
11078 
11079 		tp->timer_counter = tp->timer_multiplier;
11080 	}
11081 
11082 	/* Heartbeat is only sent once every 2 seconds.
11083 	 *
11084 	 * The heartbeat is to tell the ASF firmware that the host
11085 	 * driver is still alive.  In the event that the OS crashes,
11086 	 * ASF needs to reset the hardware to free up the FIFO space
11087 	 * that may be filled with rx packets destined for the host.
11088 	 * If the FIFO is full, ASF will no longer function properly.
11089 	 *
11090 	 * Unintended resets have been reported on real time kernels
11091 	 * where the timer doesn't run on time.  Netpoll will also have
11092 	 * same problem.
11093 	 *
11094 	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11095 	 * to check the ring condition when the heartbeat is expiring
11096 	 * before doing the reset.  This will prevent most unintended
11097 	 * resets.
11098 	 */
11099 	if (!--tp->asf_counter) {
11100 		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11101 			tg3_wait_for_event_ack(tp);
11102 
11103 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11104 				      FWCMD_NICDRV_ALIVE3);
11105 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11106 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11107 				      TG3_FW_UPDATE_TIMEOUT_SEC);
11108 
11109 			tg3_generate_fw_event(tp);
11110 		}
11111 		tp->asf_counter = tp->asf_multiplier;
11112 	}
11113 
11114 	/* Update the APE heartbeat every 5 seconds.*/
11115 	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11116 
11117 	spin_unlock(&tp->lock);
11118 
11119 restart_timer:
11120 	tp->timer.expires = jiffies + tp->timer_offset;
11121 	add_timer(&tp->timer);
11122 }
11123 
11124 static void tg3_timer_init(struct tg3 *tp)
11125 {
11126 	if (tg3_flag(tp, TAGGED_STATUS) &&
11127 	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
11128 	    !tg3_flag(tp, 57765_CLASS))
11129 		tp->timer_offset = HZ;
11130 	else
11131 		tp->timer_offset = HZ / 10;
11132 
11133 	BUG_ON(tp->timer_offset > HZ);
11134 
11135 	tp->timer_multiplier = (HZ / tp->timer_offset);
11136 	tp->asf_multiplier = (HZ / tp->timer_offset) *
11137 			     TG3_FW_UPDATE_FREQ_SEC;
11138 
11139 	timer_setup(&tp->timer, tg3_timer, 0);
11140 }
11141 
11142 static void tg3_timer_start(struct tg3 *tp)
11143 {
11144 	tp->asf_counter   = tp->asf_multiplier;
11145 	tp->timer_counter = tp->timer_multiplier;
11146 
11147 	tp->timer.expires = jiffies + tp->timer_offset;
11148 	add_timer(&tp->timer);
11149 }
11150 
11151 static void tg3_timer_stop(struct tg3 *tp)
11152 {
11153 	del_timer_sync(&tp->timer);
11154 }
11155 
11156 /* Restart hardware after configuration changes, self-test, etc.
11157  * Invoked with tp->lock held.
11158  */
11159 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11160 	__releases(tp->lock)
11161 	__acquires(tp->lock)
11162 {
11163 	int err;
11164 
11165 	err = tg3_init_hw(tp, reset_phy);
11166 	if (err) {
11167 		netdev_err(tp->dev,
11168 			   "Failed to re-initialize device, aborting\n");
11169 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11170 		tg3_full_unlock(tp);
11171 		tg3_timer_stop(tp);
11172 		tp->irq_sync = 0;
11173 		tg3_napi_enable(tp);
11174 		dev_close(tp->dev);
11175 		tg3_full_lock(tp, 0);
11176 	}
11177 	return err;
11178 }
11179 
11180 static void tg3_reset_task(struct work_struct *work)
11181 {
11182 	struct tg3 *tp = container_of(work, struct tg3, reset_task);
11183 	int err;
11184 
11185 	rtnl_lock();
11186 	tg3_full_lock(tp, 0);
11187 
11188 	if (!netif_running(tp->dev)) {
11189 		tg3_flag_clear(tp, RESET_TASK_PENDING);
11190 		tg3_full_unlock(tp);
11191 		rtnl_unlock();
11192 		return;
11193 	}
11194 
11195 	tg3_full_unlock(tp);
11196 
11197 	tg3_phy_stop(tp);
11198 
11199 	tg3_netif_stop(tp);
11200 
11201 	tg3_full_lock(tp, 1);
11202 
11203 	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11204 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
11205 		tp->write32_rx_mbox = tg3_write_flush_reg32;
11206 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
11207 		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11208 	}
11209 
11210 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11211 	err = tg3_init_hw(tp, true);
11212 	if (err)
11213 		goto out;
11214 
11215 	tg3_netif_start(tp);
11216 
11217 out:
11218 	tg3_full_unlock(tp);
11219 
11220 	if (!err)
11221 		tg3_phy_start(tp);
11222 
11223 	tg3_flag_clear(tp, RESET_TASK_PENDING);
11224 	rtnl_unlock();
11225 }
11226 
11227 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11228 {
11229 	irq_handler_t fn;
11230 	unsigned long flags;
11231 	char *name;
11232 	struct tg3_napi *tnapi = &tp->napi[irq_num];
11233 
11234 	if (tp->irq_cnt == 1)
11235 		name = tp->dev->name;
11236 	else {
11237 		name = &tnapi->irq_lbl[0];
11238 		if (tnapi->tx_buffers && tnapi->rx_rcb)
11239 			snprintf(name, IFNAMSIZ,
11240 				 "%s-txrx-%d", tp->dev->name, irq_num);
11241 		else if (tnapi->tx_buffers)
11242 			snprintf(name, IFNAMSIZ,
11243 				 "%s-tx-%d", tp->dev->name, irq_num);
11244 		else if (tnapi->rx_rcb)
11245 			snprintf(name, IFNAMSIZ,
11246 				 "%s-rx-%d", tp->dev->name, irq_num);
11247 		else
11248 			snprintf(name, IFNAMSIZ,
11249 				 "%s-%d", tp->dev->name, irq_num);
11250 		name[IFNAMSIZ-1] = 0;
11251 	}
11252 
11253 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11254 		fn = tg3_msi;
11255 		if (tg3_flag(tp, 1SHOT_MSI))
11256 			fn = tg3_msi_1shot;
11257 		flags = 0;
11258 	} else {
11259 		fn = tg3_interrupt;
11260 		if (tg3_flag(tp, TAGGED_STATUS))
11261 			fn = tg3_interrupt_tagged;
11262 		flags = IRQF_SHARED;
11263 	}
11264 
11265 	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11266 }
11267 
11268 static int tg3_test_interrupt(struct tg3 *tp)
11269 {
11270 	struct tg3_napi *tnapi = &tp->napi[0];
11271 	struct net_device *dev = tp->dev;
11272 	int err, i, intr_ok = 0;
11273 	u32 val;
11274 
11275 	if (!netif_running(dev))
11276 		return -ENODEV;
11277 
11278 	tg3_disable_ints(tp);
11279 
11280 	free_irq(tnapi->irq_vec, tnapi);
11281 
11282 	/*
11283 	 * Turn off MSI one shot mode.  Otherwise this test has no
11284 	 * observable way to know whether the interrupt was delivered.
11285 	 */
11286 	if (tg3_flag(tp, 57765_PLUS)) {
11287 		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11288 		tw32(MSGINT_MODE, val);
11289 	}
11290 
11291 	err = request_irq(tnapi->irq_vec, tg3_test_isr,
11292 			  IRQF_SHARED, dev->name, tnapi);
11293 	if (err)
11294 		return err;
11295 
11296 	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11297 	tg3_enable_ints(tp);
11298 
11299 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11300 	       tnapi->coal_now);
11301 
11302 	for (i = 0; i < 5; i++) {
11303 		u32 int_mbox, misc_host_ctrl;
11304 
11305 		int_mbox = tr32_mailbox(tnapi->int_mbox);
11306 		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11307 
11308 		if ((int_mbox != 0) ||
11309 		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11310 			intr_ok = 1;
11311 			break;
11312 		}
11313 
11314 		if (tg3_flag(tp, 57765_PLUS) &&
11315 		    tnapi->hw_status->status_tag != tnapi->last_tag)
11316 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11317 
11318 		msleep(10);
11319 	}
11320 
11321 	tg3_disable_ints(tp);
11322 
11323 	free_irq(tnapi->irq_vec, tnapi);
11324 
11325 	err = tg3_request_irq(tp, 0);
11326 
11327 	if (err)
11328 		return err;
11329 
11330 	if (intr_ok) {
11331 		/* Reenable MSI one shot mode. */
11332 		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11333 			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11334 			tw32(MSGINT_MODE, val);
11335 		}
11336 		return 0;
11337 	}
11338 
11339 	return -EIO;
11340 }
11341 
11342 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
11343  * successfully restored
11344  */
11345 static int tg3_test_msi(struct tg3 *tp)
11346 {
11347 	int err;
11348 	u16 pci_cmd;
11349 
11350 	if (!tg3_flag(tp, USING_MSI))
11351 		return 0;
11352 
11353 	/* Turn off SERR reporting in case MSI terminates with Master
11354 	 * Abort.
11355 	 */
11356 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11357 	pci_write_config_word(tp->pdev, PCI_COMMAND,
11358 			      pci_cmd & ~PCI_COMMAND_SERR);
11359 
11360 	err = tg3_test_interrupt(tp);
11361 
11362 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11363 
11364 	if (!err)
11365 		return 0;
11366 
11367 	/* other failures */
11368 	if (err != -EIO)
11369 		return err;
11370 
11371 	/* MSI test failed, go back to INTx mode */
11372 	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11373 		    "to INTx mode. Please report this failure to the PCI "
11374 		    "maintainer and include system chipset information\n");
11375 
11376 	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11377 
11378 	pci_disable_msi(tp->pdev);
11379 
11380 	tg3_flag_clear(tp, USING_MSI);
11381 	tp->napi[0].irq_vec = tp->pdev->irq;
11382 
11383 	err = tg3_request_irq(tp, 0);
11384 	if (err)
11385 		return err;
11386 
11387 	/* Need to reset the chip because the MSI cycle may have terminated
11388 	 * with Master Abort.
11389 	 */
11390 	tg3_full_lock(tp, 1);
11391 
11392 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11393 	err = tg3_init_hw(tp, true);
11394 
11395 	tg3_full_unlock(tp);
11396 
11397 	if (err)
11398 		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11399 
11400 	return err;
11401 }
11402 
11403 static int tg3_request_firmware(struct tg3 *tp)
11404 {
11405 	const struct tg3_firmware_hdr *fw_hdr;
11406 
11407 	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11408 		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11409 			   tp->fw_needed);
11410 		return -ENOENT;
11411 	}
11412 
11413 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11414 
11415 	/* Firmware blob starts with version numbers, followed by
11416 	 * start address and _full_ length including BSS sections
11417 	 * (which must be longer than the actual data, of course
11418 	 */
11419 
11420 	tp->fw_len = be32_to_cpu(fw_hdr->len);	/* includes bss */
11421 	if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11422 		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11423 			   tp->fw_len, tp->fw_needed);
11424 		release_firmware(tp->fw);
11425 		tp->fw = NULL;
11426 		return -EINVAL;
11427 	}
11428 
11429 	/* We no longer need firmware; we have it. */
11430 	tp->fw_needed = NULL;
11431 	return 0;
11432 }
11433 
11434 static u32 tg3_irq_count(struct tg3 *tp)
11435 {
11436 	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11437 
11438 	if (irq_cnt > 1) {
11439 		/* We want as many rx rings enabled as there are cpus.
11440 		 * In multiqueue MSI-X mode, the first MSI-X vector
11441 		 * only deals with link interrupts, etc, so we add
11442 		 * one to the number of vectors we are requesting.
11443 		 */
11444 		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11445 	}
11446 
11447 	return irq_cnt;
11448 }
11449 
11450 static bool tg3_enable_msix(struct tg3 *tp)
11451 {
11452 	int i, rc;
11453 	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11454 
11455 	tp->txq_cnt = tp->txq_req;
11456 	tp->rxq_cnt = tp->rxq_req;
11457 	if (!tp->rxq_cnt)
11458 		tp->rxq_cnt = netif_get_num_default_rss_queues();
11459 	if (tp->rxq_cnt > tp->rxq_max)
11460 		tp->rxq_cnt = tp->rxq_max;
11461 
11462 	/* Disable multiple TX rings by default.  Simple round-robin hardware
11463 	 * scheduling of the TX rings can cause starvation of rings with
11464 	 * small packets when other rings have TSO or jumbo packets.
11465 	 */
11466 	if (!tp->txq_req)
11467 		tp->txq_cnt = 1;
11468 
11469 	tp->irq_cnt = tg3_irq_count(tp);
11470 
11471 	for (i = 0; i < tp->irq_max; i++) {
11472 		msix_ent[i].entry  = i;
11473 		msix_ent[i].vector = 0;
11474 	}
11475 
11476 	rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11477 	if (rc < 0) {
11478 		return false;
11479 	} else if (rc < tp->irq_cnt) {
11480 		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11481 			      tp->irq_cnt, rc);
11482 		tp->irq_cnt = rc;
11483 		tp->rxq_cnt = max(rc - 1, 1);
11484 		if (tp->txq_cnt)
11485 			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11486 	}
11487 
11488 	for (i = 0; i < tp->irq_max; i++)
11489 		tp->napi[i].irq_vec = msix_ent[i].vector;
11490 
11491 	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11492 		pci_disable_msix(tp->pdev);
11493 		return false;
11494 	}
11495 
11496 	if (tp->irq_cnt == 1)
11497 		return true;
11498 
11499 	tg3_flag_set(tp, ENABLE_RSS);
11500 
11501 	if (tp->txq_cnt > 1)
11502 		tg3_flag_set(tp, ENABLE_TSS);
11503 
11504 	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11505 
11506 	return true;
11507 }
11508 
11509 static void tg3_ints_init(struct tg3 *tp)
11510 {
11511 	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11512 	    !tg3_flag(tp, TAGGED_STATUS)) {
11513 		/* All MSI supporting chips should support tagged
11514 		 * status.  Assert that this is the case.
11515 		 */
11516 		netdev_warn(tp->dev,
11517 			    "MSI without TAGGED_STATUS? Not using MSI\n");
11518 		goto defcfg;
11519 	}
11520 
11521 	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11522 		tg3_flag_set(tp, USING_MSIX);
11523 	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11524 		tg3_flag_set(tp, USING_MSI);
11525 
11526 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11527 		u32 msi_mode = tr32(MSGINT_MODE);
11528 		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11529 			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11530 		if (!tg3_flag(tp, 1SHOT_MSI))
11531 			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11532 		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11533 	}
11534 defcfg:
11535 	if (!tg3_flag(tp, USING_MSIX)) {
11536 		tp->irq_cnt = 1;
11537 		tp->napi[0].irq_vec = tp->pdev->irq;
11538 	}
11539 
11540 	if (tp->irq_cnt == 1) {
11541 		tp->txq_cnt = 1;
11542 		tp->rxq_cnt = 1;
11543 		netif_set_real_num_tx_queues(tp->dev, 1);
11544 		netif_set_real_num_rx_queues(tp->dev, 1);
11545 	}
11546 }
11547 
11548 static void tg3_ints_fini(struct tg3 *tp)
11549 {
11550 	if (tg3_flag(tp, USING_MSIX))
11551 		pci_disable_msix(tp->pdev);
11552 	else if (tg3_flag(tp, USING_MSI))
11553 		pci_disable_msi(tp->pdev);
11554 	tg3_flag_clear(tp, USING_MSI);
11555 	tg3_flag_clear(tp, USING_MSIX);
11556 	tg3_flag_clear(tp, ENABLE_RSS);
11557 	tg3_flag_clear(tp, ENABLE_TSS);
11558 }
11559 
11560 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11561 		     bool init)
11562 {
11563 	struct net_device *dev = tp->dev;
11564 	int i, err;
11565 
11566 	/*
11567 	 * Setup interrupts first so we know how
11568 	 * many NAPI resources to allocate
11569 	 */
11570 	tg3_ints_init(tp);
11571 
11572 	tg3_rss_check_indir_tbl(tp);
11573 
11574 	/* The placement of this call is tied
11575 	 * to the setup and use of Host TX descriptors.
11576 	 */
11577 	err = tg3_alloc_consistent(tp);
11578 	if (err)
11579 		goto out_ints_fini;
11580 
11581 	tg3_napi_init(tp);
11582 
11583 	tg3_napi_enable(tp);
11584 
11585 	for (i = 0; i < tp->irq_cnt; i++) {
11586 		err = tg3_request_irq(tp, i);
11587 		if (err) {
11588 			for (i--; i >= 0; i--) {
11589 				struct tg3_napi *tnapi = &tp->napi[i];
11590 
11591 				free_irq(tnapi->irq_vec, tnapi);
11592 			}
11593 			goto out_napi_fini;
11594 		}
11595 	}
11596 
11597 	tg3_full_lock(tp, 0);
11598 
11599 	if (init)
11600 		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11601 
11602 	err = tg3_init_hw(tp, reset_phy);
11603 	if (err) {
11604 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11605 		tg3_free_rings(tp);
11606 	}
11607 
11608 	tg3_full_unlock(tp);
11609 
11610 	if (err)
11611 		goto out_free_irq;
11612 
11613 	if (test_irq && tg3_flag(tp, USING_MSI)) {
11614 		err = tg3_test_msi(tp);
11615 
11616 		if (err) {
11617 			tg3_full_lock(tp, 0);
11618 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11619 			tg3_free_rings(tp);
11620 			tg3_full_unlock(tp);
11621 
11622 			goto out_napi_fini;
11623 		}
11624 
11625 		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11626 			u32 val = tr32(PCIE_TRANSACTION_CFG);
11627 
11628 			tw32(PCIE_TRANSACTION_CFG,
11629 			     val | PCIE_TRANS_CFG_1SHOT_MSI);
11630 		}
11631 	}
11632 
11633 	tg3_phy_start(tp);
11634 
11635 	tg3_hwmon_open(tp);
11636 
11637 	tg3_full_lock(tp, 0);
11638 
11639 	tg3_timer_start(tp);
11640 	tg3_flag_set(tp, INIT_COMPLETE);
11641 	tg3_enable_ints(tp);
11642 
11643 	tg3_ptp_resume(tp);
11644 
11645 	tg3_full_unlock(tp);
11646 
11647 	netif_tx_start_all_queues(dev);
11648 
11649 	/*
11650 	 * Reset loopback feature if it was turned on while the device was down
11651 	 * make sure that it's installed properly now.
11652 	 */
11653 	if (dev->features & NETIF_F_LOOPBACK)
11654 		tg3_set_loopback(dev, dev->features);
11655 
11656 	return 0;
11657 
11658 out_free_irq:
11659 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11660 		struct tg3_napi *tnapi = &tp->napi[i];
11661 		free_irq(tnapi->irq_vec, tnapi);
11662 	}
11663 
11664 out_napi_fini:
11665 	tg3_napi_disable(tp);
11666 	tg3_napi_fini(tp);
11667 	tg3_free_consistent(tp);
11668 
11669 out_ints_fini:
11670 	tg3_ints_fini(tp);
11671 
11672 	return err;
11673 }
11674 
11675 static void tg3_stop(struct tg3 *tp)
11676 {
11677 	int i;
11678 
11679 	tg3_reset_task_cancel(tp);
11680 	tg3_netif_stop(tp);
11681 
11682 	tg3_timer_stop(tp);
11683 
11684 	tg3_hwmon_close(tp);
11685 
11686 	tg3_phy_stop(tp);
11687 
11688 	tg3_full_lock(tp, 1);
11689 
11690 	tg3_disable_ints(tp);
11691 
11692 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11693 	tg3_free_rings(tp);
11694 	tg3_flag_clear(tp, INIT_COMPLETE);
11695 
11696 	tg3_full_unlock(tp);
11697 
11698 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11699 		struct tg3_napi *tnapi = &tp->napi[i];
11700 		free_irq(tnapi->irq_vec, tnapi);
11701 	}
11702 
11703 	tg3_ints_fini(tp);
11704 
11705 	tg3_napi_fini(tp);
11706 
11707 	tg3_free_consistent(tp);
11708 }
11709 
11710 static int tg3_open(struct net_device *dev)
11711 {
11712 	struct tg3 *tp = netdev_priv(dev);
11713 	int err;
11714 
11715 	if (tp->pcierr_recovery) {
11716 		netdev_err(dev, "Failed to open device. PCI error recovery "
11717 			   "in progress\n");
11718 		return -EAGAIN;
11719 	}
11720 
11721 	if (tp->fw_needed) {
11722 		err = tg3_request_firmware(tp);
11723 		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11724 			if (err) {
11725 				netdev_warn(tp->dev, "EEE capability disabled\n");
11726 				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11727 			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11728 				netdev_warn(tp->dev, "EEE capability restored\n");
11729 				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11730 			}
11731 		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11732 			if (err)
11733 				return err;
11734 		} else if (err) {
11735 			netdev_warn(tp->dev, "TSO capability disabled\n");
11736 			tg3_flag_clear(tp, TSO_CAPABLE);
11737 		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
11738 			netdev_notice(tp->dev, "TSO capability restored\n");
11739 			tg3_flag_set(tp, TSO_CAPABLE);
11740 		}
11741 	}
11742 
11743 	tg3_carrier_off(tp);
11744 
11745 	err = tg3_power_up(tp);
11746 	if (err)
11747 		return err;
11748 
11749 	tg3_full_lock(tp, 0);
11750 
11751 	tg3_disable_ints(tp);
11752 	tg3_flag_clear(tp, INIT_COMPLETE);
11753 
11754 	tg3_full_unlock(tp);
11755 
11756 	err = tg3_start(tp,
11757 			!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11758 			true, true);
11759 	if (err) {
11760 		tg3_frob_aux_power(tp, false);
11761 		pci_set_power_state(tp->pdev, PCI_D3hot);
11762 	}
11763 
11764 	return err;
11765 }
11766 
11767 static int tg3_close(struct net_device *dev)
11768 {
11769 	struct tg3 *tp = netdev_priv(dev);
11770 
11771 	if (tp->pcierr_recovery) {
11772 		netdev_err(dev, "Failed to close device. PCI error recovery "
11773 			   "in progress\n");
11774 		return -EAGAIN;
11775 	}
11776 
11777 	tg3_stop(tp);
11778 
11779 	if (pci_device_is_present(tp->pdev)) {
11780 		tg3_power_down_prepare(tp);
11781 
11782 		tg3_carrier_off(tp);
11783 	}
11784 	return 0;
11785 }
11786 
11787 static inline u64 get_stat64(tg3_stat64_t *val)
11788 {
11789        return ((u64)val->high << 32) | ((u64)val->low);
11790 }
11791 
11792 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11793 {
11794 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11795 
11796 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11797 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11798 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
11799 		u32 val;
11800 
11801 		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11802 			tg3_writephy(tp, MII_TG3_TEST1,
11803 				     val | MII_TG3_TEST1_CRC_EN);
11804 			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11805 		} else
11806 			val = 0;
11807 
11808 		tp->phy_crc_errors += val;
11809 
11810 		return tp->phy_crc_errors;
11811 	}
11812 
11813 	return get_stat64(&hw_stats->rx_fcs_errors);
11814 }
11815 
11816 #define ESTAT_ADD(member) \
11817 	estats->member =	old_estats->member + \
11818 				get_stat64(&hw_stats->member)
11819 
11820 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11821 {
11822 	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11823 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11824 
11825 	ESTAT_ADD(rx_octets);
11826 	ESTAT_ADD(rx_fragments);
11827 	ESTAT_ADD(rx_ucast_packets);
11828 	ESTAT_ADD(rx_mcast_packets);
11829 	ESTAT_ADD(rx_bcast_packets);
11830 	ESTAT_ADD(rx_fcs_errors);
11831 	ESTAT_ADD(rx_align_errors);
11832 	ESTAT_ADD(rx_xon_pause_rcvd);
11833 	ESTAT_ADD(rx_xoff_pause_rcvd);
11834 	ESTAT_ADD(rx_mac_ctrl_rcvd);
11835 	ESTAT_ADD(rx_xoff_entered);
11836 	ESTAT_ADD(rx_frame_too_long_errors);
11837 	ESTAT_ADD(rx_jabbers);
11838 	ESTAT_ADD(rx_undersize_packets);
11839 	ESTAT_ADD(rx_in_length_errors);
11840 	ESTAT_ADD(rx_out_length_errors);
11841 	ESTAT_ADD(rx_64_or_less_octet_packets);
11842 	ESTAT_ADD(rx_65_to_127_octet_packets);
11843 	ESTAT_ADD(rx_128_to_255_octet_packets);
11844 	ESTAT_ADD(rx_256_to_511_octet_packets);
11845 	ESTAT_ADD(rx_512_to_1023_octet_packets);
11846 	ESTAT_ADD(rx_1024_to_1522_octet_packets);
11847 	ESTAT_ADD(rx_1523_to_2047_octet_packets);
11848 	ESTAT_ADD(rx_2048_to_4095_octet_packets);
11849 	ESTAT_ADD(rx_4096_to_8191_octet_packets);
11850 	ESTAT_ADD(rx_8192_to_9022_octet_packets);
11851 
11852 	ESTAT_ADD(tx_octets);
11853 	ESTAT_ADD(tx_collisions);
11854 	ESTAT_ADD(tx_xon_sent);
11855 	ESTAT_ADD(tx_xoff_sent);
11856 	ESTAT_ADD(tx_flow_control);
11857 	ESTAT_ADD(tx_mac_errors);
11858 	ESTAT_ADD(tx_single_collisions);
11859 	ESTAT_ADD(tx_mult_collisions);
11860 	ESTAT_ADD(tx_deferred);
11861 	ESTAT_ADD(tx_excessive_collisions);
11862 	ESTAT_ADD(tx_late_collisions);
11863 	ESTAT_ADD(tx_collide_2times);
11864 	ESTAT_ADD(tx_collide_3times);
11865 	ESTAT_ADD(tx_collide_4times);
11866 	ESTAT_ADD(tx_collide_5times);
11867 	ESTAT_ADD(tx_collide_6times);
11868 	ESTAT_ADD(tx_collide_7times);
11869 	ESTAT_ADD(tx_collide_8times);
11870 	ESTAT_ADD(tx_collide_9times);
11871 	ESTAT_ADD(tx_collide_10times);
11872 	ESTAT_ADD(tx_collide_11times);
11873 	ESTAT_ADD(tx_collide_12times);
11874 	ESTAT_ADD(tx_collide_13times);
11875 	ESTAT_ADD(tx_collide_14times);
11876 	ESTAT_ADD(tx_collide_15times);
11877 	ESTAT_ADD(tx_ucast_packets);
11878 	ESTAT_ADD(tx_mcast_packets);
11879 	ESTAT_ADD(tx_bcast_packets);
11880 	ESTAT_ADD(tx_carrier_sense_errors);
11881 	ESTAT_ADD(tx_discards);
11882 	ESTAT_ADD(tx_errors);
11883 
11884 	ESTAT_ADD(dma_writeq_full);
11885 	ESTAT_ADD(dma_write_prioq_full);
11886 	ESTAT_ADD(rxbds_empty);
11887 	ESTAT_ADD(rx_discards);
11888 	ESTAT_ADD(rx_errors);
11889 	ESTAT_ADD(rx_threshold_hit);
11890 
11891 	ESTAT_ADD(dma_readq_full);
11892 	ESTAT_ADD(dma_read_prioq_full);
11893 	ESTAT_ADD(tx_comp_queue_full);
11894 
11895 	ESTAT_ADD(ring_set_send_prod_index);
11896 	ESTAT_ADD(ring_status_update);
11897 	ESTAT_ADD(nic_irqs);
11898 	ESTAT_ADD(nic_avoided_irqs);
11899 	ESTAT_ADD(nic_tx_threshold_hit);
11900 
11901 	ESTAT_ADD(mbuf_lwm_thresh_hit);
11902 }
11903 
11904 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11905 {
11906 	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11907 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11908 
11909 	stats->rx_packets = old_stats->rx_packets +
11910 		get_stat64(&hw_stats->rx_ucast_packets) +
11911 		get_stat64(&hw_stats->rx_mcast_packets) +
11912 		get_stat64(&hw_stats->rx_bcast_packets);
11913 
11914 	stats->tx_packets = old_stats->tx_packets +
11915 		get_stat64(&hw_stats->tx_ucast_packets) +
11916 		get_stat64(&hw_stats->tx_mcast_packets) +
11917 		get_stat64(&hw_stats->tx_bcast_packets);
11918 
11919 	stats->rx_bytes = old_stats->rx_bytes +
11920 		get_stat64(&hw_stats->rx_octets);
11921 	stats->tx_bytes = old_stats->tx_bytes +
11922 		get_stat64(&hw_stats->tx_octets);
11923 
11924 	stats->rx_errors = old_stats->rx_errors +
11925 		get_stat64(&hw_stats->rx_errors);
11926 	stats->tx_errors = old_stats->tx_errors +
11927 		get_stat64(&hw_stats->tx_errors) +
11928 		get_stat64(&hw_stats->tx_mac_errors) +
11929 		get_stat64(&hw_stats->tx_carrier_sense_errors) +
11930 		get_stat64(&hw_stats->tx_discards);
11931 
11932 	stats->multicast = old_stats->multicast +
11933 		get_stat64(&hw_stats->rx_mcast_packets);
11934 	stats->collisions = old_stats->collisions +
11935 		get_stat64(&hw_stats->tx_collisions);
11936 
11937 	stats->rx_length_errors = old_stats->rx_length_errors +
11938 		get_stat64(&hw_stats->rx_frame_too_long_errors) +
11939 		get_stat64(&hw_stats->rx_undersize_packets);
11940 
11941 	stats->rx_frame_errors = old_stats->rx_frame_errors +
11942 		get_stat64(&hw_stats->rx_align_errors);
11943 	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11944 		get_stat64(&hw_stats->tx_discards);
11945 	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11946 		get_stat64(&hw_stats->tx_carrier_sense_errors);
11947 
11948 	stats->rx_crc_errors = old_stats->rx_crc_errors +
11949 		tg3_calc_crc_errors(tp);
11950 
11951 	stats->rx_missed_errors = old_stats->rx_missed_errors +
11952 		get_stat64(&hw_stats->rx_discards);
11953 
11954 	stats->rx_dropped = tp->rx_dropped;
11955 	stats->tx_dropped = tp->tx_dropped;
11956 }
11957 
11958 static int tg3_get_regs_len(struct net_device *dev)
11959 {
11960 	return TG3_REG_BLK_SIZE;
11961 }
11962 
11963 static void tg3_get_regs(struct net_device *dev,
11964 		struct ethtool_regs *regs, void *_p)
11965 {
11966 	struct tg3 *tp = netdev_priv(dev);
11967 
11968 	regs->version = 0;
11969 
11970 	memset(_p, 0, TG3_REG_BLK_SIZE);
11971 
11972 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11973 		return;
11974 
11975 	tg3_full_lock(tp, 0);
11976 
11977 	tg3_dump_legacy_regs(tp, (u32 *)_p);
11978 
11979 	tg3_full_unlock(tp);
11980 }
11981 
11982 static int tg3_get_eeprom_len(struct net_device *dev)
11983 {
11984 	struct tg3 *tp = netdev_priv(dev);
11985 
11986 	return tp->nvram_size;
11987 }
11988 
11989 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11990 {
11991 	struct tg3 *tp = netdev_priv(dev);
11992 	int ret, cpmu_restore = 0;
11993 	u8  *pd;
11994 	u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11995 	__be32 val;
11996 
11997 	if (tg3_flag(tp, NO_NVRAM))
11998 		return -EINVAL;
11999 
12000 	offset = eeprom->offset;
12001 	len = eeprom->len;
12002 	eeprom->len = 0;
12003 
12004 	eeprom->magic = TG3_EEPROM_MAGIC;
12005 
12006 	/* Override clock, link aware and link idle modes */
12007 	if (tg3_flag(tp, CPMU_PRESENT)) {
12008 		cpmu_val = tr32(TG3_CPMU_CTRL);
12009 		if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
12010 				CPMU_CTRL_LINK_IDLE_MODE)) {
12011 			tw32(TG3_CPMU_CTRL, cpmu_val &
12012 					    ~(CPMU_CTRL_LINK_AWARE_MODE |
12013 					     CPMU_CTRL_LINK_IDLE_MODE));
12014 			cpmu_restore = 1;
12015 		}
12016 	}
12017 	tg3_override_clk(tp);
12018 
12019 	if (offset & 3) {
12020 		/* adjustments to start on required 4 byte boundary */
12021 		b_offset = offset & 3;
12022 		b_count = 4 - b_offset;
12023 		if (b_count > len) {
12024 			/* i.e. offset=1 len=2 */
12025 			b_count = len;
12026 		}
12027 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12028 		if (ret)
12029 			goto eeprom_done;
12030 		memcpy(data, ((char *)&val) + b_offset, b_count);
12031 		len -= b_count;
12032 		offset += b_count;
12033 		eeprom->len += b_count;
12034 	}
12035 
12036 	/* read bytes up to the last 4 byte boundary */
12037 	pd = &data[eeprom->len];
12038 	for (i = 0; i < (len - (len & 3)); i += 4) {
12039 		ret = tg3_nvram_read_be32(tp, offset + i, &val);
12040 		if (ret) {
12041 			if (i)
12042 				i -= 4;
12043 			eeprom->len += i;
12044 			goto eeprom_done;
12045 		}
12046 		memcpy(pd + i, &val, 4);
12047 		if (need_resched()) {
12048 			if (signal_pending(current)) {
12049 				eeprom->len += i;
12050 				ret = -EINTR;
12051 				goto eeprom_done;
12052 			}
12053 			cond_resched();
12054 		}
12055 	}
12056 	eeprom->len += i;
12057 
12058 	if (len & 3) {
12059 		/* read last bytes not ending on 4 byte boundary */
12060 		pd = &data[eeprom->len];
12061 		b_count = len & 3;
12062 		b_offset = offset + len - b_count;
12063 		ret = tg3_nvram_read_be32(tp, b_offset, &val);
12064 		if (ret)
12065 			goto eeprom_done;
12066 		memcpy(pd, &val, b_count);
12067 		eeprom->len += b_count;
12068 	}
12069 	ret = 0;
12070 
12071 eeprom_done:
12072 	/* Restore clock, link aware and link idle modes */
12073 	tg3_restore_clk(tp);
12074 	if (cpmu_restore)
12075 		tw32(TG3_CPMU_CTRL, cpmu_val);
12076 
12077 	return ret;
12078 }
12079 
12080 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12081 {
12082 	struct tg3 *tp = netdev_priv(dev);
12083 	int ret;
12084 	u32 offset, len, b_offset, odd_len;
12085 	u8 *buf;
12086 	__be32 start = 0, end;
12087 
12088 	if (tg3_flag(tp, NO_NVRAM) ||
12089 	    eeprom->magic != TG3_EEPROM_MAGIC)
12090 		return -EINVAL;
12091 
12092 	offset = eeprom->offset;
12093 	len = eeprom->len;
12094 
12095 	if ((b_offset = (offset & 3))) {
12096 		/* adjustments to start on required 4 byte boundary */
12097 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12098 		if (ret)
12099 			return ret;
12100 		len += b_offset;
12101 		offset &= ~3;
12102 		if (len < 4)
12103 			len = 4;
12104 	}
12105 
12106 	odd_len = 0;
12107 	if (len & 3) {
12108 		/* adjustments to end on required 4 byte boundary */
12109 		odd_len = 1;
12110 		len = (len + 3) & ~3;
12111 		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12112 		if (ret)
12113 			return ret;
12114 	}
12115 
12116 	buf = data;
12117 	if (b_offset || odd_len) {
12118 		buf = kmalloc(len, GFP_KERNEL);
12119 		if (!buf)
12120 			return -ENOMEM;
12121 		if (b_offset)
12122 			memcpy(buf, &start, 4);
12123 		if (odd_len)
12124 			memcpy(buf+len-4, &end, 4);
12125 		memcpy(buf + b_offset, data, eeprom->len);
12126 	}
12127 
12128 	ret = tg3_nvram_write_block(tp, offset, len, buf);
12129 
12130 	if (buf != data)
12131 		kfree(buf);
12132 
12133 	return ret;
12134 }
12135 
12136 static int tg3_get_link_ksettings(struct net_device *dev,
12137 				  struct ethtool_link_ksettings *cmd)
12138 {
12139 	struct tg3 *tp = netdev_priv(dev);
12140 	u32 supported, advertising;
12141 
12142 	if (tg3_flag(tp, USE_PHYLIB)) {
12143 		struct phy_device *phydev;
12144 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12145 			return -EAGAIN;
12146 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12147 		phy_ethtool_ksettings_get(phydev, cmd);
12148 
12149 		return 0;
12150 	}
12151 
12152 	supported = (SUPPORTED_Autoneg);
12153 
12154 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12155 		supported |= (SUPPORTED_1000baseT_Half |
12156 			      SUPPORTED_1000baseT_Full);
12157 
12158 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12159 		supported |= (SUPPORTED_100baseT_Half |
12160 			      SUPPORTED_100baseT_Full |
12161 			      SUPPORTED_10baseT_Half |
12162 			      SUPPORTED_10baseT_Full |
12163 			      SUPPORTED_TP);
12164 		cmd->base.port = PORT_TP;
12165 	} else {
12166 		supported |= SUPPORTED_FIBRE;
12167 		cmd->base.port = PORT_FIBRE;
12168 	}
12169 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12170 						supported);
12171 
12172 	advertising = tp->link_config.advertising;
12173 	if (tg3_flag(tp, PAUSE_AUTONEG)) {
12174 		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12175 			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12176 				advertising |= ADVERTISED_Pause;
12177 			} else {
12178 				advertising |= ADVERTISED_Pause |
12179 					ADVERTISED_Asym_Pause;
12180 			}
12181 		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12182 			advertising |= ADVERTISED_Asym_Pause;
12183 		}
12184 	}
12185 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12186 						advertising);
12187 
12188 	if (netif_running(dev) && tp->link_up) {
12189 		cmd->base.speed = tp->link_config.active_speed;
12190 		cmd->base.duplex = tp->link_config.active_duplex;
12191 		ethtool_convert_legacy_u32_to_link_mode(
12192 			cmd->link_modes.lp_advertising,
12193 			tp->link_config.rmt_adv);
12194 
12195 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12196 			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12197 				cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12198 			else
12199 				cmd->base.eth_tp_mdix = ETH_TP_MDI;
12200 		}
12201 	} else {
12202 		cmd->base.speed = SPEED_UNKNOWN;
12203 		cmd->base.duplex = DUPLEX_UNKNOWN;
12204 		cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12205 	}
12206 	cmd->base.phy_address = tp->phy_addr;
12207 	cmd->base.autoneg = tp->link_config.autoneg;
12208 	return 0;
12209 }
12210 
12211 static int tg3_set_link_ksettings(struct net_device *dev,
12212 				  const struct ethtool_link_ksettings *cmd)
12213 {
12214 	struct tg3 *tp = netdev_priv(dev);
12215 	u32 speed = cmd->base.speed;
12216 	u32 advertising;
12217 
12218 	if (tg3_flag(tp, USE_PHYLIB)) {
12219 		struct phy_device *phydev;
12220 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12221 			return -EAGAIN;
12222 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12223 		return phy_ethtool_ksettings_set(phydev, cmd);
12224 	}
12225 
12226 	if (cmd->base.autoneg != AUTONEG_ENABLE &&
12227 	    cmd->base.autoneg != AUTONEG_DISABLE)
12228 		return -EINVAL;
12229 
12230 	if (cmd->base.autoneg == AUTONEG_DISABLE &&
12231 	    cmd->base.duplex != DUPLEX_FULL &&
12232 	    cmd->base.duplex != DUPLEX_HALF)
12233 		return -EINVAL;
12234 
12235 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
12236 						cmd->link_modes.advertising);
12237 
12238 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
12239 		u32 mask = ADVERTISED_Autoneg |
12240 			   ADVERTISED_Pause |
12241 			   ADVERTISED_Asym_Pause;
12242 
12243 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12244 			mask |= ADVERTISED_1000baseT_Half |
12245 				ADVERTISED_1000baseT_Full;
12246 
12247 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12248 			mask |= ADVERTISED_100baseT_Half |
12249 				ADVERTISED_100baseT_Full |
12250 				ADVERTISED_10baseT_Half |
12251 				ADVERTISED_10baseT_Full |
12252 				ADVERTISED_TP;
12253 		else
12254 			mask |= ADVERTISED_FIBRE;
12255 
12256 		if (advertising & ~mask)
12257 			return -EINVAL;
12258 
12259 		mask &= (ADVERTISED_1000baseT_Half |
12260 			 ADVERTISED_1000baseT_Full |
12261 			 ADVERTISED_100baseT_Half |
12262 			 ADVERTISED_100baseT_Full |
12263 			 ADVERTISED_10baseT_Half |
12264 			 ADVERTISED_10baseT_Full);
12265 
12266 		advertising &= mask;
12267 	} else {
12268 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12269 			if (speed != SPEED_1000)
12270 				return -EINVAL;
12271 
12272 			if (cmd->base.duplex != DUPLEX_FULL)
12273 				return -EINVAL;
12274 		} else {
12275 			if (speed != SPEED_100 &&
12276 			    speed != SPEED_10)
12277 				return -EINVAL;
12278 		}
12279 	}
12280 
12281 	tg3_full_lock(tp, 0);
12282 
12283 	tp->link_config.autoneg = cmd->base.autoneg;
12284 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
12285 		tp->link_config.advertising = (advertising |
12286 					      ADVERTISED_Autoneg);
12287 		tp->link_config.speed = SPEED_UNKNOWN;
12288 		tp->link_config.duplex = DUPLEX_UNKNOWN;
12289 	} else {
12290 		tp->link_config.advertising = 0;
12291 		tp->link_config.speed = speed;
12292 		tp->link_config.duplex = cmd->base.duplex;
12293 	}
12294 
12295 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12296 
12297 	tg3_warn_mgmt_link_flap(tp);
12298 
12299 	if (netif_running(dev))
12300 		tg3_setup_phy(tp, true);
12301 
12302 	tg3_full_unlock(tp);
12303 
12304 	return 0;
12305 }
12306 
12307 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12308 {
12309 	struct tg3 *tp = netdev_priv(dev);
12310 
12311 	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12312 	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12313 	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12314 	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12315 }
12316 
12317 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12318 {
12319 	struct tg3 *tp = netdev_priv(dev);
12320 
12321 	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12322 		wol->supported = WAKE_MAGIC;
12323 	else
12324 		wol->supported = 0;
12325 	wol->wolopts = 0;
12326 	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12327 		wol->wolopts = WAKE_MAGIC;
12328 	memset(&wol->sopass, 0, sizeof(wol->sopass));
12329 }
12330 
12331 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12332 {
12333 	struct tg3 *tp = netdev_priv(dev);
12334 	struct device *dp = &tp->pdev->dev;
12335 
12336 	if (wol->wolopts & ~WAKE_MAGIC)
12337 		return -EINVAL;
12338 	if ((wol->wolopts & WAKE_MAGIC) &&
12339 	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12340 		return -EINVAL;
12341 
12342 	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12343 
12344 	if (device_may_wakeup(dp))
12345 		tg3_flag_set(tp, WOL_ENABLE);
12346 	else
12347 		tg3_flag_clear(tp, WOL_ENABLE);
12348 
12349 	return 0;
12350 }
12351 
12352 static u32 tg3_get_msglevel(struct net_device *dev)
12353 {
12354 	struct tg3 *tp = netdev_priv(dev);
12355 	return tp->msg_enable;
12356 }
12357 
12358 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12359 {
12360 	struct tg3 *tp = netdev_priv(dev);
12361 	tp->msg_enable = value;
12362 }
12363 
12364 static int tg3_nway_reset(struct net_device *dev)
12365 {
12366 	struct tg3 *tp = netdev_priv(dev);
12367 	int r;
12368 
12369 	if (!netif_running(dev))
12370 		return -EAGAIN;
12371 
12372 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12373 		return -EINVAL;
12374 
12375 	tg3_warn_mgmt_link_flap(tp);
12376 
12377 	if (tg3_flag(tp, USE_PHYLIB)) {
12378 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12379 			return -EAGAIN;
12380 		r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12381 	} else {
12382 		u32 bmcr;
12383 
12384 		spin_lock_bh(&tp->lock);
12385 		r = -EINVAL;
12386 		tg3_readphy(tp, MII_BMCR, &bmcr);
12387 		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12388 		    ((bmcr & BMCR_ANENABLE) ||
12389 		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12390 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12391 						   BMCR_ANENABLE);
12392 			r = 0;
12393 		}
12394 		spin_unlock_bh(&tp->lock);
12395 	}
12396 
12397 	return r;
12398 }
12399 
12400 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12401 {
12402 	struct tg3 *tp = netdev_priv(dev);
12403 
12404 	ering->rx_max_pending = tp->rx_std_ring_mask;
12405 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12406 		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12407 	else
12408 		ering->rx_jumbo_max_pending = 0;
12409 
12410 	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12411 
12412 	ering->rx_pending = tp->rx_pending;
12413 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12414 		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12415 	else
12416 		ering->rx_jumbo_pending = 0;
12417 
12418 	ering->tx_pending = tp->napi[0].tx_pending;
12419 }
12420 
12421 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12422 {
12423 	struct tg3 *tp = netdev_priv(dev);
12424 	int i, irq_sync = 0, err = 0;
12425 	bool reset_phy = false;
12426 
12427 	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12428 	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12429 	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12430 	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
12431 	    (tg3_flag(tp, TSO_BUG) &&
12432 	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12433 		return -EINVAL;
12434 
12435 	if (netif_running(dev)) {
12436 		tg3_phy_stop(tp);
12437 		tg3_netif_stop(tp);
12438 		irq_sync = 1;
12439 	}
12440 
12441 	tg3_full_lock(tp, irq_sync);
12442 
12443 	tp->rx_pending = ering->rx_pending;
12444 
12445 	if (tg3_flag(tp, MAX_RXPEND_64) &&
12446 	    tp->rx_pending > 63)
12447 		tp->rx_pending = 63;
12448 
12449 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12450 		tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12451 
12452 	for (i = 0; i < tp->irq_max; i++)
12453 		tp->napi[i].tx_pending = ering->tx_pending;
12454 
12455 	if (netif_running(dev)) {
12456 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12457 		/* Reset PHY to avoid PHY lock up */
12458 		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12459 		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
12460 		    tg3_asic_rev(tp) == ASIC_REV_5720)
12461 			reset_phy = true;
12462 
12463 		err = tg3_restart_hw(tp, reset_phy);
12464 		if (!err)
12465 			tg3_netif_start(tp);
12466 	}
12467 
12468 	tg3_full_unlock(tp);
12469 
12470 	if (irq_sync && !err)
12471 		tg3_phy_start(tp);
12472 
12473 	return err;
12474 }
12475 
12476 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12477 {
12478 	struct tg3 *tp = netdev_priv(dev);
12479 
12480 	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12481 
12482 	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12483 		epause->rx_pause = 1;
12484 	else
12485 		epause->rx_pause = 0;
12486 
12487 	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12488 		epause->tx_pause = 1;
12489 	else
12490 		epause->tx_pause = 0;
12491 }
12492 
12493 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12494 {
12495 	struct tg3 *tp = netdev_priv(dev);
12496 	int err = 0;
12497 	bool reset_phy = false;
12498 
12499 	if (tp->link_config.autoneg == AUTONEG_ENABLE)
12500 		tg3_warn_mgmt_link_flap(tp);
12501 
12502 	if (tg3_flag(tp, USE_PHYLIB)) {
12503 		struct phy_device *phydev;
12504 
12505 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12506 
12507 		if (!phy_validate_pause(phydev, epause))
12508 			return -EINVAL;
12509 
12510 		tp->link_config.flowctrl = 0;
12511 		phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
12512 		if (epause->rx_pause) {
12513 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12514 
12515 			if (epause->tx_pause) {
12516 				tp->link_config.flowctrl |= FLOW_CTRL_TX;
12517 			}
12518 		} else if (epause->tx_pause) {
12519 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12520 		}
12521 
12522 		if (epause->autoneg)
12523 			tg3_flag_set(tp, PAUSE_AUTONEG);
12524 		else
12525 			tg3_flag_clear(tp, PAUSE_AUTONEG);
12526 
12527 		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12528 			if (phydev->autoneg) {
12529 				/* phy_set_asym_pause() will
12530 				 * renegotiate the link to inform our
12531 				 * link partner of our flow control
12532 				 * settings, even if the flow control
12533 				 * is forced.  Let tg3_adjust_link()
12534 				 * do the final flow control setup.
12535 				 */
12536 				return 0;
12537 			}
12538 
12539 			if (!epause->autoneg)
12540 				tg3_setup_flow_control(tp, 0, 0);
12541 		}
12542 	} else {
12543 		int irq_sync = 0;
12544 
12545 		if (netif_running(dev)) {
12546 			tg3_netif_stop(tp);
12547 			irq_sync = 1;
12548 		}
12549 
12550 		tg3_full_lock(tp, irq_sync);
12551 
12552 		if (epause->autoneg)
12553 			tg3_flag_set(tp, PAUSE_AUTONEG);
12554 		else
12555 			tg3_flag_clear(tp, PAUSE_AUTONEG);
12556 		if (epause->rx_pause)
12557 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12558 		else
12559 			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12560 		if (epause->tx_pause)
12561 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12562 		else
12563 			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12564 
12565 		if (netif_running(dev)) {
12566 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12567 			/* Reset PHY to avoid PHY lock up */
12568 			if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12569 			    tg3_asic_rev(tp) == ASIC_REV_5719 ||
12570 			    tg3_asic_rev(tp) == ASIC_REV_5720)
12571 				reset_phy = true;
12572 
12573 			err = tg3_restart_hw(tp, reset_phy);
12574 			if (!err)
12575 				tg3_netif_start(tp);
12576 		}
12577 
12578 		tg3_full_unlock(tp);
12579 	}
12580 
12581 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12582 
12583 	return err;
12584 }
12585 
12586 static int tg3_get_sset_count(struct net_device *dev, int sset)
12587 {
12588 	switch (sset) {
12589 	case ETH_SS_TEST:
12590 		return TG3_NUM_TEST;
12591 	case ETH_SS_STATS:
12592 		return TG3_NUM_STATS;
12593 	default:
12594 		return -EOPNOTSUPP;
12595 	}
12596 }
12597 
12598 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12599 			 u32 *rules __always_unused)
12600 {
12601 	struct tg3 *tp = netdev_priv(dev);
12602 
12603 	if (!tg3_flag(tp, SUPPORT_MSIX))
12604 		return -EOPNOTSUPP;
12605 
12606 	switch (info->cmd) {
12607 	case ETHTOOL_GRXRINGS:
12608 		if (netif_running(tp->dev))
12609 			info->data = tp->rxq_cnt;
12610 		else {
12611 			info->data = num_online_cpus();
12612 			if (info->data > TG3_RSS_MAX_NUM_QS)
12613 				info->data = TG3_RSS_MAX_NUM_QS;
12614 		}
12615 
12616 		return 0;
12617 
12618 	default:
12619 		return -EOPNOTSUPP;
12620 	}
12621 }
12622 
12623 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12624 {
12625 	u32 size = 0;
12626 	struct tg3 *tp = netdev_priv(dev);
12627 
12628 	if (tg3_flag(tp, SUPPORT_MSIX))
12629 		size = TG3_RSS_INDIR_TBL_SIZE;
12630 
12631 	return size;
12632 }
12633 
12634 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12635 {
12636 	struct tg3 *tp = netdev_priv(dev);
12637 	int i;
12638 
12639 	if (hfunc)
12640 		*hfunc = ETH_RSS_HASH_TOP;
12641 	if (!indir)
12642 		return 0;
12643 
12644 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12645 		indir[i] = tp->rss_ind_tbl[i];
12646 
12647 	return 0;
12648 }
12649 
12650 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12651 			const u8 hfunc)
12652 {
12653 	struct tg3 *tp = netdev_priv(dev);
12654 	size_t i;
12655 
12656 	/* We require at least one supported parameter to be changed and no
12657 	 * change in any of the unsupported parameters
12658 	 */
12659 	if (key ||
12660 	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12661 		return -EOPNOTSUPP;
12662 
12663 	if (!indir)
12664 		return 0;
12665 
12666 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12667 		tp->rss_ind_tbl[i] = indir[i];
12668 
12669 	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12670 		return 0;
12671 
12672 	/* It is legal to write the indirection
12673 	 * table while the device is running.
12674 	 */
12675 	tg3_full_lock(tp, 0);
12676 	tg3_rss_write_indir_tbl(tp);
12677 	tg3_full_unlock(tp);
12678 
12679 	return 0;
12680 }
12681 
12682 static void tg3_get_channels(struct net_device *dev,
12683 			     struct ethtool_channels *channel)
12684 {
12685 	struct tg3 *tp = netdev_priv(dev);
12686 	u32 deflt_qs = netif_get_num_default_rss_queues();
12687 
12688 	channel->max_rx = tp->rxq_max;
12689 	channel->max_tx = tp->txq_max;
12690 
12691 	if (netif_running(dev)) {
12692 		channel->rx_count = tp->rxq_cnt;
12693 		channel->tx_count = tp->txq_cnt;
12694 	} else {
12695 		if (tp->rxq_req)
12696 			channel->rx_count = tp->rxq_req;
12697 		else
12698 			channel->rx_count = min(deflt_qs, tp->rxq_max);
12699 
12700 		if (tp->txq_req)
12701 			channel->tx_count = tp->txq_req;
12702 		else
12703 			channel->tx_count = min(deflt_qs, tp->txq_max);
12704 	}
12705 }
12706 
12707 static int tg3_set_channels(struct net_device *dev,
12708 			    struct ethtool_channels *channel)
12709 {
12710 	struct tg3 *tp = netdev_priv(dev);
12711 
12712 	if (!tg3_flag(tp, SUPPORT_MSIX))
12713 		return -EOPNOTSUPP;
12714 
12715 	if (channel->rx_count > tp->rxq_max ||
12716 	    channel->tx_count > tp->txq_max)
12717 		return -EINVAL;
12718 
12719 	tp->rxq_req = channel->rx_count;
12720 	tp->txq_req = channel->tx_count;
12721 
12722 	if (!netif_running(dev))
12723 		return 0;
12724 
12725 	tg3_stop(tp);
12726 
12727 	tg3_carrier_off(tp);
12728 
12729 	tg3_start(tp, true, false, false);
12730 
12731 	return 0;
12732 }
12733 
12734 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12735 {
12736 	switch (stringset) {
12737 	case ETH_SS_STATS:
12738 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12739 		break;
12740 	case ETH_SS_TEST:
12741 		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12742 		break;
12743 	default:
12744 		WARN_ON(1);	/* we need a WARN() */
12745 		break;
12746 	}
12747 }
12748 
12749 static int tg3_set_phys_id(struct net_device *dev,
12750 			    enum ethtool_phys_id_state state)
12751 {
12752 	struct tg3 *tp = netdev_priv(dev);
12753 
12754 	if (!netif_running(tp->dev))
12755 		return -EAGAIN;
12756 
12757 	switch (state) {
12758 	case ETHTOOL_ID_ACTIVE:
12759 		return 1;	/* cycle on/off once per second */
12760 
12761 	case ETHTOOL_ID_ON:
12762 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12763 		     LED_CTRL_1000MBPS_ON |
12764 		     LED_CTRL_100MBPS_ON |
12765 		     LED_CTRL_10MBPS_ON |
12766 		     LED_CTRL_TRAFFIC_OVERRIDE |
12767 		     LED_CTRL_TRAFFIC_BLINK |
12768 		     LED_CTRL_TRAFFIC_LED);
12769 		break;
12770 
12771 	case ETHTOOL_ID_OFF:
12772 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12773 		     LED_CTRL_TRAFFIC_OVERRIDE);
12774 		break;
12775 
12776 	case ETHTOOL_ID_INACTIVE:
12777 		tw32(MAC_LED_CTRL, tp->led_ctrl);
12778 		break;
12779 	}
12780 
12781 	return 0;
12782 }
12783 
12784 static void tg3_get_ethtool_stats(struct net_device *dev,
12785 				   struct ethtool_stats *estats, u64 *tmp_stats)
12786 {
12787 	struct tg3 *tp = netdev_priv(dev);
12788 
12789 	if (tp->hw_stats)
12790 		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12791 	else
12792 		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12793 }
12794 
12795 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12796 {
12797 	int i;
12798 	__be32 *buf;
12799 	u32 offset = 0, len = 0;
12800 	u32 magic, val;
12801 
12802 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12803 		return NULL;
12804 
12805 	if (magic == TG3_EEPROM_MAGIC) {
12806 		for (offset = TG3_NVM_DIR_START;
12807 		     offset < TG3_NVM_DIR_END;
12808 		     offset += TG3_NVM_DIRENT_SIZE) {
12809 			if (tg3_nvram_read(tp, offset, &val))
12810 				return NULL;
12811 
12812 			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12813 			    TG3_NVM_DIRTYPE_EXTVPD)
12814 				break;
12815 		}
12816 
12817 		if (offset != TG3_NVM_DIR_END) {
12818 			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12819 			if (tg3_nvram_read(tp, offset + 4, &offset))
12820 				return NULL;
12821 
12822 			offset = tg3_nvram_logical_addr(tp, offset);
12823 		}
12824 	}
12825 
12826 	if (!offset || !len) {
12827 		offset = TG3_NVM_VPD_OFF;
12828 		len = TG3_NVM_VPD_LEN;
12829 	}
12830 
12831 	buf = kmalloc(len, GFP_KERNEL);
12832 	if (buf == NULL)
12833 		return NULL;
12834 
12835 	if (magic == TG3_EEPROM_MAGIC) {
12836 		for (i = 0; i < len; i += 4) {
12837 			/* The data is in little-endian format in NVRAM.
12838 			 * Use the big-endian read routines to preserve
12839 			 * the byte order as it exists in NVRAM.
12840 			 */
12841 			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12842 				goto error;
12843 		}
12844 	} else {
12845 		u8 *ptr;
12846 		ssize_t cnt;
12847 		unsigned int pos = 0;
12848 
12849 		ptr = (u8 *)&buf[0];
12850 		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12851 			cnt = pci_read_vpd(tp->pdev, pos,
12852 					   len - pos, ptr);
12853 			if (cnt == -ETIMEDOUT || cnt == -EINTR)
12854 				cnt = 0;
12855 			else if (cnt < 0)
12856 				goto error;
12857 		}
12858 		if (pos != len)
12859 			goto error;
12860 	}
12861 
12862 	*vpdlen = len;
12863 
12864 	return buf;
12865 
12866 error:
12867 	kfree(buf);
12868 	return NULL;
12869 }
12870 
12871 #define NVRAM_TEST_SIZE 0x100
12872 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
12873 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
12874 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
12875 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
12876 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
12877 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
12878 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12879 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12880 
12881 static int tg3_test_nvram(struct tg3 *tp)
12882 {
12883 	u32 csum, magic, len;
12884 	__be32 *buf;
12885 	int i, j, k, err = 0, size;
12886 
12887 	if (tg3_flag(tp, NO_NVRAM))
12888 		return 0;
12889 
12890 	if (tg3_nvram_read(tp, 0, &magic) != 0)
12891 		return -EIO;
12892 
12893 	if (magic == TG3_EEPROM_MAGIC)
12894 		size = NVRAM_TEST_SIZE;
12895 	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12896 		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12897 		    TG3_EEPROM_SB_FORMAT_1) {
12898 			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12899 			case TG3_EEPROM_SB_REVISION_0:
12900 				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12901 				break;
12902 			case TG3_EEPROM_SB_REVISION_2:
12903 				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12904 				break;
12905 			case TG3_EEPROM_SB_REVISION_3:
12906 				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12907 				break;
12908 			case TG3_EEPROM_SB_REVISION_4:
12909 				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12910 				break;
12911 			case TG3_EEPROM_SB_REVISION_5:
12912 				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12913 				break;
12914 			case TG3_EEPROM_SB_REVISION_6:
12915 				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12916 				break;
12917 			default:
12918 				return -EIO;
12919 			}
12920 		} else
12921 			return 0;
12922 	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12923 		size = NVRAM_SELFBOOT_HW_SIZE;
12924 	else
12925 		return -EIO;
12926 
12927 	buf = kmalloc(size, GFP_KERNEL);
12928 	if (buf == NULL)
12929 		return -ENOMEM;
12930 
12931 	err = -EIO;
12932 	for (i = 0, j = 0; i < size; i += 4, j++) {
12933 		err = tg3_nvram_read_be32(tp, i, &buf[j]);
12934 		if (err)
12935 			break;
12936 	}
12937 	if (i < size)
12938 		goto out;
12939 
12940 	/* Selfboot format */
12941 	magic = be32_to_cpu(buf[0]);
12942 	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12943 	    TG3_EEPROM_MAGIC_FW) {
12944 		u8 *buf8 = (u8 *) buf, csum8 = 0;
12945 
12946 		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12947 		    TG3_EEPROM_SB_REVISION_2) {
12948 			/* For rev 2, the csum doesn't include the MBA. */
12949 			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12950 				csum8 += buf8[i];
12951 			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12952 				csum8 += buf8[i];
12953 		} else {
12954 			for (i = 0; i < size; i++)
12955 				csum8 += buf8[i];
12956 		}
12957 
12958 		if (csum8 == 0) {
12959 			err = 0;
12960 			goto out;
12961 		}
12962 
12963 		err = -EIO;
12964 		goto out;
12965 	}
12966 
12967 	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12968 	    TG3_EEPROM_MAGIC_HW) {
12969 		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12970 		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12971 		u8 *buf8 = (u8 *) buf;
12972 
12973 		/* Separate the parity bits and the data bytes.  */
12974 		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12975 			if ((i == 0) || (i == 8)) {
12976 				int l;
12977 				u8 msk;
12978 
12979 				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12980 					parity[k++] = buf8[i] & msk;
12981 				i++;
12982 			} else if (i == 16) {
12983 				int l;
12984 				u8 msk;
12985 
12986 				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12987 					parity[k++] = buf8[i] & msk;
12988 				i++;
12989 
12990 				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12991 					parity[k++] = buf8[i] & msk;
12992 				i++;
12993 			}
12994 			data[j++] = buf8[i];
12995 		}
12996 
12997 		err = -EIO;
12998 		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12999 			u8 hw8 = hweight8(data[i]);
13000 
13001 			if ((hw8 & 0x1) && parity[i])
13002 				goto out;
13003 			else if (!(hw8 & 0x1) && !parity[i])
13004 				goto out;
13005 		}
13006 		err = 0;
13007 		goto out;
13008 	}
13009 
13010 	err = -EIO;
13011 
13012 	/* Bootstrap checksum at offset 0x10 */
13013 	csum = calc_crc((unsigned char *) buf, 0x10);
13014 	if (csum != le32_to_cpu(buf[0x10/4]))
13015 		goto out;
13016 
13017 	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
13018 	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
13019 	if (csum != le32_to_cpu(buf[0xfc/4]))
13020 		goto out;
13021 
13022 	kfree(buf);
13023 
13024 	buf = tg3_vpd_readblock(tp, &len);
13025 	if (!buf)
13026 		return -ENOMEM;
13027 
13028 	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
13029 	if (i > 0) {
13030 		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
13031 		if (j < 0)
13032 			goto out;
13033 
13034 		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
13035 			goto out;
13036 
13037 		i += PCI_VPD_LRDT_TAG_SIZE;
13038 		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
13039 					      PCI_VPD_RO_KEYWORD_CHKSUM);
13040 		if (j > 0) {
13041 			u8 csum8 = 0;
13042 
13043 			j += PCI_VPD_INFO_FLD_HDR_SIZE;
13044 
13045 			for (i = 0; i <= j; i++)
13046 				csum8 += ((u8 *)buf)[i];
13047 
13048 			if (csum8)
13049 				goto out;
13050 		}
13051 	}
13052 
13053 	err = 0;
13054 
13055 out:
13056 	kfree(buf);
13057 	return err;
13058 }
13059 
13060 #define TG3_SERDES_TIMEOUT_SEC	2
13061 #define TG3_COPPER_TIMEOUT_SEC	6
13062 
13063 static int tg3_test_link(struct tg3 *tp)
13064 {
13065 	int i, max;
13066 
13067 	if (!netif_running(tp->dev))
13068 		return -ENODEV;
13069 
13070 	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13071 		max = TG3_SERDES_TIMEOUT_SEC;
13072 	else
13073 		max = TG3_COPPER_TIMEOUT_SEC;
13074 
13075 	for (i = 0; i < max; i++) {
13076 		if (tp->link_up)
13077 			return 0;
13078 
13079 		if (msleep_interruptible(1000))
13080 			break;
13081 	}
13082 
13083 	return -EIO;
13084 }
13085 
13086 /* Only test the commonly used registers */
13087 static int tg3_test_registers(struct tg3 *tp)
13088 {
13089 	int i, is_5705, is_5750;
13090 	u32 offset, read_mask, write_mask, val, save_val, read_val;
13091 	static struct {
13092 		u16 offset;
13093 		u16 flags;
13094 #define TG3_FL_5705	0x1
13095 #define TG3_FL_NOT_5705	0x2
13096 #define TG3_FL_NOT_5788	0x4
13097 #define TG3_FL_NOT_5750	0x8
13098 		u32 read_mask;
13099 		u32 write_mask;
13100 	} reg_tbl[] = {
13101 		/* MAC Control Registers */
13102 		{ MAC_MODE, TG3_FL_NOT_5705,
13103 			0x00000000, 0x00ef6f8c },
13104 		{ MAC_MODE, TG3_FL_5705,
13105 			0x00000000, 0x01ef6b8c },
13106 		{ MAC_STATUS, TG3_FL_NOT_5705,
13107 			0x03800107, 0x00000000 },
13108 		{ MAC_STATUS, TG3_FL_5705,
13109 			0x03800100, 0x00000000 },
13110 		{ MAC_ADDR_0_HIGH, 0x0000,
13111 			0x00000000, 0x0000ffff },
13112 		{ MAC_ADDR_0_LOW, 0x0000,
13113 			0x00000000, 0xffffffff },
13114 		{ MAC_RX_MTU_SIZE, 0x0000,
13115 			0x00000000, 0x0000ffff },
13116 		{ MAC_TX_MODE, 0x0000,
13117 			0x00000000, 0x00000070 },
13118 		{ MAC_TX_LENGTHS, 0x0000,
13119 			0x00000000, 0x00003fff },
13120 		{ MAC_RX_MODE, TG3_FL_NOT_5705,
13121 			0x00000000, 0x000007fc },
13122 		{ MAC_RX_MODE, TG3_FL_5705,
13123 			0x00000000, 0x000007dc },
13124 		{ MAC_HASH_REG_0, 0x0000,
13125 			0x00000000, 0xffffffff },
13126 		{ MAC_HASH_REG_1, 0x0000,
13127 			0x00000000, 0xffffffff },
13128 		{ MAC_HASH_REG_2, 0x0000,
13129 			0x00000000, 0xffffffff },
13130 		{ MAC_HASH_REG_3, 0x0000,
13131 			0x00000000, 0xffffffff },
13132 
13133 		/* Receive Data and Receive BD Initiator Control Registers. */
13134 		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13135 			0x00000000, 0xffffffff },
13136 		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13137 			0x00000000, 0xffffffff },
13138 		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13139 			0x00000000, 0x00000003 },
13140 		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13141 			0x00000000, 0xffffffff },
13142 		{ RCVDBDI_STD_BD+0, 0x0000,
13143 			0x00000000, 0xffffffff },
13144 		{ RCVDBDI_STD_BD+4, 0x0000,
13145 			0x00000000, 0xffffffff },
13146 		{ RCVDBDI_STD_BD+8, 0x0000,
13147 			0x00000000, 0xffff0002 },
13148 		{ RCVDBDI_STD_BD+0xc, 0x0000,
13149 			0x00000000, 0xffffffff },
13150 
13151 		/* Receive BD Initiator Control Registers. */
13152 		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13153 			0x00000000, 0xffffffff },
13154 		{ RCVBDI_STD_THRESH, TG3_FL_5705,
13155 			0x00000000, 0x000003ff },
13156 		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13157 			0x00000000, 0xffffffff },
13158 
13159 		/* Host Coalescing Control Registers. */
13160 		{ HOSTCC_MODE, TG3_FL_NOT_5705,
13161 			0x00000000, 0x00000004 },
13162 		{ HOSTCC_MODE, TG3_FL_5705,
13163 			0x00000000, 0x000000f6 },
13164 		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13165 			0x00000000, 0xffffffff },
13166 		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13167 			0x00000000, 0x000003ff },
13168 		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13169 			0x00000000, 0xffffffff },
13170 		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13171 			0x00000000, 0x000003ff },
13172 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13173 			0x00000000, 0xffffffff },
13174 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13175 			0x00000000, 0x000000ff },
13176 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13177 			0x00000000, 0xffffffff },
13178 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13179 			0x00000000, 0x000000ff },
13180 		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13181 			0x00000000, 0xffffffff },
13182 		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13183 			0x00000000, 0xffffffff },
13184 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13185 			0x00000000, 0xffffffff },
13186 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13187 			0x00000000, 0x000000ff },
13188 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13189 			0x00000000, 0xffffffff },
13190 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13191 			0x00000000, 0x000000ff },
13192 		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13193 			0x00000000, 0xffffffff },
13194 		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13195 			0x00000000, 0xffffffff },
13196 		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13197 			0x00000000, 0xffffffff },
13198 		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13199 			0x00000000, 0xffffffff },
13200 		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13201 			0x00000000, 0xffffffff },
13202 		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13203 			0xffffffff, 0x00000000 },
13204 		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13205 			0xffffffff, 0x00000000 },
13206 
13207 		/* Buffer Manager Control Registers. */
13208 		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13209 			0x00000000, 0x007fff80 },
13210 		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13211 			0x00000000, 0x007fffff },
13212 		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13213 			0x00000000, 0x0000003f },
13214 		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13215 			0x00000000, 0x000001ff },
13216 		{ BUFMGR_MB_HIGH_WATER, 0x0000,
13217 			0x00000000, 0x000001ff },
13218 		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13219 			0xffffffff, 0x00000000 },
13220 		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13221 			0xffffffff, 0x00000000 },
13222 
13223 		/* Mailbox Registers */
13224 		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13225 			0x00000000, 0x000001ff },
13226 		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13227 			0x00000000, 0x000001ff },
13228 		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13229 			0x00000000, 0x000007ff },
13230 		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13231 			0x00000000, 0x000001ff },
13232 
13233 		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
13234 	};
13235 
13236 	is_5705 = is_5750 = 0;
13237 	if (tg3_flag(tp, 5705_PLUS)) {
13238 		is_5705 = 1;
13239 		if (tg3_flag(tp, 5750_PLUS))
13240 			is_5750 = 1;
13241 	}
13242 
13243 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13244 		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13245 			continue;
13246 
13247 		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13248 			continue;
13249 
13250 		if (tg3_flag(tp, IS_5788) &&
13251 		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
13252 			continue;
13253 
13254 		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13255 			continue;
13256 
13257 		offset = (u32) reg_tbl[i].offset;
13258 		read_mask = reg_tbl[i].read_mask;
13259 		write_mask = reg_tbl[i].write_mask;
13260 
13261 		/* Save the original register content */
13262 		save_val = tr32(offset);
13263 
13264 		/* Determine the read-only value. */
13265 		read_val = save_val & read_mask;
13266 
13267 		/* Write zero to the register, then make sure the read-only bits
13268 		 * are not changed and the read/write bits are all zeros.
13269 		 */
13270 		tw32(offset, 0);
13271 
13272 		val = tr32(offset);
13273 
13274 		/* Test the read-only and read/write bits. */
13275 		if (((val & read_mask) != read_val) || (val & write_mask))
13276 			goto out;
13277 
13278 		/* Write ones to all the bits defined by RdMask and WrMask, then
13279 		 * make sure the read-only bits are not changed and the
13280 		 * read/write bits are all ones.
13281 		 */
13282 		tw32(offset, read_mask | write_mask);
13283 
13284 		val = tr32(offset);
13285 
13286 		/* Test the read-only bits. */
13287 		if ((val & read_mask) != read_val)
13288 			goto out;
13289 
13290 		/* Test the read/write bits. */
13291 		if ((val & write_mask) != write_mask)
13292 			goto out;
13293 
13294 		tw32(offset, save_val);
13295 	}
13296 
13297 	return 0;
13298 
13299 out:
13300 	if (netif_msg_hw(tp))
13301 		netdev_err(tp->dev,
13302 			   "Register test failed at offset %x\n", offset);
13303 	tw32(offset, save_val);
13304 	return -EIO;
13305 }
13306 
13307 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13308 {
13309 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13310 	int i;
13311 	u32 j;
13312 
13313 	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13314 		for (j = 0; j < len; j += 4) {
13315 			u32 val;
13316 
13317 			tg3_write_mem(tp, offset + j, test_pattern[i]);
13318 			tg3_read_mem(tp, offset + j, &val);
13319 			if (val != test_pattern[i])
13320 				return -EIO;
13321 		}
13322 	}
13323 	return 0;
13324 }
13325 
13326 static int tg3_test_memory(struct tg3 *tp)
13327 {
13328 	static struct mem_entry {
13329 		u32 offset;
13330 		u32 len;
13331 	} mem_tbl_570x[] = {
13332 		{ 0x00000000, 0x00b50},
13333 		{ 0x00002000, 0x1c000},
13334 		{ 0xffffffff, 0x00000}
13335 	}, mem_tbl_5705[] = {
13336 		{ 0x00000100, 0x0000c},
13337 		{ 0x00000200, 0x00008},
13338 		{ 0x00004000, 0x00800},
13339 		{ 0x00006000, 0x01000},
13340 		{ 0x00008000, 0x02000},
13341 		{ 0x00010000, 0x0e000},
13342 		{ 0xffffffff, 0x00000}
13343 	}, mem_tbl_5755[] = {
13344 		{ 0x00000200, 0x00008},
13345 		{ 0x00004000, 0x00800},
13346 		{ 0x00006000, 0x00800},
13347 		{ 0x00008000, 0x02000},
13348 		{ 0x00010000, 0x0c000},
13349 		{ 0xffffffff, 0x00000}
13350 	}, mem_tbl_5906[] = {
13351 		{ 0x00000200, 0x00008},
13352 		{ 0x00004000, 0x00400},
13353 		{ 0x00006000, 0x00400},
13354 		{ 0x00008000, 0x01000},
13355 		{ 0x00010000, 0x01000},
13356 		{ 0xffffffff, 0x00000}
13357 	}, mem_tbl_5717[] = {
13358 		{ 0x00000200, 0x00008},
13359 		{ 0x00010000, 0x0a000},
13360 		{ 0x00020000, 0x13c00},
13361 		{ 0xffffffff, 0x00000}
13362 	}, mem_tbl_57765[] = {
13363 		{ 0x00000200, 0x00008},
13364 		{ 0x00004000, 0x00800},
13365 		{ 0x00006000, 0x09800},
13366 		{ 0x00010000, 0x0a000},
13367 		{ 0xffffffff, 0x00000}
13368 	};
13369 	struct mem_entry *mem_tbl;
13370 	int err = 0;
13371 	int i;
13372 
13373 	if (tg3_flag(tp, 5717_PLUS))
13374 		mem_tbl = mem_tbl_5717;
13375 	else if (tg3_flag(tp, 57765_CLASS) ||
13376 		 tg3_asic_rev(tp) == ASIC_REV_5762)
13377 		mem_tbl = mem_tbl_57765;
13378 	else if (tg3_flag(tp, 5755_PLUS))
13379 		mem_tbl = mem_tbl_5755;
13380 	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13381 		mem_tbl = mem_tbl_5906;
13382 	else if (tg3_flag(tp, 5705_PLUS))
13383 		mem_tbl = mem_tbl_5705;
13384 	else
13385 		mem_tbl = mem_tbl_570x;
13386 
13387 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13388 		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13389 		if (err)
13390 			break;
13391 	}
13392 
13393 	return err;
13394 }
13395 
13396 #define TG3_TSO_MSS		500
13397 
13398 #define TG3_TSO_IP_HDR_LEN	20
13399 #define TG3_TSO_TCP_HDR_LEN	20
13400 #define TG3_TSO_TCP_OPT_LEN	12
13401 
13402 static const u8 tg3_tso_header[] = {
13403 0x08, 0x00,
13404 0x45, 0x00, 0x00, 0x00,
13405 0x00, 0x00, 0x40, 0x00,
13406 0x40, 0x06, 0x00, 0x00,
13407 0x0a, 0x00, 0x00, 0x01,
13408 0x0a, 0x00, 0x00, 0x02,
13409 0x0d, 0x00, 0xe0, 0x00,
13410 0x00, 0x00, 0x01, 0x00,
13411 0x00, 0x00, 0x02, 0x00,
13412 0x80, 0x10, 0x10, 0x00,
13413 0x14, 0x09, 0x00, 0x00,
13414 0x01, 0x01, 0x08, 0x0a,
13415 0x11, 0x11, 0x11, 0x11,
13416 0x11, 0x11, 0x11, 0x11,
13417 };
13418 
13419 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13420 {
13421 	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13422 	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13423 	u32 budget;
13424 	struct sk_buff *skb;
13425 	u8 *tx_data, *rx_data;
13426 	dma_addr_t map;
13427 	int num_pkts, tx_len, rx_len, i, err;
13428 	struct tg3_rx_buffer_desc *desc;
13429 	struct tg3_napi *tnapi, *rnapi;
13430 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13431 
13432 	tnapi = &tp->napi[0];
13433 	rnapi = &tp->napi[0];
13434 	if (tp->irq_cnt > 1) {
13435 		if (tg3_flag(tp, ENABLE_RSS))
13436 			rnapi = &tp->napi[1];
13437 		if (tg3_flag(tp, ENABLE_TSS))
13438 			tnapi = &tp->napi[1];
13439 	}
13440 	coal_now = tnapi->coal_now | rnapi->coal_now;
13441 
13442 	err = -EIO;
13443 
13444 	tx_len = pktsz;
13445 	skb = netdev_alloc_skb(tp->dev, tx_len);
13446 	if (!skb)
13447 		return -ENOMEM;
13448 
13449 	tx_data = skb_put(skb, tx_len);
13450 	memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13451 	memset(tx_data + ETH_ALEN, 0x0, 8);
13452 
13453 	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13454 
13455 	if (tso_loopback) {
13456 		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13457 
13458 		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13459 			      TG3_TSO_TCP_OPT_LEN;
13460 
13461 		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13462 		       sizeof(tg3_tso_header));
13463 		mss = TG3_TSO_MSS;
13464 
13465 		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13466 		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13467 
13468 		/* Set the total length field in the IP header */
13469 		iph->tot_len = htons((u16)(mss + hdr_len));
13470 
13471 		base_flags = (TXD_FLAG_CPU_PRE_DMA |
13472 			      TXD_FLAG_CPU_POST_DMA);
13473 
13474 		if (tg3_flag(tp, HW_TSO_1) ||
13475 		    tg3_flag(tp, HW_TSO_2) ||
13476 		    tg3_flag(tp, HW_TSO_3)) {
13477 			struct tcphdr *th;
13478 			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13479 			th = (struct tcphdr *)&tx_data[val];
13480 			th->check = 0;
13481 		} else
13482 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
13483 
13484 		if (tg3_flag(tp, HW_TSO_3)) {
13485 			mss |= (hdr_len & 0xc) << 12;
13486 			if (hdr_len & 0x10)
13487 				base_flags |= 0x00000010;
13488 			base_flags |= (hdr_len & 0x3e0) << 5;
13489 		} else if (tg3_flag(tp, HW_TSO_2))
13490 			mss |= hdr_len << 9;
13491 		else if (tg3_flag(tp, HW_TSO_1) ||
13492 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
13493 			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13494 		} else {
13495 			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13496 		}
13497 
13498 		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13499 	} else {
13500 		num_pkts = 1;
13501 		data_off = ETH_HLEN;
13502 
13503 		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13504 		    tx_len > VLAN_ETH_FRAME_LEN)
13505 			base_flags |= TXD_FLAG_JMB_PKT;
13506 	}
13507 
13508 	for (i = data_off; i < tx_len; i++)
13509 		tx_data[i] = (u8) (i & 0xff);
13510 
13511 	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13512 	if (pci_dma_mapping_error(tp->pdev, map)) {
13513 		dev_kfree_skb(skb);
13514 		return -EIO;
13515 	}
13516 
13517 	val = tnapi->tx_prod;
13518 	tnapi->tx_buffers[val].skb = skb;
13519 	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13520 
13521 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13522 	       rnapi->coal_now);
13523 
13524 	udelay(10);
13525 
13526 	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13527 
13528 	budget = tg3_tx_avail(tnapi);
13529 	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13530 			    base_flags | TXD_FLAG_END, mss, 0)) {
13531 		tnapi->tx_buffers[val].skb = NULL;
13532 		dev_kfree_skb(skb);
13533 		return -EIO;
13534 	}
13535 
13536 	tnapi->tx_prod++;
13537 
13538 	/* Sync BD data before updating mailbox */
13539 	wmb();
13540 
13541 	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13542 	tr32_mailbox(tnapi->prodmbox);
13543 
13544 	udelay(10);
13545 
13546 	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
13547 	for (i = 0; i < 35; i++) {
13548 		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13549 		       coal_now);
13550 
13551 		udelay(10);
13552 
13553 		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13554 		rx_idx = rnapi->hw_status->idx[0].rx_producer;
13555 		if ((tx_idx == tnapi->tx_prod) &&
13556 		    (rx_idx == (rx_start_idx + num_pkts)))
13557 			break;
13558 	}
13559 
13560 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13561 	dev_kfree_skb(skb);
13562 
13563 	if (tx_idx != tnapi->tx_prod)
13564 		goto out;
13565 
13566 	if (rx_idx != rx_start_idx + num_pkts)
13567 		goto out;
13568 
13569 	val = data_off;
13570 	while (rx_idx != rx_start_idx) {
13571 		desc = &rnapi->rx_rcb[rx_start_idx++];
13572 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13573 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13574 
13575 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13576 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13577 			goto out;
13578 
13579 		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13580 			 - ETH_FCS_LEN;
13581 
13582 		if (!tso_loopback) {
13583 			if (rx_len != tx_len)
13584 				goto out;
13585 
13586 			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13587 				if (opaque_key != RXD_OPAQUE_RING_STD)
13588 					goto out;
13589 			} else {
13590 				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13591 					goto out;
13592 			}
13593 		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13594 			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13595 			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
13596 			goto out;
13597 		}
13598 
13599 		if (opaque_key == RXD_OPAQUE_RING_STD) {
13600 			rx_data = tpr->rx_std_buffers[desc_idx].data;
13601 			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13602 					     mapping);
13603 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13604 			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13605 			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13606 					     mapping);
13607 		} else
13608 			goto out;
13609 
13610 		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13611 					    PCI_DMA_FROMDEVICE);
13612 
13613 		rx_data += TG3_RX_OFFSET(tp);
13614 		for (i = data_off; i < rx_len; i++, val++) {
13615 			if (*(rx_data + i) != (u8) (val & 0xff))
13616 				goto out;
13617 		}
13618 	}
13619 
13620 	err = 0;
13621 
13622 	/* tg3_free_rings will unmap and free the rx_data */
13623 out:
13624 	return err;
13625 }
13626 
13627 #define TG3_STD_LOOPBACK_FAILED		1
13628 #define TG3_JMB_LOOPBACK_FAILED		2
13629 #define TG3_TSO_LOOPBACK_FAILED		4
13630 #define TG3_LOOPBACK_FAILED \
13631 	(TG3_STD_LOOPBACK_FAILED | \
13632 	 TG3_JMB_LOOPBACK_FAILED | \
13633 	 TG3_TSO_LOOPBACK_FAILED)
13634 
13635 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13636 {
13637 	int err = -EIO;
13638 	u32 eee_cap;
13639 	u32 jmb_pkt_sz = 9000;
13640 
13641 	if (tp->dma_limit)
13642 		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13643 
13644 	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13645 	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13646 
13647 	if (!netif_running(tp->dev)) {
13648 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13649 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13650 		if (do_extlpbk)
13651 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13652 		goto done;
13653 	}
13654 
13655 	err = tg3_reset_hw(tp, true);
13656 	if (err) {
13657 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13658 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13659 		if (do_extlpbk)
13660 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13661 		goto done;
13662 	}
13663 
13664 	if (tg3_flag(tp, ENABLE_RSS)) {
13665 		int i;
13666 
13667 		/* Reroute all rx packets to the 1st queue */
13668 		for (i = MAC_RSS_INDIR_TBL_0;
13669 		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13670 			tw32(i, 0x0);
13671 	}
13672 
13673 	/* HW errata - mac loopback fails in some cases on 5780.
13674 	 * Normal traffic and PHY loopback are not affected by
13675 	 * errata.  Also, the MAC loopback test is deprecated for
13676 	 * all newer ASIC revisions.
13677 	 */
13678 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13679 	    !tg3_flag(tp, CPMU_PRESENT)) {
13680 		tg3_mac_loopback(tp, true);
13681 
13682 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13683 			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13684 
13685 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13686 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13687 			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13688 
13689 		tg3_mac_loopback(tp, false);
13690 	}
13691 
13692 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13693 	    !tg3_flag(tp, USE_PHYLIB)) {
13694 		int i;
13695 
13696 		tg3_phy_lpbk_set(tp, 0, false);
13697 
13698 		/* Wait for link */
13699 		for (i = 0; i < 100; i++) {
13700 			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13701 				break;
13702 			mdelay(1);
13703 		}
13704 
13705 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13706 			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13707 		if (tg3_flag(tp, TSO_CAPABLE) &&
13708 		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13709 			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13710 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13711 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13712 			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13713 
13714 		if (do_extlpbk) {
13715 			tg3_phy_lpbk_set(tp, 0, true);
13716 
13717 			/* All link indications report up, but the hardware
13718 			 * isn't really ready for about 20 msec.  Double it
13719 			 * to be sure.
13720 			 */
13721 			mdelay(40);
13722 
13723 			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13724 				data[TG3_EXT_LOOPB_TEST] |=
13725 							TG3_STD_LOOPBACK_FAILED;
13726 			if (tg3_flag(tp, TSO_CAPABLE) &&
13727 			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13728 				data[TG3_EXT_LOOPB_TEST] |=
13729 							TG3_TSO_LOOPBACK_FAILED;
13730 			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13731 			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13732 				data[TG3_EXT_LOOPB_TEST] |=
13733 							TG3_JMB_LOOPBACK_FAILED;
13734 		}
13735 
13736 		/* Re-enable gphy autopowerdown. */
13737 		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13738 			tg3_phy_toggle_apd(tp, true);
13739 	}
13740 
13741 	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13742 	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13743 
13744 done:
13745 	tp->phy_flags |= eee_cap;
13746 
13747 	return err;
13748 }
13749 
13750 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13751 			  u64 *data)
13752 {
13753 	struct tg3 *tp = netdev_priv(dev);
13754 	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13755 
13756 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13757 		if (tg3_power_up(tp)) {
13758 			etest->flags |= ETH_TEST_FL_FAILED;
13759 			memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13760 			return;
13761 		}
13762 		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13763 	}
13764 
13765 	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13766 
13767 	if (tg3_test_nvram(tp) != 0) {
13768 		etest->flags |= ETH_TEST_FL_FAILED;
13769 		data[TG3_NVRAM_TEST] = 1;
13770 	}
13771 	if (!doextlpbk && tg3_test_link(tp)) {
13772 		etest->flags |= ETH_TEST_FL_FAILED;
13773 		data[TG3_LINK_TEST] = 1;
13774 	}
13775 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
13776 		int err, err2 = 0, irq_sync = 0;
13777 
13778 		if (netif_running(dev)) {
13779 			tg3_phy_stop(tp);
13780 			tg3_netif_stop(tp);
13781 			irq_sync = 1;
13782 		}
13783 
13784 		tg3_full_lock(tp, irq_sync);
13785 		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13786 		err = tg3_nvram_lock(tp);
13787 		tg3_halt_cpu(tp, RX_CPU_BASE);
13788 		if (!tg3_flag(tp, 5705_PLUS))
13789 			tg3_halt_cpu(tp, TX_CPU_BASE);
13790 		if (!err)
13791 			tg3_nvram_unlock(tp);
13792 
13793 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13794 			tg3_phy_reset(tp);
13795 
13796 		if (tg3_test_registers(tp) != 0) {
13797 			etest->flags |= ETH_TEST_FL_FAILED;
13798 			data[TG3_REGISTER_TEST] = 1;
13799 		}
13800 
13801 		if (tg3_test_memory(tp) != 0) {
13802 			etest->flags |= ETH_TEST_FL_FAILED;
13803 			data[TG3_MEMORY_TEST] = 1;
13804 		}
13805 
13806 		if (doextlpbk)
13807 			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13808 
13809 		if (tg3_test_loopback(tp, data, doextlpbk))
13810 			etest->flags |= ETH_TEST_FL_FAILED;
13811 
13812 		tg3_full_unlock(tp);
13813 
13814 		if (tg3_test_interrupt(tp) != 0) {
13815 			etest->flags |= ETH_TEST_FL_FAILED;
13816 			data[TG3_INTERRUPT_TEST] = 1;
13817 		}
13818 
13819 		tg3_full_lock(tp, 0);
13820 
13821 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13822 		if (netif_running(dev)) {
13823 			tg3_flag_set(tp, INIT_COMPLETE);
13824 			err2 = tg3_restart_hw(tp, true);
13825 			if (!err2)
13826 				tg3_netif_start(tp);
13827 		}
13828 
13829 		tg3_full_unlock(tp);
13830 
13831 		if (irq_sync && !err2)
13832 			tg3_phy_start(tp);
13833 	}
13834 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13835 		tg3_power_down_prepare(tp);
13836 
13837 }
13838 
13839 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13840 {
13841 	struct tg3 *tp = netdev_priv(dev);
13842 	struct hwtstamp_config stmpconf;
13843 
13844 	if (!tg3_flag(tp, PTP_CAPABLE))
13845 		return -EOPNOTSUPP;
13846 
13847 	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13848 		return -EFAULT;
13849 
13850 	if (stmpconf.flags)
13851 		return -EINVAL;
13852 
13853 	if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13854 	    stmpconf.tx_type != HWTSTAMP_TX_OFF)
13855 		return -ERANGE;
13856 
13857 	switch (stmpconf.rx_filter) {
13858 	case HWTSTAMP_FILTER_NONE:
13859 		tp->rxptpctl = 0;
13860 		break;
13861 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13862 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13863 			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13864 		break;
13865 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13866 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13867 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13868 		break;
13869 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13870 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13871 			       TG3_RX_PTP_CTL_DELAY_REQ;
13872 		break;
13873 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
13874 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13875 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13876 		break;
13877 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13878 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13879 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13880 		break;
13881 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13882 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13883 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13884 		break;
13885 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
13886 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13887 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13888 		break;
13889 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13890 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13891 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13892 		break;
13893 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13894 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13895 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13896 		break;
13897 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13898 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13899 			       TG3_RX_PTP_CTL_DELAY_REQ;
13900 		break;
13901 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13902 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13903 			       TG3_RX_PTP_CTL_DELAY_REQ;
13904 		break;
13905 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13906 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13907 			       TG3_RX_PTP_CTL_DELAY_REQ;
13908 		break;
13909 	default:
13910 		return -ERANGE;
13911 	}
13912 
13913 	if (netif_running(dev) && tp->rxptpctl)
13914 		tw32(TG3_RX_PTP_CTL,
13915 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13916 
13917 	if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13918 		tg3_flag_set(tp, TX_TSTAMP_EN);
13919 	else
13920 		tg3_flag_clear(tp, TX_TSTAMP_EN);
13921 
13922 	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13923 		-EFAULT : 0;
13924 }
13925 
13926 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13927 {
13928 	struct tg3 *tp = netdev_priv(dev);
13929 	struct hwtstamp_config stmpconf;
13930 
13931 	if (!tg3_flag(tp, PTP_CAPABLE))
13932 		return -EOPNOTSUPP;
13933 
13934 	stmpconf.flags = 0;
13935 	stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13936 			    HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13937 
13938 	switch (tp->rxptpctl) {
13939 	case 0:
13940 		stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13941 		break;
13942 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13943 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13944 		break;
13945 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13946 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13947 		break;
13948 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13949 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13950 		break;
13951 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13952 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13953 		break;
13954 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13955 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13956 		break;
13957 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13958 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13959 		break;
13960 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13961 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13962 		break;
13963 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13964 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13965 		break;
13966 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13967 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13968 		break;
13969 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13970 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13971 		break;
13972 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13973 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13974 		break;
13975 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13976 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13977 		break;
13978 	default:
13979 		WARN_ON_ONCE(1);
13980 		return -ERANGE;
13981 	}
13982 
13983 	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13984 		-EFAULT : 0;
13985 }
13986 
13987 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13988 {
13989 	struct mii_ioctl_data *data = if_mii(ifr);
13990 	struct tg3 *tp = netdev_priv(dev);
13991 	int err;
13992 
13993 	if (tg3_flag(tp, USE_PHYLIB)) {
13994 		struct phy_device *phydev;
13995 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13996 			return -EAGAIN;
13997 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
13998 		return phy_mii_ioctl(phydev, ifr, cmd);
13999 	}
14000 
14001 	switch (cmd) {
14002 	case SIOCGMIIPHY:
14003 		data->phy_id = tp->phy_addr;
14004 
14005 		/* fall through */
14006 	case SIOCGMIIREG: {
14007 		u32 mii_regval;
14008 
14009 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14010 			break;			/* We have no PHY */
14011 
14012 		if (!netif_running(dev))
14013 			return -EAGAIN;
14014 
14015 		spin_lock_bh(&tp->lock);
14016 		err = __tg3_readphy(tp, data->phy_id & 0x1f,
14017 				    data->reg_num & 0x1f, &mii_regval);
14018 		spin_unlock_bh(&tp->lock);
14019 
14020 		data->val_out = mii_regval;
14021 
14022 		return err;
14023 	}
14024 
14025 	case SIOCSMIIREG:
14026 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14027 			break;			/* We have no PHY */
14028 
14029 		if (!netif_running(dev))
14030 			return -EAGAIN;
14031 
14032 		spin_lock_bh(&tp->lock);
14033 		err = __tg3_writephy(tp, data->phy_id & 0x1f,
14034 				     data->reg_num & 0x1f, data->val_in);
14035 		spin_unlock_bh(&tp->lock);
14036 
14037 		return err;
14038 
14039 	case SIOCSHWTSTAMP:
14040 		return tg3_hwtstamp_set(dev, ifr);
14041 
14042 	case SIOCGHWTSTAMP:
14043 		return tg3_hwtstamp_get(dev, ifr);
14044 
14045 	default:
14046 		/* do nothing */
14047 		break;
14048 	}
14049 	return -EOPNOTSUPP;
14050 }
14051 
14052 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14053 {
14054 	struct tg3 *tp = netdev_priv(dev);
14055 
14056 	memcpy(ec, &tp->coal, sizeof(*ec));
14057 	return 0;
14058 }
14059 
14060 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14061 {
14062 	struct tg3 *tp = netdev_priv(dev);
14063 	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14064 	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14065 
14066 	if (!tg3_flag(tp, 5705_PLUS)) {
14067 		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14068 		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14069 		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14070 		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14071 	}
14072 
14073 	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14074 	    (!ec->rx_coalesce_usecs) ||
14075 	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14076 	    (!ec->tx_coalesce_usecs) ||
14077 	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14078 	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14079 	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14080 	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14081 	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14082 	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14083 	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14084 	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14085 		return -EINVAL;
14086 
14087 	/* Only copy relevant parameters, ignore all others. */
14088 	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14089 	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14090 	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14091 	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14092 	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14093 	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14094 	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14095 	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14096 	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14097 
14098 	if (netif_running(dev)) {
14099 		tg3_full_lock(tp, 0);
14100 		__tg3_set_coalesce(tp, &tp->coal);
14101 		tg3_full_unlock(tp);
14102 	}
14103 	return 0;
14104 }
14105 
14106 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14107 {
14108 	struct tg3 *tp = netdev_priv(dev);
14109 
14110 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14111 		netdev_warn(tp->dev, "Board does not support EEE!\n");
14112 		return -EOPNOTSUPP;
14113 	}
14114 
14115 	if (edata->advertised != tp->eee.advertised) {
14116 		netdev_warn(tp->dev,
14117 			    "Direct manipulation of EEE advertisement is not supported\n");
14118 		return -EINVAL;
14119 	}
14120 
14121 	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14122 		netdev_warn(tp->dev,
14123 			    "Maximal Tx Lpi timer supported is %#x(u)\n",
14124 			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14125 		return -EINVAL;
14126 	}
14127 
14128 	tp->eee = *edata;
14129 
14130 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14131 	tg3_warn_mgmt_link_flap(tp);
14132 
14133 	if (netif_running(tp->dev)) {
14134 		tg3_full_lock(tp, 0);
14135 		tg3_setup_eee(tp);
14136 		tg3_phy_reset(tp);
14137 		tg3_full_unlock(tp);
14138 	}
14139 
14140 	return 0;
14141 }
14142 
14143 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14144 {
14145 	struct tg3 *tp = netdev_priv(dev);
14146 
14147 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14148 		netdev_warn(tp->dev,
14149 			    "Board does not support EEE!\n");
14150 		return -EOPNOTSUPP;
14151 	}
14152 
14153 	*edata = tp->eee;
14154 	return 0;
14155 }
14156 
14157 static const struct ethtool_ops tg3_ethtool_ops = {
14158 	.get_drvinfo		= tg3_get_drvinfo,
14159 	.get_regs_len		= tg3_get_regs_len,
14160 	.get_regs		= tg3_get_regs,
14161 	.get_wol		= tg3_get_wol,
14162 	.set_wol		= tg3_set_wol,
14163 	.get_msglevel		= tg3_get_msglevel,
14164 	.set_msglevel		= tg3_set_msglevel,
14165 	.nway_reset		= tg3_nway_reset,
14166 	.get_link		= ethtool_op_get_link,
14167 	.get_eeprom_len		= tg3_get_eeprom_len,
14168 	.get_eeprom		= tg3_get_eeprom,
14169 	.set_eeprom		= tg3_set_eeprom,
14170 	.get_ringparam		= tg3_get_ringparam,
14171 	.set_ringparam		= tg3_set_ringparam,
14172 	.get_pauseparam		= tg3_get_pauseparam,
14173 	.set_pauseparam		= tg3_set_pauseparam,
14174 	.self_test		= tg3_self_test,
14175 	.get_strings		= tg3_get_strings,
14176 	.set_phys_id		= tg3_set_phys_id,
14177 	.get_ethtool_stats	= tg3_get_ethtool_stats,
14178 	.get_coalesce		= tg3_get_coalesce,
14179 	.set_coalesce		= tg3_set_coalesce,
14180 	.get_sset_count		= tg3_get_sset_count,
14181 	.get_rxnfc		= tg3_get_rxnfc,
14182 	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
14183 	.get_rxfh		= tg3_get_rxfh,
14184 	.set_rxfh		= tg3_set_rxfh,
14185 	.get_channels		= tg3_get_channels,
14186 	.set_channels		= tg3_set_channels,
14187 	.get_ts_info		= tg3_get_ts_info,
14188 	.get_eee		= tg3_get_eee,
14189 	.set_eee		= tg3_set_eee,
14190 	.get_link_ksettings	= tg3_get_link_ksettings,
14191 	.set_link_ksettings	= tg3_set_link_ksettings,
14192 };
14193 
14194 static void tg3_get_stats64(struct net_device *dev,
14195 			    struct rtnl_link_stats64 *stats)
14196 {
14197 	struct tg3 *tp = netdev_priv(dev);
14198 
14199 	spin_lock_bh(&tp->lock);
14200 	if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14201 		*stats = tp->net_stats_prev;
14202 		spin_unlock_bh(&tp->lock);
14203 		return;
14204 	}
14205 
14206 	tg3_get_nstats(tp, stats);
14207 	spin_unlock_bh(&tp->lock);
14208 }
14209 
14210 static void tg3_set_rx_mode(struct net_device *dev)
14211 {
14212 	struct tg3 *tp = netdev_priv(dev);
14213 
14214 	if (!netif_running(dev))
14215 		return;
14216 
14217 	tg3_full_lock(tp, 0);
14218 	__tg3_set_rx_mode(dev);
14219 	tg3_full_unlock(tp);
14220 }
14221 
14222 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14223 			       int new_mtu)
14224 {
14225 	dev->mtu = new_mtu;
14226 
14227 	if (new_mtu > ETH_DATA_LEN) {
14228 		if (tg3_flag(tp, 5780_CLASS)) {
14229 			netdev_update_features(dev);
14230 			tg3_flag_clear(tp, TSO_CAPABLE);
14231 		} else {
14232 			tg3_flag_set(tp, JUMBO_RING_ENABLE);
14233 		}
14234 	} else {
14235 		if (tg3_flag(tp, 5780_CLASS)) {
14236 			tg3_flag_set(tp, TSO_CAPABLE);
14237 			netdev_update_features(dev);
14238 		}
14239 		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14240 	}
14241 }
14242 
14243 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14244 {
14245 	struct tg3 *tp = netdev_priv(dev);
14246 	int err;
14247 	bool reset_phy = false;
14248 
14249 	if (!netif_running(dev)) {
14250 		/* We'll just catch it later when the
14251 		 * device is up'd.
14252 		 */
14253 		tg3_set_mtu(dev, tp, new_mtu);
14254 		return 0;
14255 	}
14256 
14257 	tg3_phy_stop(tp);
14258 
14259 	tg3_netif_stop(tp);
14260 
14261 	tg3_set_mtu(dev, tp, new_mtu);
14262 
14263 	tg3_full_lock(tp, 1);
14264 
14265 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14266 
14267 	/* Reset PHY, otherwise the read DMA engine will be in a mode that
14268 	 * breaks all requests to 256 bytes.
14269 	 */
14270 	if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14271 	    tg3_asic_rev(tp) == ASIC_REV_5717 ||
14272 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
14273 	    tg3_asic_rev(tp) == ASIC_REV_5720)
14274 		reset_phy = true;
14275 
14276 	err = tg3_restart_hw(tp, reset_phy);
14277 
14278 	if (!err)
14279 		tg3_netif_start(tp);
14280 
14281 	tg3_full_unlock(tp);
14282 
14283 	if (!err)
14284 		tg3_phy_start(tp);
14285 
14286 	return err;
14287 }
14288 
14289 static const struct net_device_ops tg3_netdev_ops = {
14290 	.ndo_open		= tg3_open,
14291 	.ndo_stop		= tg3_close,
14292 	.ndo_start_xmit		= tg3_start_xmit,
14293 	.ndo_get_stats64	= tg3_get_stats64,
14294 	.ndo_validate_addr	= eth_validate_addr,
14295 	.ndo_set_rx_mode	= tg3_set_rx_mode,
14296 	.ndo_set_mac_address	= tg3_set_mac_addr,
14297 	.ndo_do_ioctl		= tg3_ioctl,
14298 	.ndo_tx_timeout		= tg3_tx_timeout,
14299 	.ndo_change_mtu		= tg3_change_mtu,
14300 	.ndo_fix_features	= tg3_fix_features,
14301 	.ndo_set_features	= tg3_set_features,
14302 #ifdef CONFIG_NET_POLL_CONTROLLER
14303 	.ndo_poll_controller	= tg3_poll_controller,
14304 #endif
14305 };
14306 
14307 static void tg3_get_eeprom_size(struct tg3 *tp)
14308 {
14309 	u32 cursize, val, magic;
14310 
14311 	tp->nvram_size = EEPROM_CHIP_SIZE;
14312 
14313 	if (tg3_nvram_read(tp, 0, &magic) != 0)
14314 		return;
14315 
14316 	if ((magic != TG3_EEPROM_MAGIC) &&
14317 	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14318 	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14319 		return;
14320 
14321 	/*
14322 	 * Size the chip by reading offsets at increasing powers of two.
14323 	 * When we encounter our validation signature, we know the addressing
14324 	 * has wrapped around, and thus have our chip size.
14325 	 */
14326 	cursize = 0x10;
14327 
14328 	while (cursize < tp->nvram_size) {
14329 		if (tg3_nvram_read(tp, cursize, &val) != 0)
14330 			return;
14331 
14332 		if (val == magic)
14333 			break;
14334 
14335 		cursize <<= 1;
14336 	}
14337 
14338 	tp->nvram_size = cursize;
14339 }
14340 
14341 static void tg3_get_nvram_size(struct tg3 *tp)
14342 {
14343 	u32 val;
14344 
14345 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14346 		return;
14347 
14348 	/* Selfboot format */
14349 	if (val != TG3_EEPROM_MAGIC) {
14350 		tg3_get_eeprom_size(tp);
14351 		return;
14352 	}
14353 
14354 	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14355 		if (val != 0) {
14356 			/* This is confusing.  We want to operate on the
14357 			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
14358 			 * call will read from NVRAM and byteswap the data
14359 			 * according to the byteswapping settings for all
14360 			 * other register accesses.  This ensures the data we
14361 			 * want will always reside in the lower 16-bits.
14362 			 * However, the data in NVRAM is in LE format, which
14363 			 * means the data from the NVRAM read will always be
14364 			 * opposite the endianness of the CPU.  The 16-bit
14365 			 * byteswap then brings the data to CPU endianness.
14366 			 */
14367 			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14368 			return;
14369 		}
14370 	}
14371 	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14372 }
14373 
14374 static void tg3_get_nvram_info(struct tg3 *tp)
14375 {
14376 	u32 nvcfg1;
14377 
14378 	nvcfg1 = tr32(NVRAM_CFG1);
14379 	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14380 		tg3_flag_set(tp, FLASH);
14381 	} else {
14382 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14383 		tw32(NVRAM_CFG1, nvcfg1);
14384 	}
14385 
14386 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14387 	    tg3_flag(tp, 5780_CLASS)) {
14388 		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14389 		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14390 			tp->nvram_jedecnum = JEDEC_ATMEL;
14391 			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14392 			tg3_flag_set(tp, NVRAM_BUFFERED);
14393 			break;
14394 		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14395 			tp->nvram_jedecnum = JEDEC_ATMEL;
14396 			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14397 			break;
14398 		case FLASH_VENDOR_ATMEL_EEPROM:
14399 			tp->nvram_jedecnum = JEDEC_ATMEL;
14400 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14401 			tg3_flag_set(tp, NVRAM_BUFFERED);
14402 			break;
14403 		case FLASH_VENDOR_ST:
14404 			tp->nvram_jedecnum = JEDEC_ST;
14405 			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14406 			tg3_flag_set(tp, NVRAM_BUFFERED);
14407 			break;
14408 		case FLASH_VENDOR_SAIFUN:
14409 			tp->nvram_jedecnum = JEDEC_SAIFUN;
14410 			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14411 			break;
14412 		case FLASH_VENDOR_SST_SMALL:
14413 		case FLASH_VENDOR_SST_LARGE:
14414 			tp->nvram_jedecnum = JEDEC_SST;
14415 			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14416 			break;
14417 		}
14418 	} else {
14419 		tp->nvram_jedecnum = JEDEC_ATMEL;
14420 		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14421 		tg3_flag_set(tp, NVRAM_BUFFERED);
14422 	}
14423 }
14424 
14425 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14426 {
14427 	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14428 	case FLASH_5752PAGE_SIZE_256:
14429 		tp->nvram_pagesize = 256;
14430 		break;
14431 	case FLASH_5752PAGE_SIZE_512:
14432 		tp->nvram_pagesize = 512;
14433 		break;
14434 	case FLASH_5752PAGE_SIZE_1K:
14435 		tp->nvram_pagesize = 1024;
14436 		break;
14437 	case FLASH_5752PAGE_SIZE_2K:
14438 		tp->nvram_pagesize = 2048;
14439 		break;
14440 	case FLASH_5752PAGE_SIZE_4K:
14441 		tp->nvram_pagesize = 4096;
14442 		break;
14443 	case FLASH_5752PAGE_SIZE_264:
14444 		tp->nvram_pagesize = 264;
14445 		break;
14446 	case FLASH_5752PAGE_SIZE_528:
14447 		tp->nvram_pagesize = 528;
14448 		break;
14449 	}
14450 }
14451 
14452 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14453 {
14454 	u32 nvcfg1;
14455 
14456 	nvcfg1 = tr32(NVRAM_CFG1);
14457 
14458 	/* NVRAM protection for TPM */
14459 	if (nvcfg1 & (1 << 27))
14460 		tg3_flag_set(tp, PROTECTED_NVRAM);
14461 
14462 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14463 	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14464 	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14465 		tp->nvram_jedecnum = JEDEC_ATMEL;
14466 		tg3_flag_set(tp, NVRAM_BUFFERED);
14467 		break;
14468 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14469 		tp->nvram_jedecnum = JEDEC_ATMEL;
14470 		tg3_flag_set(tp, NVRAM_BUFFERED);
14471 		tg3_flag_set(tp, FLASH);
14472 		break;
14473 	case FLASH_5752VENDOR_ST_M45PE10:
14474 	case FLASH_5752VENDOR_ST_M45PE20:
14475 	case FLASH_5752VENDOR_ST_M45PE40:
14476 		tp->nvram_jedecnum = JEDEC_ST;
14477 		tg3_flag_set(tp, NVRAM_BUFFERED);
14478 		tg3_flag_set(tp, FLASH);
14479 		break;
14480 	}
14481 
14482 	if (tg3_flag(tp, FLASH)) {
14483 		tg3_nvram_get_pagesize(tp, nvcfg1);
14484 	} else {
14485 		/* For eeprom, set pagesize to maximum eeprom size */
14486 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14487 
14488 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14489 		tw32(NVRAM_CFG1, nvcfg1);
14490 	}
14491 }
14492 
14493 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14494 {
14495 	u32 nvcfg1, protect = 0;
14496 
14497 	nvcfg1 = tr32(NVRAM_CFG1);
14498 
14499 	/* NVRAM protection for TPM */
14500 	if (nvcfg1 & (1 << 27)) {
14501 		tg3_flag_set(tp, PROTECTED_NVRAM);
14502 		protect = 1;
14503 	}
14504 
14505 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14506 	switch (nvcfg1) {
14507 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14508 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14509 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14510 	case FLASH_5755VENDOR_ATMEL_FLASH_5:
14511 		tp->nvram_jedecnum = JEDEC_ATMEL;
14512 		tg3_flag_set(tp, NVRAM_BUFFERED);
14513 		tg3_flag_set(tp, FLASH);
14514 		tp->nvram_pagesize = 264;
14515 		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14516 		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14517 			tp->nvram_size = (protect ? 0x3e200 :
14518 					  TG3_NVRAM_SIZE_512KB);
14519 		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14520 			tp->nvram_size = (protect ? 0x1f200 :
14521 					  TG3_NVRAM_SIZE_256KB);
14522 		else
14523 			tp->nvram_size = (protect ? 0x1f200 :
14524 					  TG3_NVRAM_SIZE_128KB);
14525 		break;
14526 	case FLASH_5752VENDOR_ST_M45PE10:
14527 	case FLASH_5752VENDOR_ST_M45PE20:
14528 	case FLASH_5752VENDOR_ST_M45PE40:
14529 		tp->nvram_jedecnum = JEDEC_ST;
14530 		tg3_flag_set(tp, NVRAM_BUFFERED);
14531 		tg3_flag_set(tp, FLASH);
14532 		tp->nvram_pagesize = 256;
14533 		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14534 			tp->nvram_size = (protect ?
14535 					  TG3_NVRAM_SIZE_64KB :
14536 					  TG3_NVRAM_SIZE_128KB);
14537 		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14538 			tp->nvram_size = (protect ?
14539 					  TG3_NVRAM_SIZE_64KB :
14540 					  TG3_NVRAM_SIZE_256KB);
14541 		else
14542 			tp->nvram_size = (protect ?
14543 					  TG3_NVRAM_SIZE_128KB :
14544 					  TG3_NVRAM_SIZE_512KB);
14545 		break;
14546 	}
14547 }
14548 
14549 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14550 {
14551 	u32 nvcfg1;
14552 
14553 	nvcfg1 = tr32(NVRAM_CFG1);
14554 
14555 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14556 	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14557 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14558 	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14559 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14560 		tp->nvram_jedecnum = JEDEC_ATMEL;
14561 		tg3_flag_set(tp, NVRAM_BUFFERED);
14562 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14563 
14564 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14565 		tw32(NVRAM_CFG1, nvcfg1);
14566 		break;
14567 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14568 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14569 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14570 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14571 		tp->nvram_jedecnum = JEDEC_ATMEL;
14572 		tg3_flag_set(tp, NVRAM_BUFFERED);
14573 		tg3_flag_set(tp, FLASH);
14574 		tp->nvram_pagesize = 264;
14575 		break;
14576 	case FLASH_5752VENDOR_ST_M45PE10:
14577 	case FLASH_5752VENDOR_ST_M45PE20:
14578 	case FLASH_5752VENDOR_ST_M45PE40:
14579 		tp->nvram_jedecnum = JEDEC_ST;
14580 		tg3_flag_set(tp, NVRAM_BUFFERED);
14581 		tg3_flag_set(tp, FLASH);
14582 		tp->nvram_pagesize = 256;
14583 		break;
14584 	}
14585 }
14586 
14587 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14588 {
14589 	u32 nvcfg1, protect = 0;
14590 
14591 	nvcfg1 = tr32(NVRAM_CFG1);
14592 
14593 	/* NVRAM protection for TPM */
14594 	if (nvcfg1 & (1 << 27)) {
14595 		tg3_flag_set(tp, PROTECTED_NVRAM);
14596 		protect = 1;
14597 	}
14598 
14599 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14600 	switch (nvcfg1) {
14601 	case FLASH_5761VENDOR_ATMEL_ADB021D:
14602 	case FLASH_5761VENDOR_ATMEL_ADB041D:
14603 	case FLASH_5761VENDOR_ATMEL_ADB081D:
14604 	case FLASH_5761VENDOR_ATMEL_ADB161D:
14605 	case FLASH_5761VENDOR_ATMEL_MDB021D:
14606 	case FLASH_5761VENDOR_ATMEL_MDB041D:
14607 	case FLASH_5761VENDOR_ATMEL_MDB081D:
14608 	case FLASH_5761VENDOR_ATMEL_MDB161D:
14609 		tp->nvram_jedecnum = JEDEC_ATMEL;
14610 		tg3_flag_set(tp, NVRAM_BUFFERED);
14611 		tg3_flag_set(tp, FLASH);
14612 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14613 		tp->nvram_pagesize = 256;
14614 		break;
14615 	case FLASH_5761VENDOR_ST_A_M45PE20:
14616 	case FLASH_5761VENDOR_ST_A_M45PE40:
14617 	case FLASH_5761VENDOR_ST_A_M45PE80:
14618 	case FLASH_5761VENDOR_ST_A_M45PE16:
14619 	case FLASH_5761VENDOR_ST_M_M45PE20:
14620 	case FLASH_5761VENDOR_ST_M_M45PE40:
14621 	case FLASH_5761VENDOR_ST_M_M45PE80:
14622 	case FLASH_5761VENDOR_ST_M_M45PE16:
14623 		tp->nvram_jedecnum = JEDEC_ST;
14624 		tg3_flag_set(tp, NVRAM_BUFFERED);
14625 		tg3_flag_set(tp, FLASH);
14626 		tp->nvram_pagesize = 256;
14627 		break;
14628 	}
14629 
14630 	if (protect) {
14631 		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14632 	} else {
14633 		switch (nvcfg1) {
14634 		case FLASH_5761VENDOR_ATMEL_ADB161D:
14635 		case FLASH_5761VENDOR_ATMEL_MDB161D:
14636 		case FLASH_5761VENDOR_ST_A_M45PE16:
14637 		case FLASH_5761VENDOR_ST_M_M45PE16:
14638 			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14639 			break;
14640 		case FLASH_5761VENDOR_ATMEL_ADB081D:
14641 		case FLASH_5761VENDOR_ATMEL_MDB081D:
14642 		case FLASH_5761VENDOR_ST_A_M45PE80:
14643 		case FLASH_5761VENDOR_ST_M_M45PE80:
14644 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14645 			break;
14646 		case FLASH_5761VENDOR_ATMEL_ADB041D:
14647 		case FLASH_5761VENDOR_ATMEL_MDB041D:
14648 		case FLASH_5761VENDOR_ST_A_M45PE40:
14649 		case FLASH_5761VENDOR_ST_M_M45PE40:
14650 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14651 			break;
14652 		case FLASH_5761VENDOR_ATMEL_ADB021D:
14653 		case FLASH_5761VENDOR_ATMEL_MDB021D:
14654 		case FLASH_5761VENDOR_ST_A_M45PE20:
14655 		case FLASH_5761VENDOR_ST_M_M45PE20:
14656 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14657 			break;
14658 		}
14659 	}
14660 }
14661 
14662 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14663 {
14664 	tp->nvram_jedecnum = JEDEC_ATMEL;
14665 	tg3_flag_set(tp, NVRAM_BUFFERED);
14666 	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14667 }
14668 
14669 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14670 {
14671 	u32 nvcfg1;
14672 
14673 	nvcfg1 = tr32(NVRAM_CFG1);
14674 
14675 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14676 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14677 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14678 		tp->nvram_jedecnum = JEDEC_ATMEL;
14679 		tg3_flag_set(tp, NVRAM_BUFFERED);
14680 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14681 
14682 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14683 		tw32(NVRAM_CFG1, nvcfg1);
14684 		return;
14685 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14686 	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14687 	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14688 	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14689 	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14690 	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14691 	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14692 		tp->nvram_jedecnum = JEDEC_ATMEL;
14693 		tg3_flag_set(tp, NVRAM_BUFFERED);
14694 		tg3_flag_set(tp, FLASH);
14695 
14696 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14697 		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14698 		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14699 		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14700 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14701 			break;
14702 		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14703 		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14704 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14705 			break;
14706 		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14707 		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14708 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14709 			break;
14710 		}
14711 		break;
14712 	case FLASH_5752VENDOR_ST_M45PE10:
14713 	case FLASH_5752VENDOR_ST_M45PE20:
14714 	case FLASH_5752VENDOR_ST_M45PE40:
14715 		tp->nvram_jedecnum = JEDEC_ST;
14716 		tg3_flag_set(tp, NVRAM_BUFFERED);
14717 		tg3_flag_set(tp, FLASH);
14718 
14719 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14720 		case FLASH_5752VENDOR_ST_M45PE10:
14721 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14722 			break;
14723 		case FLASH_5752VENDOR_ST_M45PE20:
14724 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14725 			break;
14726 		case FLASH_5752VENDOR_ST_M45PE40:
14727 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14728 			break;
14729 		}
14730 		break;
14731 	default:
14732 		tg3_flag_set(tp, NO_NVRAM);
14733 		return;
14734 	}
14735 
14736 	tg3_nvram_get_pagesize(tp, nvcfg1);
14737 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14738 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14739 }
14740 
14741 
14742 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14743 {
14744 	u32 nvcfg1;
14745 
14746 	nvcfg1 = tr32(NVRAM_CFG1);
14747 
14748 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14749 	case FLASH_5717VENDOR_ATMEL_EEPROM:
14750 	case FLASH_5717VENDOR_MICRO_EEPROM:
14751 		tp->nvram_jedecnum = JEDEC_ATMEL;
14752 		tg3_flag_set(tp, NVRAM_BUFFERED);
14753 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14754 
14755 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14756 		tw32(NVRAM_CFG1, nvcfg1);
14757 		return;
14758 	case FLASH_5717VENDOR_ATMEL_MDB011D:
14759 	case FLASH_5717VENDOR_ATMEL_ADB011B:
14760 	case FLASH_5717VENDOR_ATMEL_ADB011D:
14761 	case FLASH_5717VENDOR_ATMEL_MDB021D:
14762 	case FLASH_5717VENDOR_ATMEL_ADB021B:
14763 	case FLASH_5717VENDOR_ATMEL_ADB021D:
14764 	case FLASH_5717VENDOR_ATMEL_45USPT:
14765 		tp->nvram_jedecnum = JEDEC_ATMEL;
14766 		tg3_flag_set(tp, NVRAM_BUFFERED);
14767 		tg3_flag_set(tp, FLASH);
14768 
14769 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14770 		case FLASH_5717VENDOR_ATMEL_MDB021D:
14771 			/* Detect size with tg3_nvram_get_size() */
14772 			break;
14773 		case FLASH_5717VENDOR_ATMEL_ADB021B:
14774 		case FLASH_5717VENDOR_ATMEL_ADB021D:
14775 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14776 			break;
14777 		default:
14778 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14779 			break;
14780 		}
14781 		break;
14782 	case FLASH_5717VENDOR_ST_M_M25PE10:
14783 	case FLASH_5717VENDOR_ST_A_M25PE10:
14784 	case FLASH_5717VENDOR_ST_M_M45PE10:
14785 	case FLASH_5717VENDOR_ST_A_M45PE10:
14786 	case FLASH_5717VENDOR_ST_M_M25PE20:
14787 	case FLASH_5717VENDOR_ST_A_M25PE20:
14788 	case FLASH_5717VENDOR_ST_M_M45PE20:
14789 	case FLASH_5717VENDOR_ST_A_M45PE20:
14790 	case FLASH_5717VENDOR_ST_25USPT:
14791 	case FLASH_5717VENDOR_ST_45USPT:
14792 		tp->nvram_jedecnum = JEDEC_ST;
14793 		tg3_flag_set(tp, NVRAM_BUFFERED);
14794 		tg3_flag_set(tp, FLASH);
14795 
14796 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14797 		case FLASH_5717VENDOR_ST_M_M25PE20:
14798 		case FLASH_5717VENDOR_ST_M_M45PE20:
14799 			/* Detect size with tg3_nvram_get_size() */
14800 			break;
14801 		case FLASH_5717VENDOR_ST_A_M25PE20:
14802 		case FLASH_5717VENDOR_ST_A_M45PE20:
14803 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14804 			break;
14805 		default:
14806 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14807 			break;
14808 		}
14809 		break;
14810 	default:
14811 		tg3_flag_set(tp, NO_NVRAM);
14812 		return;
14813 	}
14814 
14815 	tg3_nvram_get_pagesize(tp, nvcfg1);
14816 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14817 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14818 }
14819 
14820 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14821 {
14822 	u32 nvcfg1, nvmpinstrp, nv_status;
14823 
14824 	nvcfg1 = tr32(NVRAM_CFG1);
14825 	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14826 
14827 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14828 		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14829 			tg3_flag_set(tp, NO_NVRAM);
14830 			return;
14831 		}
14832 
14833 		switch (nvmpinstrp) {
14834 		case FLASH_5762_MX25L_100:
14835 		case FLASH_5762_MX25L_200:
14836 		case FLASH_5762_MX25L_400:
14837 		case FLASH_5762_MX25L_800:
14838 		case FLASH_5762_MX25L_160_320:
14839 			tp->nvram_pagesize = 4096;
14840 			tp->nvram_jedecnum = JEDEC_MACRONIX;
14841 			tg3_flag_set(tp, NVRAM_BUFFERED);
14842 			tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14843 			tg3_flag_set(tp, FLASH);
14844 			nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
14845 			tp->nvram_size =
14846 				(1 << (nv_status >> AUTOSENSE_DEVID &
14847 						AUTOSENSE_DEVID_MASK)
14848 					<< AUTOSENSE_SIZE_IN_MB);
14849 			return;
14850 
14851 		case FLASH_5762_EEPROM_HD:
14852 			nvmpinstrp = FLASH_5720_EEPROM_HD;
14853 			break;
14854 		case FLASH_5762_EEPROM_LD:
14855 			nvmpinstrp = FLASH_5720_EEPROM_LD;
14856 			break;
14857 		case FLASH_5720VENDOR_M_ST_M45PE20:
14858 			/* This pinstrap supports multiple sizes, so force it
14859 			 * to read the actual size from location 0xf0.
14860 			 */
14861 			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14862 			break;
14863 		}
14864 	}
14865 
14866 	switch (nvmpinstrp) {
14867 	case FLASH_5720_EEPROM_HD:
14868 	case FLASH_5720_EEPROM_LD:
14869 		tp->nvram_jedecnum = JEDEC_ATMEL;
14870 		tg3_flag_set(tp, NVRAM_BUFFERED);
14871 
14872 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14873 		tw32(NVRAM_CFG1, nvcfg1);
14874 		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14875 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14876 		else
14877 			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14878 		return;
14879 	case FLASH_5720VENDOR_M_ATMEL_DB011D:
14880 	case FLASH_5720VENDOR_A_ATMEL_DB011B:
14881 	case FLASH_5720VENDOR_A_ATMEL_DB011D:
14882 	case FLASH_5720VENDOR_M_ATMEL_DB021D:
14883 	case FLASH_5720VENDOR_A_ATMEL_DB021B:
14884 	case FLASH_5720VENDOR_A_ATMEL_DB021D:
14885 	case FLASH_5720VENDOR_M_ATMEL_DB041D:
14886 	case FLASH_5720VENDOR_A_ATMEL_DB041B:
14887 	case FLASH_5720VENDOR_A_ATMEL_DB041D:
14888 	case FLASH_5720VENDOR_M_ATMEL_DB081D:
14889 	case FLASH_5720VENDOR_A_ATMEL_DB081D:
14890 	case FLASH_5720VENDOR_ATMEL_45USPT:
14891 		tp->nvram_jedecnum = JEDEC_ATMEL;
14892 		tg3_flag_set(tp, NVRAM_BUFFERED);
14893 		tg3_flag_set(tp, FLASH);
14894 
14895 		switch (nvmpinstrp) {
14896 		case FLASH_5720VENDOR_M_ATMEL_DB021D:
14897 		case FLASH_5720VENDOR_A_ATMEL_DB021B:
14898 		case FLASH_5720VENDOR_A_ATMEL_DB021D:
14899 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14900 			break;
14901 		case FLASH_5720VENDOR_M_ATMEL_DB041D:
14902 		case FLASH_5720VENDOR_A_ATMEL_DB041B:
14903 		case FLASH_5720VENDOR_A_ATMEL_DB041D:
14904 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14905 			break;
14906 		case FLASH_5720VENDOR_M_ATMEL_DB081D:
14907 		case FLASH_5720VENDOR_A_ATMEL_DB081D:
14908 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14909 			break;
14910 		default:
14911 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
14912 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14913 			break;
14914 		}
14915 		break;
14916 	case FLASH_5720VENDOR_M_ST_M25PE10:
14917 	case FLASH_5720VENDOR_M_ST_M45PE10:
14918 	case FLASH_5720VENDOR_A_ST_M25PE10:
14919 	case FLASH_5720VENDOR_A_ST_M45PE10:
14920 	case FLASH_5720VENDOR_M_ST_M25PE20:
14921 	case FLASH_5720VENDOR_M_ST_M45PE20:
14922 	case FLASH_5720VENDOR_A_ST_M25PE20:
14923 	case FLASH_5720VENDOR_A_ST_M45PE20:
14924 	case FLASH_5720VENDOR_M_ST_M25PE40:
14925 	case FLASH_5720VENDOR_M_ST_M45PE40:
14926 	case FLASH_5720VENDOR_A_ST_M25PE40:
14927 	case FLASH_5720VENDOR_A_ST_M45PE40:
14928 	case FLASH_5720VENDOR_M_ST_M25PE80:
14929 	case FLASH_5720VENDOR_M_ST_M45PE80:
14930 	case FLASH_5720VENDOR_A_ST_M25PE80:
14931 	case FLASH_5720VENDOR_A_ST_M45PE80:
14932 	case FLASH_5720VENDOR_ST_25USPT:
14933 	case FLASH_5720VENDOR_ST_45USPT:
14934 		tp->nvram_jedecnum = JEDEC_ST;
14935 		tg3_flag_set(tp, NVRAM_BUFFERED);
14936 		tg3_flag_set(tp, FLASH);
14937 
14938 		switch (nvmpinstrp) {
14939 		case FLASH_5720VENDOR_M_ST_M25PE20:
14940 		case FLASH_5720VENDOR_M_ST_M45PE20:
14941 		case FLASH_5720VENDOR_A_ST_M25PE20:
14942 		case FLASH_5720VENDOR_A_ST_M45PE20:
14943 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14944 			break;
14945 		case FLASH_5720VENDOR_M_ST_M25PE40:
14946 		case FLASH_5720VENDOR_M_ST_M45PE40:
14947 		case FLASH_5720VENDOR_A_ST_M25PE40:
14948 		case FLASH_5720VENDOR_A_ST_M45PE40:
14949 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14950 			break;
14951 		case FLASH_5720VENDOR_M_ST_M25PE80:
14952 		case FLASH_5720VENDOR_M_ST_M45PE80:
14953 		case FLASH_5720VENDOR_A_ST_M25PE80:
14954 		case FLASH_5720VENDOR_A_ST_M45PE80:
14955 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14956 			break;
14957 		default:
14958 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
14959 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14960 			break;
14961 		}
14962 		break;
14963 	default:
14964 		tg3_flag_set(tp, NO_NVRAM);
14965 		return;
14966 	}
14967 
14968 	tg3_nvram_get_pagesize(tp, nvcfg1);
14969 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14970 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14971 
14972 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14973 		u32 val;
14974 
14975 		if (tg3_nvram_read(tp, 0, &val))
14976 			return;
14977 
14978 		if (val != TG3_EEPROM_MAGIC &&
14979 		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14980 			tg3_flag_set(tp, NO_NVRAM);
14981 	}
14982 }
14983 
14984 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14985 static void tg3_nvram_init(struct tg3 *tp)
14986 {
14987 	if (tg3_flag(tp, IS_SSB_CORE)) {
14988 		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14989 		tg3_flag_clear(tp, NVRAM);
14990 		tg3_flag_clear(tp, NVRAM_BUFFERED);
14991 		tg3_flag_set(tp, NO_NVRAM);
14992 		return;
14993 	}
14994 
14995 	tw32_f(GRC_EEPROM_ADDR,
14996 	     (EEPROM_ADDR_FSM_RESET |
14997 	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
14998 	       EEPROM_ADDR_CLKPERD_SHIFT)));
14999 
15000 	msleep(1);
15001 
15002 	/* Enable seeprom accesses. */
15003 	tw32_f(GRC_LOCAL_CTRL,
15004 	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
15005 	udelay(100);
15006 
15007 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15008 	    tg3_asic_rev(tp) != ASIC_REV_5701) {
15009 		tg3_flag_set(tp, NVRAM);
15010 
15011 		if (tg3_nvram_lock(tp)) {
15012 			netdev_warn(tp->dev,
15013 				    "Cannot get nvram lock, %s failed\n",
15014 				    __func__);
15015 			return;
15016 		}
15017 		tg3_enable_nvram_access(tp);
15018 
15019 		tp->nvram_size = 0;
15020 
15021 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
15022 			tg3_get_5752_nvram_info(tp);
15023 		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
15024 			tg3_get_5755_nvram_info(tp);
15025 		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
15026 			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15027 			 tg3_asic_rev(tp) == ASIC_REV_5785)
15028 			tg3_get_5787_nvram_info(tp);
15029 		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
15030 			tg3_get_5761_nvram_info(tp);
15031 		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
15032 			tg3_get_5906_nvram_info(tp);
15033 		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
15034 			 tg3_flag(tp, 57765_CLASS))
15035 			tg3_get_57780_nvram_info(tp);
15036 		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15037 			 tg3_asic_rev(tp) == ASIC_REV_5719)
15038 			tg3_get_5717_nvram_info(tp);
15039 		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15040 			 tg3_asic_rev(tp) == ASIC_REV_5762)
15041 			tg3_get_5720_nvram_info(tp);
15042 		else
15043 			tg3_get_nvram_info(tp);
15044 
15045 		if (tp->nvram_size == 0)
15046 			tg3_get_nvram_size(tp);
15047 
15048 		tg3_disable_nvram_access(tp);
15049 		tg3_nvram_unlock(tp);
15050 
15051 	} else {
15052 		tg3_flag_clear(tp, NVRAM);
15053 		tg3_flag_clear(tp, NVRAM_BUFFERED);
15054 
15055 		tg3_get_eeprom_size(tp);
15056 	}
15057 }
15058 
15059 struct subsys_tbl_ent {
15060 	u16 subsys_vendor, subsys_devid;
15061 	u32 phy_id;
15062 };
15063 
15064 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15065 	/* Broadcom boards. */
15066 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15067 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15068 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15069 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15070 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15071 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15072 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15073 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15074 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15075 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15076 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15077 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15078 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15079 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15080 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15081 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15082 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15083 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15084 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15085 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15086 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15087 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15088 
15089 	/* 3com boards. */
15090 	{ TG3PCI_SUBVENDOR_ID_3COM,
15091 	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15092 	{ TG3PCI_SUBVENDOR_ID_3COM,
15093 	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15094 	{ TG3PCI_SUBVENDOR_ID_3COM,
15095 	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15096 	{ TG3PCI_SUBVENDOR_ID_3COM,
15097 	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15098 	{ TG3PCI_SUBVENDOR_ID_3COM,
15099 	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15100 
15101 	/* DELL boards. */
15102 	{ TG3PCI_SUBVENDOR_ID_DELL,
15103 	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15104 	{ TG3PCI_SUBVENDOR_ID_DELL,
15105 	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15106 	{ TG3PCI_SUBVENDOR_ID_DELL,
15107 	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15108 	{ TG3PCI_SUBVENDOR_ID_DELL,
15109 	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15110 
15111 	/* Compaq boards. */
15112 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15113 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15114 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15115 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15116 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15117 	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15118 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15119 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15120 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15121 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15122 
15123 	/* IBM boards. */
15124 	{ TG3PCI_SUBVENDOR_ID_IBM,
15125 	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15126 };
15127 
15128 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15129 {
15130 	int i;
15131 
15132 	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15133 		if ((subsys_id_to_phy_id[i].subsys_vendor ==
15134 		     tp->pdev->subsystem_vendor) &&
15135 		    (subsys_id_to_phy_id[i].subsys_devid ==
15136 		     tp->pdev->subsystem_device))
15137 			return &subsys_id_to_phy_id[i];
15138 	}
15139 	return NULL;
15140 }
15141 
15142 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15143 {
15144 	u32 val;
15145 
15146 	tp->phy_id = TG3_PHY_ID_INVALID;
15147 	tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15148 
15149 	/* Assume an onboard device and WOL capable by default.  */
15150 	tg3_flag_set(tp, EEPROM_WRITE_PROT);
15151 	tg3_flag_set(tp, WOL_CAP);
15152 
15153 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15154 		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15155 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15156 			tg3_flag_set(tp, IS_NIC);
15157 		}
15158 		val = tr32(VCPU_CFGSHDW);
15159 		if (val & VCPU_CFGSHDW_ASPM_DBNC)
15160 			tg3_flag_set(tp, ASPM_WORKAROUND);
15161 		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15162 		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15163 			tg3_flag_set(tp, WOL_ENABLE);
15164 			device_set_wakeup_enable(&tp->pdev->dev, true);
15165 		}
15166 		goto done;
15167 	}
15168 
15169 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15170 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15171 		u32 nic_cfg, led_cfg;
15172 		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15173 		u32 nic_phy_id, ver, eeprom_phy_id;
15174 		int eeprom_phy_serdes = 0;
15175 
15176 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15177 		tp->nic_sram_data_cfg = nic_cfg;
15178 
15179 		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15180 		ver >>= NIC_SRAM_DATA_VER_SHIFT;
15181 		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15182 		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
15183 		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
15184 		    (ver > 0) && (ver < 0x100))
15185 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15186 
15187 		if (tg3_asic_rev(tp) == ASIC_REV_5785)
15188 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15189 
15190 		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15191 		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
15192 		    tg3_asic_rev(tp) == ASIC_REV_5720)
15193 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15194 
15195 		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15196 		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15197 			eeprom_phy_serdes = 1;
15198 
15199 		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15200 		if (nic_phy_id != 0) {
15201 			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15202 			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15203 
15204 			eeprom_phy_id  = (id1 >> 16) << 10;
15205 			eeprom_phy_id |= (id2 & 0xfc00) << 16;
15206 			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
15207 		} else
15208 			eeprom_phy_id = 0;
15209 
15210 		tp->phy_id = eeprom_phy_id;
15211 		if (eeprom_phy_serdes) {
15212 			if (!tg3_flag(tp, 5705_PLUS))
15213 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15214 			else
15215 				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15216 		}
15217 
15218 		if (tg3_flag(tp, 5750_PLUS))
15219 			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15220 				    SHASTA_EXT_LED_MODE_MASK);
15221 		else
15222 			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15223 
15224 		switch (led_cfg) {
15225 		default:
15226 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15227 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15228 			break;
15229 
15230 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15231 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15232 			break;
15233 
15234 		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15235 			tp->led_ctrl = LED_CTRL_MODE_MAC;
15236 
15237 			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
15238 			 * read on some older 5700/5701 bootcode.
15239 			 */
15240 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15241 			    tg3_asic_rev(tp) == ASIC_REV_5701)
15242 				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15243 
15244 			break;
15245 
15246 		case SHASTA_EXT_LED_SHARED:
15247 			tp->led_ctrl = LED_CTRL_MODE_SHARED;
15248 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15249 			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15250 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15251 						 LED_CTRL_MODE_PHY_2);
15252 
15253 			if (tg3_flag(tp, 5717_PLUS) ||
15254 			    tg3_asic_rev(tp) == ASIC_REV_5762)
15255 				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15256 						LED_CTRL_BLINK_RATE_MASK;
15257 
15258 			break;
15259 
15260 		case SHASTA_EXT_LED_MAC:
15261 			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15262 			break;
15263 
15264 		case SHASTA_EXT_LED_COMBO:
15265 			tp->led_ctrl = LED_CTRL_MODE_COMBO;
15266 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15267 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15268 						 LED_CTRL_MODE_PHY_2);
15269 			break;
15270 
15271 		}
15272 
15273 		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15274 		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
15275 		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15276 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15277 
15278 		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15279 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15280 
15281 		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15282 			tg3_flag_set(tp, EEPROM_WRITE_PROT);
15283 			if ((tp->pdev->subsystem_vendor ==
15284 			     PCI_VENDOR_ID_ARIMA) &&
15285 			    (tp->pdev->subsystem_device == 0x205a ||
15286 			     tp->pdev->subsystem_device == 0x2063))
15287 				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15288 		} else {
15289 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15290 			tg3_flag_set(tp, IS_NIC);
15291 		}
15292 
15293 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15294 			tg3_flag_set(tp, ENABLE_ASF);
15295 			if (tg3_flag(tp, 5750_PLUS))
15296 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15297 		}
15298 
15299 		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15300 		    tg3_flag(tp, 5750_PLUS))
15301 			tg3_flag_set(tp, ENABLE_APE);
15302 
15303 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15304 		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15305 			tg3_flag_clear(tp, WOL_CAP);
15306 
15307 		if (tg3_flag(tp, WOL_CAP) &&
15308 		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15309 			tg3_flag_set(tp, WOL_ENABLE);
15310 			device_set_wakeup_enable(&tp->pdev->dev, true);
15311 		}
15312 
15313 		if (cfg2 & (1 << 17))
15314 			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15315 
15316 		/* serdes signal pre-emphasis in register 0x590 set by */
15317 		/* bootcode if bit 18 is set */
15318 		if (cfg2 & (1 << 18))
15319 			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15320 
15321 		if ((tg3_flag(tp, 57765_PLUS) ||
15322 		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15323 		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15324 		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15325 			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15326 
15327 		if (tg3_flag(tp, PCI_EXPRESS)) {
15328 			u32 cfg3;
15329 
15330 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15331 			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15332 			    !tg3_flag(tp, 57765_PLUS) &&
15333 			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15334 				tg3_flag_set(tp, ASPM_WORKAROUND);
15335 			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15336 				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15337 			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15338 				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15339 		}
15340 
15341 		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15342 			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15343 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15344 			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15345 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15346 			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15347 
15348 		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15349 			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15350 	}
15351 done:
15352 	if (tg3_flag(tp, WOL_CAP))
15353 		device_set_wakeup_enable(&tp->pdev->dev,
15354 					 tg3_flag(tp, WOL_ENABLE));
15355 	else
15356 		device_set_wakeup_capable(&tp->pdev->dev, false);
15357 }
15358 
15359 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15360 {
15361 	int i, err;
15362 	u32 val2, off = offset * 8;
15363 
15364 	err = tg3_nvram_lock(tp);
15365 	if (err)
15366 		return err;
15367 
15368 	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15369 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15370 			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15371 	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15372 	udelay(10);
15373 
15374 	for (i = 0; i < 100; i++) {
15375 		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15376 		if (val2 & APE_OTP_STATUS_CMD_DONE) {
15377 			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15378 			break;
15379 		}
15380 		udelay(10);
15381 	}
15382 
15383 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15384 
15385 	tg3_nvram_unlock(tp);
15386 	if (val2 & APE_OTP_STATUS_CMD_DONE)
15387 		return 0;
15388 
15389 	return -EBUSY;
15390 }
15391 
15392 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15393 {
15394 	int i;
15395 	u32 val;
15396 
15397 	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15398 	tw32(OTP_CTRL, cmd);
15399 
15400 	/* Wait for up to 1 ms for command to execute. */
15401 	for (i = 0; i < 100; i++) {
15402 		val = tr32(OTP_STATUS);
15403 		if (val & OTP_STATUS_CMD_DONE)
15404 			break;
15405 		udelay(10);
15406 	}
15407 
15408 	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15409 }
15410 
15411 /* Read the gphy configuration from the OTP region of the chip.  The gphy
15412  * configuration is a 32-bit value that straddles the alignment boundary.
15413  * We do two 32-bit reads and then shift and merge the results.
15414  */
15415 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15416 {
15417 	u32 bhalf_otp, thalf_otp;
15418 
15419 	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15420 
15421 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15422 		return 0;
15423 
15424 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15425 
15426 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15427 		return 0;
15428 
15429 	thalf_otp = tr32(OTP_READ_DATA);
15430 
15431 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15432 
15433 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15434 		return 0;
15435 
15436 	bhalf_otp = tr32(OTP_READ_DATA);
15437 
15438 	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15439 }
15440 
15441 static void tg3_phy_init_link_config(struct tg3 *tp)
15442 {
15443 	u32 adv = ADVERTISED_Autoneg;
15444 
15445 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15446 		if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15447 			adv |= ADVERTISED_1000baseT_Half;
15448 		adv |= ADVERTISED_1000baseT_Full;
15449 	}
15450 
15451 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15452 		adv |= ADVERTISED_100baseT_Half |
15453 		       ADVERTISED_100baseT_Full |
15454 		       ADVERTISED_10baseT_Half |
15455 		       ADVERTISED_10baseT_Full |
15456 		       ADVERTISED_TP;
15457 	else
15458 		adv |= ADVERTISED_FIBRE;
15459 
15460 	tp->link_config.advertising = adv;
15461 	tp->link_config.speed = SPEED_UNKNOWN;
15462 	tp->link_config.duplex = DUPLEX_UNKNOWN;
15463 	tp->link_config.autoneg = AUTONEG_ENABLE;
15464 	tp->link_config.active_speed = SPEED_UNKNOWN;
15465 	tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15466 
15467 	tp->old_link = -1;
15468 }
15469 
15470 static int tg3_phy_probe(struct tg3 *tp)
15471 {
15472 	u32 hw_phy_id_1, hw_phy_id_2;
15473 	u32 hw_phy_id, hw_phy_id_masked;
15474 	int err;
15475 
15476 	/* flow control autonegotiation is default behavior */
15477 	tg3_flag_set(tp, PAUSE_AUTONEG);
15478 	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15479 
15480 	if (tg3_flag(tp, ENABLE_APE)) {
15481 		switch (tp->pci_fn) {
15482 		case 0:
15483 			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15484 			break;
15485 		case 1:
15486 			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15487 			break;
15488 		case 2:
15489 			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15490 			break;
15491 		case 3:
15492 			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15493 			break;
15494 		}
15495 	}
15496 
15497 	if (!tg3_flag(tp, ENABLE_ASF) &&
15498 	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15499 	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15500 		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15501 				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15502 
15503 	if (tg3_flag(tp, USE_PHYLIB))
15504 		return tg3_phy_init(tp);
15505 
15506 	/* Reading the PHY ID register can conflict with ASF
15507 	 * firmware access to the PHY hardware.
15508 	 */
15509 	err = 0;
15510 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15511 		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15512 	} else {
15513 		/* Now read the physical PHY_ID from the chip and verify
15514 		 * that it is sane.  If it doesn't look good, we fall back
15515 		 * to either the hard-coded table based PHY_ID and failing
15516 		 * that the value found in the eeprom area.
15517 		 */
15518 		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15519 		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15520 
15521 		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
15522 		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15523 		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
15524 
15525 		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15526 	}
15527 
15528 	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15529 		tp->phy_id = hw_phy_id;
15530 		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15531 			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15532 		else
15533 			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15534 	} else {
15535 		if (tp->phy_id != TG3_PHY_ID_INVALID) {
15536 			/* Do nothing, phy ID already set up in
15537 			 * tg3_get_eeprom_hw_cfg().
15538 			 */
15539 		} else {
15540 			struct subsys_tbl_ent *p;
15541 
15542 			/* No eeprom signature?  Try the hardcoded
15543 			 * subsys device table.
15544 			 */
15545 			p = tg3_lookup_by_subsys(tp);
15546 			if (p) {
15547 				tp->phy_id = p->phy_id;
15548 			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
15549 				/* For now we saw the IDs 0xbc050cd0,
15550 				 * 0xbc050f80 and 0xbc050c30 on devices
15551 				 * connected to an BCM4785 and there are
15552 				 * probably more. Just assume that the phy is
15553 				 * supported when it is connected to a SSB core
15554 				 * for now.
15555 				 */
15556 				return -ENODEV;
15557 			}
15558 
15559 			if (!tp->phy_id ||
15560 			    tp->phy_id == TG3_PHY_ID_BCM8002)
15561 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15562 		}
15563 	}
15564 
15565 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15566 	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15567 	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
15568 	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
15569 	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
15570 	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15571 	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15572 	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15573 	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15574 		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15575 
15576 		tp->eee.supported = SUPPORTED_100baseT_Full |
15577 				    SUPPORTED_1000baseT_Full;
15578 		tp->eee.advertised = ADVERTISED_100baseT_Full |
15579 				     ADVERTISED_1000baseT_Full;
15580 		tp->eee.eee_enabled = 1;
15581 		tp->eee.tx_lpi_enabled = 1;
15582 		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15583 	}
15584 
15585 	tg3_phy_init_link_config(tp);
15586 
15587 	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15588 	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15589 	    !tg3_flag(tp, ENABLE_APE) &&
15590 	    !tg3_flag(tp, ENABLE_ASF)) {
15591 		u32 bmsr, dummy;
15592 
15593 		tg3_readphy(tp, MII_BMSR, &bmsr);
15594 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15595 		    (bmsr & BMSR_LSTATUS))
15596 			goto skip_phy_reset;
15597 
15598 		err = tg3_phy_reset(tp);
15599 		if (err)
15600 			return err;
15601 
15602 		tg3_phy_set_wirespeed(tp);
15603 
15604 		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15605 			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15606 					    tp->link_config.flowctrl);
15607 
15608 			tg3_writephy(tp, MII_BMCR,
15609 				     BMCR_ANENABLE | BMCR_ANRESTART);
15610 		}
15611 	}
15612 
15613 skip_phy_reset:
15614 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15615 		err = tg3_init_5401phy_dsp(tp);
15616 		if (err)
15617 			return err;
15618 
15619 		err = tg3_init_5401phy_dsp(tp);
15620 	}
15621 
15622 	return err;
15623 }
15624 
15625 static void tg3_read_vpd(struct tg3 *tp)
15626 {
15627 	u8 *vpd_data;
15628 	unsigned int block_end, rosize, len;
15629 	u32 vpdlen;
15630 	int j, i = 0;
15631 
15632 	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15633 	if (!vpd_data)
15634 		goto out_no_vpd;
15635 
15636 	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15637 	if (i < 0)
15638 		goto out_not_found;
15639 
15640 	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15641 	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15642 	i += PCI_VPD_LRDT_TAG_SIZE;
15643 
15644 	if (block_end > vpdlen)
15645 		goto out_not_found;
15646 
15647 	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15648 				      PCI_VPD_RO_KEYWORD_MFR_ID);
15649 	if (j > 0) {
15650 		len = pci_vpd_info_field_size(&vpd_data[j]);
15651 
15652 		j += PCI_VPD_INFO_FLD_HDR_SIZE;
15653 		if (j + len > block_end || len != 4 ||
15654 		    memcmp(&vpd_data[j], "1028", 4))
15655 			goto partno;
15656 
15657 		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15658 					      PCI_VPD_RO_KEYWORD_VENDOR0);
15659 		if (j < 0)
15660 			goto partno;
15661 
15662 		len = pci_vpd_info_field_size(&vpd_data[j]);
15663 
15664 		j += PCI_VPD_INFO_FLD_HDR_SIZE;
15665 		if (j + len > block_end)
15666 			goto partno;
15667 
15668 		if (len >= sizeof(tp->fw_ver))
15669 			len = sizeof(tp->fw_ver) - 1;
15670 		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15671 		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15672 			 &vpd_data[j]);
15673 	}
15674 
15675 partno:
15676 	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15677 				      PCI_VPD_RO_KEYWORD_PARTNO);
15678 	if (i < 0)
15679 		goto out_not_found;
15680 
15681 	len = pci_vpd_info_field_size(&vpd_data[i]);
15682 
15683 	i += PCI_VPD_INFO_FLD_HDR_SIZE;
15684 	if (len > TG3_BPN_SIZE ||
15685 	    (len + i) > vpdlen)
15686 		goto out_not_found;
15687 
15688 	memcpy(tp->board_part_number, &vpd_data[i], len);
15689 
15690 out_not_found:
15691 	kfree(vpd_data);
15692 	if (tp->board_part_number[0])
15693 		return;
15694 
15695 out_no_vpd:
15696 	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15697 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15698 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15699 			strcpy(tp->board_part_number, "BCM5717");
15700 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15701 			strcpy(tp->board_part_number, "BCM5718");
15702 		else
15703 			goto nomatch;
15704 	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15705 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15706 			strcpy(tp->board_part_number, "BCM57780");
15707 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15708 			strcpy(tp->board_part_number, "BCM57760");
15709 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15710 			strcpy(tp->board_part_number, "BCM57790");
15711 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15712 			strcpy(tp->board_part_number, "BCM57788");
15713 		else
15714 			goto nomatch;
15715 	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15716 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15717 			strcpy(tp->board_part_number, "BCM57761");
15718 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15719 			strcpy(tp->board_part_number, "BCM57765");
15720 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15721 			strcpy(tp->board_part_number, "BCM57781");
15722 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15723 			strcpy(tp->board_part_number, "BCM57785");
15724 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15725 			strcpy(tp->board_part_number, "BCM57791");
15726 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15727 			strcpy(tp->board_part_number, "BCM57795");
15728 		else
15729 			goto nomatch;
15730 	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15731 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15732 			strcpy(tp->board_part_number, "BCM57762");
15733 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15734 			strcpy(tp->board_part_number, "BCM57766");
15735 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15736 			strcpy(tp->board_part_number, "BCM57782");
15737 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15738 			strcpy(tp->board_part_number, "BCM57786");
15739 		else
15740 			goto nomatch;
15741 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15742 		strcpy(tp->board_part_number, "BCM95906");
15743 	} else {
15744 nomatch:
15745 		strcpy(tp->board_part_number, "none");
15746 	}
15747 }
15748 
15749 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15750 {
15751 	u32 val;
15752 
15753 	if (tg3_nvram_read(tp, offset, &val) ||
15754 	    (val & 0xfc000000) != 0x0c000000 ||
15755 	    tg3_nvram_read(tp, offset + 4, &val) ||
15756 	    val != 0)
15757 		return 0;
15758 
15759 	return 1;
15760 }
15761 
15762 static void tg3_read_bc_ver(struct tg3 *tp)
15763 {
15764 	u32 val, offset, start, ver_offset;
15765 	int i, dst_off;
15766 	bool newver = false;
15767 
15768 	if (tg3_nvram_read(tp, 0xc, &offset) ||
15769 	    tg3_nvram_read(tp, 0x4, &start))
15770 		return;
15771 
15772 	offset = tg3_nvram_logical_addr(tp, offset);
15773 
15774 	if (tg3_nvram_read(tp, offset, &val))
15775 		return;
15776 
15777 	if ((val & 0xfc000000) == 0x0c000000) {
15778 		if (tg3_nvram_read(tp, offset + 4, &val))
15779 			return;
15780 
15781 		if (val == 0)
15782 			newver = true;
15783 	}
15784 
15785 	dst_off = strlen(tp->fw_ver);
15786 
15787 	if (newver) {
15788 		if (TG3_VER_SIZE - dst_off < 16 ||
15789 		    tg3_nvram_read(tp, offset + 8, &ver_offset))
15790 			return;
15791 
15792 		offset = offset + ver_offset - start;
15793 		for (i = 0; i < 16; i += 4) {
15794 			__be32 v;
15795 			if (tg3_nvram_read_be32(tp, offset + i, &v))
15796 				return;
15797 
15798 			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15799 		}
15800 	} else {
15801 		u32 major, minor;
15802 
15803 		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15804 			return;
15805 
15806 		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15807 			TG3_NVM_BCVER_MAJSFT;
15808 		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15809 		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15810 			 "v%d.%02d", major, minor);
15811 	}
15812 }
15813 
15814 static void tg3_read_hwsb_ver(struct tg3 *tp)
15815 {
15816 	u32 val, major, minor;
15817 
15818 	/* Use native endian representation */
15819 	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15820 		return;
15821 
15822 	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15823 		TG3_NVM_HWSB_CFG1_MAJSFT;
15824 	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15825 		TG3_NVM_HWSB_CFG1_MINSFT;
15826 
15827 	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15828 }
15829 
15830 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15831 {
15832 	u32 offset, major, minor, build;
15833 
15834 	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15835 
15836 	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15837 		return;
15838 
15839 	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15840 	case TG3_EEPROM_SB_REVISION_0:
15841 		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15842 		break;
15843 	case TG3_EEPROM_SB_REVISION_2:
15844 		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15845 		break;
15846 	case TG3_EEPROM_SB_REVISION_3:
15847 		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15848 		break;
15849 	case TG3_EEPROM_SB_REVISION_4:
15850 		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15851 		break;
15852 	case TG3_EEPROM_SB_REVISION_5:
15853 		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15854 		break;
15855 	case TG3_EEPROM_SB_REVISION_6:
15856 		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15857 		break;
15858 	default:
15859 		return;
15860 	}
15861 
15862 	if (tg3_nvram_read(tp, offset, &val))
15863 		return;
15864 
15865 	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15866 		TG3_EEPROM_SB_EDH_BLD_SHFT;
15867 	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15868 		TG3_EEPROM_SB_EDH_MAJ_SHFT;
15869 	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15870 
15871 	if (minor > 99 || build > 26)
15872 		return;
15873 
15874 	offset = strlen(tp->fw_ver);
15875 	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15876 		 " v%d.%02d", major, minor);
15877 
15878 	if (build > 0) {
15879 		offset = strlen(tp->fw_ver);
15880 		if (offset < TG3_VER_SIZE - 1)
15881 			tp->fw_ver[offset] = 'a' + build - 1;
15882 	}
15883 }
15884 
15885 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15886 {
15887 	u32 val, offset, start;
15888 	int i, vlen;
15889 
15890 	for (offset = TG3_NVM_DIR_START;
15891 	     offset < TG3_NVM_DIR_END;
15892 	     offset += TG3_NVM_DIRENT_SIZE) {
15893 		if (tg3_nvram_read(tp, offset, &val))
15894 			return;
15895 
15896 		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15897 			break;
15898 	}
15899 
15900 	if (offset == TG3_NVM_DIR_END)
15901 		return;
15902 
15903 	if (!tg3_flag(tp, 5705_PLUS))
15904 		start = 0x08000000;
15905 	else if (tg3_nvram_read(tp, offset - 4, &start))
15906 		return;
15907 
15908 	if (tg3_nvram_read(tp, offset + 4, &offset) ||
15909 	    !tg3_fw_img_is_valid(tp, offset) ||
15910 	    tg3_nvram_read(tp, offset + 8, &val))
15911 		return;
15912 
15913 	offset += val - start;
15914 
15915 	vlen = strlen(tp->fw_ver);
15916 
15917 	tp->fw_ver[vlen++] = ',';
15918 	tp->fw_ver[vlen++] = ' ';
15919 
15920 	for (i = 0; i < 4; i++) {
15921 		__be32 v;
15922 		if (tg3_nvram_read_be32(tp, offset, &v))
15923 			return;
15924 
15925 		offset += sizeof(v);
15926 
15927 		if (vlen > TG3_VER_SIZE - sizeof(v)) {
15928 			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15929 			break;
15930 		}
15931 
15932 		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15933 		vlen += sizeof(v);
15934 	}
15935 }
15936 
15937 static void tg3_probe_ncsi(struct tg3 *tp)
15938 {
15939 	u32 apedata;
15940 
15941 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15942 	if (apedata != APE_SEG_SIG_MAGIC)
15943 		return;
15944 
15945 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15946 	if (!(apedata & APE_FW_STATUS_READY))
15947 		return;
15948 
15949 	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15950 		tg3_flag_set(tp, APE_HAS_NCSI);
15951 }
15952 
15953 static void tg3_read_dash_ver(struct tg3 *tp)
15954 {
15955 	int vlen;
15956 	u32 apedata;
15957 	char *fwtype;
15958 
15959 	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15960 
15961 	if (tg3_flag(tp, APE_HAS_NCSI))
15962 		fwtype = "NCSI";
15963 	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15964 		fwtype = "SMASH";
15965 	else
15966 		fwtype = "DASH";
15967 
15968 	vlen = strlen(tp->fw_ver);
15969 
15970 	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15971 		 fwtype,
15972 		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15973 		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15974 		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15975 		 (apedata & APE_FW_VERSION_BLDMSK));
15976 }
15977 
15978 static void tg3_read_otp_ver(struct tg3 *tp)
15979 {
15980 	u32 val, val2;
15981 
15982 	if (tg3_asic_rev(tp) != ASIC_REV_5762)
15983 		return;
15984 
15985 	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15986 	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15987 	    TG3_OTP_MAGIC0_VALID(val)) {
15988 		u64 val64 = (u64) val << 32 | val2;
15989 		u32 ver = 0;
15990 		int i, vlen;
15991 
15992 		for (i = 0; i < 7; i++) {
15993 			if ((val64 & 0xff) == 0)
15994 				break;
15995 			ver = val64 & 0xff;
15996 			val64 >>= 8;
15997 		}
15998 		vlen = strlen(tp->fw_ver);
15999 		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
16000 	}
16001 }
16002 
16003 static void tg3_read_fw_ver(struct tg3 *tp)
16004 {
16005 	u32 val;
16006 	bool vpd_vers = false;
16007 
16008 	if (tp->fw_ver[0] != 0)
16009 		vpd_vers = true;
16010 
16011 	if (tg3_flag(tp, NO_NVRAM)) {
16012 		strcat(tp->fw_ver, "sb");
16013 		tg3_read_otp_ver(tp);
16014 		return;
16015 	}
16016 
16017 	if (tg3_nvram_read(tp, 0, &val))
16018 		return;
16019 
16020 	if (val == TG3_EEPROM_MAGIC)
16021 		tg3_read_bc_ver(tp);
16022 	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
16023 		tg3_read_sb_ver(tp, val);
16024 	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
16025 		tg3_read_hwsb_ver(tp);
16026 
16027 	if (tg3_flag(tp, ENABLE_ASF)) {
16028 		if (tg3_flag(tp, ENABLE_APE)) {
16029 			tg3_probe_ncsi(tp);
16030 			if (!vpd_vers)
16031 				tg3_read_dash_ver(tp);
16032 		} else if (!vpd_vers) {
16033 			tg3_read_mgmtfw_ver(tp);
16034 		}
16035 	}
16036 
16037 	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
16038 }
16039 
16040 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
16041 {
16042 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
16043 		return TG3_RX_RET_MAX_SIZE_5717;
16044 	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
16045 		return TG3_RX_RET_MAX_SIZE_5700;
16046 	else
16047 		return TG3_RX_RET_MAX_SIZE_5705;
16048 }
16049 
16050 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
16051 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
16052 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
16053 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
16054 	{ },
16055 };
16056 
16057 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
16058 {
16059 	struct pci_dev *peer;
16060 	unsigned int func, devnr = tp->pdev->devfn & ~7;
16061 
16062 	for (func = 0; func < 8; func++) {
16063 		peer = pci_get_slot(tp->pdev->bus, devnr | func);
16064 		if (peer && peer != tp->pdev)
16065 			break;
16066 		pci_dev_put(peer);
16067 	}
16068 	/* 5704 can be configured in single-port mode, set peer to
16069 	 * tp->pdev in that case.
16070 	 */
16071 	if (!peer) {
16072 		peer = tp->pdev;
16073 		return peer;
16074 	}
16075 
16076 	/*
16077 	 * We don't need to keep the refcount elevated; there's no way
16078 	 * to remove one half of this device without removing the other
16079 	 */
16080 	pci_dev_put(peer);
16081 
16082 	return peer;
16083 }
16084 
16085 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16086 {
16087 	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16088 	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16089 		u32 reg;
16090 
16091 		/* All devices that use the alternate
16092 		 * ASIC REV location have a CPMU.
16093 		 */
16094 		tg3_flag_set(tp, CPMU_PRESENT);
16095 
16096 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16097 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16098 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16099 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16100 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16101 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16102 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16103 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16104 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16105 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16106 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16107 			reg = TG3PCI_GEN2_PRODID_ASICREV;
16108 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16109 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16110 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16111 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16112 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16113 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16114 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16115 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16116 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16117 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16118 			reg = TG3PCI_GEN15_PRODID_ASICREV;
16119 		else
16120 			reg = TG3PCI_PRODID_ASICREV;
16121 
16122 		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16123 	}
16124 
16125 	/* Wrong chip ID in 5752 A0. This code can be removed later
16126 	 * as A0 is not in production.
16127 	 */
16128 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16129 		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16130 
16131 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16132 		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16133 
16134 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16135 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16136 	    tg3_asic_rev(tp) == ASIC_REV_5720)
16137 		tg3_flag_set(tp, 5717_PLUS);
16138 
16139 	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16140 	    tg3_asic_rev(tp) == ASIC_REV_57766)
16141 		tg3_flag_set(tp, 57765_CLASS);
16142 
16143 	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16144 	     tg3_asic_rev(tp) == ASIC_REV_5762)
16145 		tg3_flag_set(tp, 57765_PLUS);
16146 
16147 	/* Intentionally exclude ASIC_REV_5906 */
16148 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16149 	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
16150 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
16151 	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
16152 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
16153 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
16154 	    tg3_flag(tp, 57765_PLUS))
16155 		tg3_flag_set(tp, 5755_PLUS);
16156 
16157 	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16158 	    tg3_asic_rev(tp) == ASIC_REV_5714)
16159 		tg3_flag_set(tp, 5780_CLASS);
16160 
16161 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16162 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
16163 	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
16164 	    tg3_flag(tp, 5755_PLUS) ||
16165 	    tg3_flag(tp, 5780_CLASS))
16166 		tg3_flag_set(tp, 5750_PLUS);
16167 
16168 	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16169 	    tg3_flag(tp, 5750_PLUS))
16170 		tg3_flag_set(tp, 5705_PLUS);
16171 }
16172 
16173 static bool tg3_10_100_only_device(struct tg3 *tp,
16174 				   const struct pci_device_id *ent)
16175 {
16176 	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16177 
16178 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16179 	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16180 	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
16181 		return true;
16182 
16183 	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16184 		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16185 			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16186 				return true;
16187 		} else {
16188 			return true;
16189 		}
16190 	}
16191 
16192 	return false;
16193 }
16194 
16195 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16196 {
16197 	u32 misc_ctrl_reg;
16198 	u32 pci_state_reg, grc_misc_cfg;
16199 	u32 val;
16200 	u16 pci_cmd;
16201 	int err;
16202 
16203 	/* Force memory write invalidate off.  If we leave it on,
16204 	 * then on 5700_BX chips we have to enable a workaround.
16205 	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16206 	 * to match the cacheline size.  The Broadcom driver have this
16207 	 * workaround but turns MWI off all the times so never uses
16208 	 * it.  This seems to suggest that the workaround is insufficient.
16209 	 */
16210 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16211 	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16212 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16213 
16214 	/* Important! -- Make sure register accesses are byteswapped
16215 	 * correctly.  Also, for those chips that require it, make
16216 	 * sure that indirect register accesses are enabled before
16217 	 * the first operation.
16218 	 */
16219 	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16220 			      &misc_ctrl_reg);
16221 	tp->misc_host_ctrl |= (misc_ctrl_reg &
16222 			       MISC_HOST_CTRL_CHIPREV);
16223 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16224 			       tp->misc_host_ctrl);
16225 
16226 	tg3_detect_asic_rev(tp, misc_ctrl_reg);
16227 
16228 	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16229 	 * we need to disable memory and use config. cycles
16230 	 * only to access all registers. The 5702/03 chips
16231 	 * can mistakenly decode the special cycles from the
16232 	 * ICH chipsets as memory write cycles, causing corruption
16233 	 * of register and memory space. Only certain ICH bridges
16234 	 * will drive special cycles with non-zero data during the
16235 	 * address phase which can fall within the 5703's address
16236 	 * range. This is not an ICH bug as the PCI spec allows
16237 	 * non-zero address during special cycles. However, only
16238 	 * these ICH bridges are known to drive non-zero addresses
16239 	 * during special cycles.
16240 	 *
16241 	 * Since special cycles do not cross PCI bridges, we only
16242 	 * enable this workaround if the 5703 is on the secondary
16243 	 * bus of these ICH bridges.
16244 	 */
16245 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16246 	    (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16247 		static struct tg3_dev_id {
16248 			u32	vendor;
16249 			u32	device;
16250 			u32	rev;
16251 		} ich_chipsets[] = {
16252 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16253 			  PCI_ANY_ID },
16254 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16255 			  PCI_ANY_ID },
16256 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16257 			  0xa },
16258 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16259 			  PCI_ANY_ID },
16260 			{ },
16261 		};
16262 		struct tg3_dev_id *pci_id = &ich_chipsets[0];
16263 		struct pci_dev *bridge = NULL;
16264 
16265 		while (pci_id->vendor != 0) {
16266 			bridge = pci_get_device(pci_id->vendor, pci_id->device,
16267 						bridge);
16268 			if (!bridge) {
16269 				pci_id++;
16270 				continue;
16271 			}
16272 			if (pci_id->rev != PCI_ANY_ID) {
16273 				if (bridge->revision > pci_id->rev)
16274 					continue;
16275 			}
16276 			if (bridge->subordinate &&
16277 			    (bridge->subordinate->number ==
16278 			     tp->pdev->bus->number)) {
16279 				tg3_flag_set(tp, ICH_WORKAROUND);
16280 				pci_dev_put(bridge);
16281 				break;
16282 			}
16283 		}
16284 	}
16285 
16286 	if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16287 		static struct tg3_dev_id {
16288 			u32	vendor;
16289 			u32	device;
16290 		} bridge_chipsets[] = {
16291 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16292 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16293 			{ },
16294 		};
16295 		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16296 		struct pci_dev *bridge = NULL;
16297 
16298 		while (pci_id->vendor != 0) {
16299 			bridge = pci_get_device(pci_id->vendor,
16300 						pci_id->device,
16301 						bridge);
16302 			if (!bridge) {
16303 				pci_id++;
16304 				continue;
16305 			}
16306 			if (bridge->subordinate &&
16307 			    (bridge->subordinate->number <=
16308 			     tp->pdev->bus->number) &&
16309 			    (bridge->subordinate->busn_res.end >=
16310 			     tp->pdev->bus->number)) {
16311 				tg3_flag_set(tp, 5701_DMA_BUG);
16312 				pci_dev_put(bridge);
16313 				break;
16314 			}
16315 		}
16316 	}
16317 
16318 	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
16319 	 * DMA addresses > 40-bit. This bridge may have other additional
16320 	 * 57xx devices behind it in some 4-port NIC designs for example.
16321 	 * Any tg3 device found behind the bridge will also need the 40-bit
16322 	 * DMA workaround.
16323 	 */
16324 	if (tg3_flag(tp, 5780_CLASS)) {
16325 		tg3_flag_set(tp, 40BIT_DMA_BUG);
16326 		tp->msi_cap = tp->pdev->msi_cap;
16327 	} else {
16328 		struct pci_dev *bridge = NULL;
16329 
16330 		do {
16331 			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16332 						PCI_DEVICE_ID_SERVERWORKS_EPB,
16333 						bridge);
16334 			if (bridge && bridge->subordinate &&
16335 			    (bridge->subordinate->number <=
16336 			     tp->pdev->bus->number) &&
16337 			    (bridge->subordinate->busn_res.end >=
16338 			     tp->pdev->bus->number)) {
16339 				tg3_flag_set(tp, 40BIT_DMA_BUG);
16340 				pci_dev_put(bridge);
16341 				break;
16342 			}
16343 		} while (bridge);
16344 	}
16345 
16346 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16347 	    tg3_asic_rev(tp) == ASIC_REV_5714)
16348 		tp->pdev_peer = tg3_find_peer(tp);
16349 
16350 	/* Determine TSO capabilities */
16351 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16352 		; /* Do nothing. HW bug. */
16353 	else if (tg3_flag(tp, 57765_PLUS))
16354 		tg3_flag_set(tp, HW_TSO_3);
16355 	else if (tg3_flag(tp, 5755_PLUS) ||
16356 		 tg3_asic_rev(tp) == ASIC_REV_5906)
16357 		tg3_flag_set(tp, HW_TSO_2);
16358 	else if (tg3_flag(tp, 5750_PLUS)) {
16359 		tg3_flag_set(tp, HW_TSO_1);
16360 		tg3_flag_set(tp, TSO_BUG);
16361 		if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16362 		    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16363 			tg3_flag_clear(tp, TSO_BUG);
16364 	} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16365 		   tg3_asic_rev(tp) != ASIC_REV_5701 &&
16366 		   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16367 		tg3_flag_set(tp, FW_TSO);
16368 		tg3_flag_set(tp, TSO_BUG);
16369 		if (tg3_asic_rev(tp) == ASIC_REV_5705)
16370 			tp->fw_needed = FIRMWARE_TG3TSO5;
16371 		else
16372 			tp->fw_needed = FIRMWARE_TG3TSO;
16373 	}
16374 
16375 	/* Selectively allow TSO based on operating conditions */
16376 	if (tg3_flag(tp, HW_TSO_1) ||
16377 	    tg3_flag(tp, HW_TSO_2) ||
16378 	    tg3_flag(tp, HW_TSO_3) ||
16379 	    tg3_flag(tp, FW_TSO)) {
16380 		/* For firmware TSO, assume ASF is disabled.
16381 		 * We'll disable TSO later if we discover ASF
16382 		 * is enabled in tg3_get_eeprom_hw_cfg().
16383 		 */
16384 		tg3_flag_set(tp, TSO_CAPABLE);
16385 	} else {
16386 		tg3_flag_clear(tp, TSO_CAPABLE);
16387 		tg3_flag_clear(tp, TSO_BUG);
16388 		tp->fw_needed = NULL;
16389 	}
16390 
16391 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16392 		tp->fw_needed = FIRMWARE_TG3;
16393 
16394 	if (tg3_asic_rev(tp) == ASIC_REV_57766)
16395 		tp->fw_needed = FIRMWARE_TG357766;
16396 
16397 	tp->irq_max = 1;
16398 
16399 	if (tg3_flag(tp, 5750_PLUS)) {
16400 		tg3_flag_set(tp, SUPPORT_MSI);
16401 		if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16402 		    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16403 		    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16404 		     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16405 		     tp->pdev_peer == tp->pdev))
16406 			tg3_flag_clear(tp, SUPPORT_MSI);
16407 
16408 		if (tg3_flag(tp, 5755_PLUS) ||
16409 		    tg3_asic_rev(tp) == ASIC_REV_5906) {
16410 			tg3_flag_set(tp, 1SHOT_MSI);
16411 		}
16412 
16413 		if (tg3_flag(tp, 57765_PLUS)) {
16414 			tg3_flag_set(tp, SUPPORT_MSIX);
16415 			tp->irq_max = TG3_IRQ_MAX_VECS;
16416 		}
16417 	}
16418 
16419 	tp->txq_max = 1;
16420 	tp->rxq_max = 1;
16421 	if (tp->irq_max > 1) {
16422 		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16423 		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16424 
16425 		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16426 		    tg3_asic_rev(tp) == ASIC_REV_5720)
16427 			tp->txq_max = tp->irq_max - 1;
16428 	}
16429 
16430 	if (tg3_flag(tp, 5755_PLUS) ||
16431 	    tg3_asic_rev(tp) == ASIC_REV_5906)
16432 		tg3_flag_set(tp, SHORT_DMA_BUG);
16433 
16434 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
16435 		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16436 
16437 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16438 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16439 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
16440 	    tg3_asic_rev(tp) == ASIC_REV_5762)
16441 		tg3_flag_set(tp, LRG_PROD_RING_CAP);
16442 
16443 	if (tg3_flag(tp, 57765_PLUS) &&
16444 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16445 		tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16446 
16447 	if (!tg3_flag(tp, 5705_PLUS) ||
16448 	    tg3_flag(tp, 5780_CLASS) ||
16449 	    tg3_flag(tp, USE_JUMBO_BDFLAG))
16450 		tg3_flag_set(tp, JUMBO_CAPABLE);
16451 
16452 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16453 			      &pci_state_reg);
16454 
16455 	if (pci_is_pcie(tp->pdev)) {
16456 		u16 lnkctl;
16457 
16458 		tg3_flag_set(tp, PCI_EXPRESS);
16459 
16460 		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16461 		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16462 			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16463 				tg3_flag_clear(tp, HW_TSO_2);
16464 				tg3_flag_clear(tp, TSO_CAPABLE);
16465 			}
16466 			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16467 			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
16468 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16469 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16470 				tg3_flag_set(tp, CLKREQ_BUG);
16471 		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16472 			tg3_flag_set(tp, L1PLLPD_EN);
16473 		}
16474 	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16475 		/* BCM5785 devices are effectively PCIe devices, and should
16476 		 * follow PCIe codepaths, but do not have a PCIe capabilities
16477 		 * section.
16478 		 */
16479 		tg3_flag_set(tp, PCI_EXPRESS);
16480 	} else if (!tg3_flag(tp, 5705_PLUS) ||
16481 		   tg3_flag(tp, 5780_CLASS)) {
16482 		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16483 		if (!tp->pcix_cap) {
16484 			dev_err(&tp->pdev->dev,
16485 				"Cannot find PCI-X capability, aborting\n");
16486 			return -EIO;
16487 		}
16488 
16489 		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16490 			tg3_flag_set(tp, PCIX_MODE);
16491 	}
16492 
16493 	/* If we have an AMD 762 or VIA K8T800 chipset, write
16494 	 * reordering to the mailbox registers done by the host
16495 	 * controller can cause major troubles.  We read back from
16496 	 * every mailbox register write to force the writes to be
16497 	 * posted to the chip in order.
16498 	 */
16499 	if (pci_dev_present(tg3_write_reorder_chipsets) &&
16500 	    !tg3_flag(tp, PCI_EXPRESS))
16501 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
16502 
16503 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16504 			     &tp->pci_cacheline_sz);
16505 	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16506 			     &tp->pci_lat_timer);
16507 	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16508 	    tp->pci_lat_timer < 64) {
16509 		tp->pci_lat_timer = 64;
16510 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16511 				      tp->pci_lat_timer);
16512 	}
16513 
16514 	/* Important! -- It is critical that the PCI-X hw workaround
16515 	 * situation is decided before the first MMIO register access.
16516 	 */
16517 	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16518 		/* 5700 BX chips need to have their TX producer index
16519 		 * mailboxes written twice to workaround a bug.
16520 		 */
16521 		tg3_flag_set(tp, TXD_MBOX_HWBUG);
16522 
16523 		/* If we are in PCI-X mode, enable register write workaround.
16524 		 *
16525 		 * The workaround is to use indirect register accesses
16526 		 * for all chip writes not to mailbox registers.
16527 		 */
16528 		if (tg3_flag(tp, PCIX_MODE)) {
16529 			u32 pm_reg;
16530 
16531 			tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16532 
16533 			/* The chip can have it's power management PCI config
16534 			 * space registers clobbered due to this bug.
16535 			 * So explicitly force the chip into D0 here.
16536 			 */
16537 			pci_read_config_dword(tp->pdev,
16538 					      tp->pdev->pm_cap + PCI_PM_CTRL,
16539 					      &pm_reg);
16540 			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16541 			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16542 			pci_write_config_dword(tp->pdev,
16543 					       tp->pdev->pm_cap + PCI_PM_CTRL,
16544 					       pm_reg);
16545 
16546 			/* Also, force SERR#/PERR# in PCI command. */
16547 			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16548 			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16549 			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16550 		}
16551 	}
16552 
16553 	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16554 		tg3_flag_set(tp, PCI_HIGH_SPEED);
16555 	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16556 		tg3_flag_set(tp, PCI_32BIT);
16557 
16558 	/* Chip-specific fixup from Broadcom driver */
16559 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16560 	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16561 		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16562 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16563 	}
16564 
16565 	/* Default fast path register access methods */
16566 	tp->read32 = tg3_read32;
16567 	tp->write32 = tg3_write32;
16568 	tp->read32_mbox = tg3_read32;
16569 	tp->write32_mbox = tg3_write32;
16570 	tp->write32_tx_mbox = tg3_write32;
16571 	tp->write32_rx_mbox = tg3_write32;
16572 
16573 	/* Various workaround register access methods */
16574 	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16575 		tp->write32 = tg3_write_indirect_reg32;
16576 	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16577 		 (tg3_flag(tp, PCI_EXPRESS) &&
16578 		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16579 		/*
16580 		 * Back to back register writes can cause problems on these
16581 		 * chips, the workaround is to read back all reg writes
16582 		 * except those to mailbox regs.
16583 		 *
16584 		 * See tg3_write_indirect_reg32().
16585 		 */
16586 		tp->write32 = tg3_write_flush_reg32;
16587 	}
16588 
16589 	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16590 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
16591 		if (tg3_flag(tp, MBOX_WRITE_REORDER))
16592 			tp->write32_rx_mbox = tg3_write_flush_reg32;
16593 	}
16594 
16595 	if (tg3_flag(tp, ICH_WORKAROUND)) {
16596 		tp->read32 = tg3_read_indirect_reg32;
16597 		tp->write32 = tg3_write_indirect_reg32;
16598 		tp->read32_mbox = tg3_read_indirect_mbox;
16599 		tp->write32_mbox = tg3_write_indirect_mbox;
16600 		tp->write32_tx_mbox = tg3_write_indirect_mbox;
16601 		tp->write32_rx_mbox = tg3_write_indirect_mbox;
16602 
16603 		iounmap(tp->regs);
16604 		tp->regs = NULL;
16605 
16606 		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16607 		pci_cmd &= ~PCI_COMMAND_MEMORY;
16608 		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16609 	}
16610 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16611 		tp->read32_mbox = tg3_read32_mbox_5906;
16612 		tp->write32_mbox = tg3_write32_mbox_5906;
16613 		tp->write32_tx_mbox = tg3_write32_mbox_5906;
16614 		tp->write32_rx_mbox = tg3_write32_mbox_5906;
16615 	}
16616 
16617 	if (tp->write32 == tg3_write_indirect_reg32 ||
16618 	    (tg3_flag(tp, PCIX_MODE) &&
16619 	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16620 	      tg3_asic_rev(tp) == ASIC_REV_5701)))
16621 		tg3_flag_set(tp, SRAM_USE_CONFIG);
16622 
16623 	/* The memory arbiter has to be enabled in order for SRAM accesses
16624 	 * to succeed.  Normally on powerup the tg3 chip firmware will make
16625 	 * sure it is enabled, but other entities such as system netboot
16626 	 * code might disable it.
16627 	 */
16628 	val = tr32(MEMARB_MODE);
16629 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16630 
16631 	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16632 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16633 	    tg3_flag(tp, 5780_CLASS)) {
16634 		if (tg3_flag(tp, PCIX_MODE)) {
16635 			pci_read_config_dword(tp->pdev,
16636 					      tp->pcix_cap + PCI_X_STATUS,
16637 					      &val);
16638 			tp->pci_fn = val & 0x7;
16639 		}
16640 	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16641 		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
16642 		   tg3_asic_rev(tp) == ASIC_REV_5720) {
16643 		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16644 		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16645 			val = tr32(TG3_CPMU_STATUS);
16646 
16647 		if (tg3_asic_rev(tp) == ASIC_REV_5717)
16648 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16649 		else
16650 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16651 				     TG3_CPMU_STATUS_FSHFT_5719;
16652 	}
16653 
16654 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16655 		tp->write32_tx_mbox = tg3_write_flush_reg32;
16656 		tp->write32_rx_mbox = tg3_write_flush_reg32;
16657 	}
16658 
16659 	/* Get eeprom hw config before calling tg3_set_power_state().
16660 	 * In particular, the TG3_FLAG_IS_NIC flag must be
16661 	 * determined before calling tg3_set_power_state() so that
16662 	 * we know whether or not to switch out of Vaux power.
16663 	 * When the flag is set, it means that GPIO1 is used for eeprom
16664 	 * write protect and also implies that it is a LOM where GPIOs
16665 	 * are not used to switch power.
16666 	 */
16667 	tg3_get_eeprom_hw_cfg(tp);
16668 
16669 	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16670 		tg3_flag_clear(tp, TSO_CAPABLE);
16671 		tg3_flag_clear(tp, TSO_BUG);
16672 		tp->fw_needed = NULL;
16673 	}
16674 
16675 	if (tg3_flag(tp, ENABLE_APE)) {
16676 		/* Allow reads and writes to the
16677 		 * APE register and memory space.
16678 		 */
16679 		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16680 				 PCISTATE_ALLOW_APE_SHMEM_WR |
16681 				 PCISTATE_ALLOW_APE_PSPACE_WR;
16682 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16683 				       pci_state_reg);
16684 
16685 		tg3_ape_lock_init(tp);
16686 		tp->ape_hb_interval =
16687 			msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16688 	}
16689 
16690 	/* Set up tp->grc_local_ctrl before calling
16691 	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16692 	 * will bring 5700's external PHY out of reset.
16693 	 * It is also used as eeprom write protect on LOMs.
16694 	 */
16695 	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16696 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16697 	    tg3_flag(tp, EEPROM_WRITE_PROT))
16698 		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16699 				       GRC_LCLCTRL_GPIO_OUTPUT1);
16700 	/* Unused GPIO3 must be driven as output on 5752 because there
16701 	 * are no pull-up resistors on unused GPIO pins.
16702 	 */
16703 	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16704 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16705 
16706 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16707 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
16708 	    tg3_flag(tp, 57765_CLASS))
16709 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16710 
16711 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16712 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16713 		/* Turn off the debug UART. */
16714 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16715 		if (tg3_flag(tp, IS_NIC))
16716 			/* Keep VMain power. */
16717 			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16718 					      GRC_LCLCTRL_GPIO_OUTPUT0;
16719 	}
16720 
16721 	if (tg3_asic_rev(tp) == ASIC_REV_5762)
16722 		tp->grc_local_ctrl |=
16723 			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16724 
16725 	/* Switch out of Vaux if it is a NIC */
16726 	tg3_pwrsrc_switch_to_vmain(tp);
16727 
16728 	/* Derive initial jumbo mode from MTU assigned in
16729 	 * ether_setup() via the alloc_etherdev() call
16730 	 */
16731 	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16732 		tg3_flag_set(tp, JUMBO_RING_ENABLE);
16733 
16734 	/* Determine WakeOnLan speed to use. */
16735 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16736 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16737 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16738 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16739 		tg3_flag_clear(tp, WOL_SPEED_100MB);
16740 	} else {
16741 		tg3_flag_set(tp, WOL_SPEED_100MB);
16742 	}
16743 
16744 	if (tg3_asic_rev(tp) == ASIC_REV_5906)
16745 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
16746 
16747 	/* A few boards don't want Ethernet@WireSpeed phy feature */
16748 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16749 	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16750 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16751 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16752 	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16753 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16754 		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16755 
16756 	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16757 	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
16758 		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16759 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16760 		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16761 
16762 	if (tg3_flag(tp, 5705_PLUS) &&
16763 	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16764 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
16765 	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
16766 	    !tg3_flag(tp, 57765_PLUS)) {
16767 		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16768 		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
16769 		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
16770 		    tg3_asic_rev(tp) == ASIC_REV_5761) {
16771 			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16772 			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16773 				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16774 			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16775 				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16776 		} else
16777 			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16778 	}
16779 
16780 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16781 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16782 		tp->phy_otp = tg3_read_otp_phycfg(tp);
16783 		if (tp->phy_otp == 0)
16784 			tp->phy_otp = TG3_OTP_DEFAULT;
16785 	}
16786 
16787 	if (tg3_flag(tp, CPMU_PRESENT))
16788 		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16789 	else
16790 		tp->mi_mode = MAC_MI_MODE_BASE;
16791 
16792 	tp->coalesce_mode = 0;
16793 	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16794 	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
16795 		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16796 
16797 	/* Set these bits to enable statistics workaround. */
16798 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16799 	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
16800 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16801 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16802 		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16803 		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16804 	}
16805 
16806 	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16807 	    tg3_asic_rev(tp) == ASIC_REV_57780)
16808 		tg3_flag_set(tp, USE_PHYLIB);
16809 
16810 	err = tg3_mdio_init(tp);
16811 	if (err)
16812 		return err;
16813 
16814 	/* Initialize data/descriptor byte/word swapping. */
16815 	val = tr32(GRC_MODE);
16816 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16817 	    tg3_asic_rev(tp) == ASIC_REV_5762)
16818 		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16819 			GRC_MODE_WORD_SWAP_B2HRX_DATA |
16820 			GRC_MODE_B2HRX_ENABLE |
16821 			GRC_MODE_HTX2B_ENABLE |
16822 			GRC_MODE_HOST_STACKUP);
16823 	else
16824 		val &= GRC_MODE_HOST_STACKUP;
16825 
16826 	tw32(GRC_MODE, val | tp->grc_mode);
16827 
16828 	tg3_switch_clocks(tp);
16829 
16830 	/* Clear this out for sanity. */
16831 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16832 
16833 	/* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16834 	tw32(TG3PCI_REG_BASE_ADDR, 0);
16835 
16836 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16837 			      &pci_state_reg);
16838 	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16839 	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16840 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16841 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16842 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16843 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16844 			void __iomem *sram_base;
16845 
16846 			/* Write some dummy words into the SRAM status block
16847 			 * area, see if it reads back correctly.  If the return
16848 			 * value is bad, force enable the PCIX workaround.
16849 			 */
16850 			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16851 
16852 			writel(0x00000000, sram_base);
16853 			writel(0x00000000, sram_base + 4);
16854 			writel(0xffffffff, sram_base + 4);
16855 			if (readl(sram_base) != 0x00000000)
16856 				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16857 		}
16858 	}
16859 
16860 	udelay(50);
16861 	tg3_nvram_init(tp);
16862 
16863 	/* If the device has an NVRAM, no need to load patch firmware */
16864 	if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16865 	    !tg3_flag(tp, NO_NVRAM))
16866 		tp->fw_needed = NULL;
16867 
16868 	grc_misc_cfg = tr32(GRC_MISC_CFG);
16869 	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16870 
16871 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16872 	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16873 	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16874 		tg3_flag_set(tp, IS_5788);
16875 
16876 	if (!tg3_flag(tp, IS_5788) &&
16877 	    tg3_asic_rev(tp) != ASIC_REV_5700)
16878 		tg3_flag_set(tp, TAGGED_STATUS);
16879 	if (tg3_flag(tp, TAGGED_STATUS)) {
16880 		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16881 				      HOSTCC_MODE_CLRTICK_TXBD);
16882 
16883 		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16884 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16885 				       tp->misc_host_ctrl);
16886 	}
16887 
16888 	/* Preserve the APE MAC_MODE bits */
16889 	if (tg3_flag(tp, ENABLE_APE))
16890 		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16891 	else
16892 		tp->mac_mode = 0;
16893 
16894 	if (tg3_10_100_only_device(tp, ent))
16895 		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16896 
16897 	err = tg3_phy_probe(tp);
16898 	if (err) {
16899 		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16900 		/* ... but do not return immediately ... */
16901 		tg3_mdio_fini(tp);
16902 	}
16903 
16904 	tg3_read_vpd(tp);
16905 	tg3_read_fw_ver(tp);
16906 
16907 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16908 		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16909 	} else {
16910 		if (tg3_asic_rev(tp) == ASIC_REV_5700)
16911 			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16912 		else
16913 			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16914 	}
16915 
16916 	/* 5700 {AX,BX} chips have a broken status block link
16917 	 * change bit implementation, so we must use the
16918 	 * status register in those cases.
16919 	 */
16920 	if (tg3_asic_rev(tp) == ASIC_REV_5700)
16921 		tg3_flag_set(tp, USE_LINKCHG_REG);
16922 	else
16923 		tg3_flag_clear(tp, USE_LINKCHG_REG);
16924 
16925 	/* The led_ctrl is set during tg3_phy_probe, here we might
16926 	 * have to force the link status polling mechanism based
16927 	 * upon subsystem IDs.
16928 	 */
16929 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16930 	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
16931 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16932 		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16933 		tg3_flag_set(tp, USE_LINKCHG_REG);
16934 	}
16935 
16936 	/* For all SERDES we poll the MAC status register. */
16937 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16938 		tg3_flag_set(tp, POLL_SERDES);
16939 	else
16940 		tg3_flag_clear(tp, POLL_SERDES);
16941 
16942 	if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16943 		tg3_flag_set(tp, POLL_CPMU_LINK);
16944 
16945 	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16946 	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16947 	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16948 	    tg3_flag(tp, PCIX_MODE)) {
16949 		tp->rx_offset = NET_SKB_PAD;
16950 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16951 		tp->rx_copy_thresh = ~(u16)0;
16952 #endif
16953 	}
16954 
16955 	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16956 	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16957 	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16958 
16959 	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16960 
16961 	/* Increment the rx prod index on the rx std ring by at most
16962 	 * 8 for these chips to workaround hw errata.
16963 	 */
16964 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16965 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
16966 	    tg3_asic_rev(tp) == ASIC_REV_5755)
16967 		tp->rx_std_max_post = 8;
16968 
16969 	if (tg3_flag(tp, ASPM_WORKAROUND))
16970 		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16971 				     PCIE_PWR_MGMT_L1_THRESH_MSK;
16972 
16973 	return err;
16974 }
16975 
16976 #ifdef CONFIG_SPARC
16977 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16978 {
16979 	struct net_device *dev = tp->dev;
16980 	struct pci_dev *pdev = tp->pdev;
16981 	struct device_node *dp = pci_device_to_OF_node(pdev);
16982 	const unsigned char *addr;
16983 	int len;
16984 
16985 	addr = of_get_property(dp, "local-mac-address", &len);
16986 	if (addr && len == ETH_ALEN) {
16987 		memcpy(dev->dev_addr, addr, ETH_ALEN);
16988 		return 0;
16989 	}
16990 	return -ENODEV;
16991 }
16992 
16993 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16994 {
16995 	struct net_device *dev = tp->dev;
16996 
16997 	memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
16998 	return 0;
16999 }
17000 #endif
17001 
17002 static int tg3_get_device_address(struct tg3 *tp)
17003 {
17004 	struct net_device *dev = tp->dev;
17005 	u32 hi, lo, mac_offset;
17006 	int addr_ok = 0;
17007 	int err;
17008 
17009 #ifdef CONFIG_SPARC
17010 	if (!tg3_get_macaddr_sparc(tp))
17011 		return 0;
17012 #endif
17013 
17014 	if (tg3_flag(tp, IS_SSB_CORE)) {
17015 		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
17016 		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
17017 			return 0;
17018 	}
17019 
17020 	mac_offset = 0x7c;
17021 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
17022 	    tg3_flag(tp, 5780_CLASS)) {
17023 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
17024 			mac_offset = 0xcc;
17025 		if (tg3_nvram_lock(tp))
17026 			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
17027 		else
17028 			tg3_nvram_unlock(tp);
17029 	} else if (tg3_flag(tp, 5717_PLUS)) {
17030 		if (tp->pci_fn & 1)
17031 			mac_offset = 0xcc;
17032 		if (tp->pci_fn > 1)
17033 			mac_offset += 0x18c;
17034 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
17035 		mac_offset = 0x10;
17036 
17037 	/* First try to get it from MAC address mailbox. */
17038 	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
17039 	if ((hi >> 16) == 0x484b) {
17040 		dev->dev_addr[0] = (hi >>  8) & 0xff;
17041 		dev->dev_addr[1] = (hi >>  0) & 0xff;
17042 
17043 		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
17044 		dev->dev_addr[2] = (lo >> 24) & 0xff;
17045 		dev->dev_addr[3] = (lo >> 16) & 0xff;
17046 		dev->dev_addr[4] = (lo >>  8) & 0xff;
17047 		dev->dev_addr[5] = (lo >>  0) & 0xff;
17048 
17049 		/* Some old bootcode may report a 0 MAC address in SRAM */
17050 		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
17051 	}
17052 	if (!addr_ok) {
17053 		/* Next, try NVRAM. */
17054 		if (!tg3_flag(tp, NO_NVRAM) &&
17055 		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
17056 		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
17057 			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
17058 			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
17059 		}
17060 		/* Finally just fetch it out of the MAC control regs. */
17061 		else {
17062 			hi = tr32(MAC_ADDR_0_HIGH);
17063 			lo = tr32(MAC_ADDR_0_LOW);
17064 
17065 			dev->dev_addr[5] = lo & 0xff;
17066 			dev->dev_addr[4] = (lo >> 8) & 0xff;
17067 			dev->dev_addr[3] = (lo >> 16) & 0xff;
17068 			dev->dev_addr[2] = (lo >> 24) & 0xff;
17069 			dev->dev_addr[1] = hi & 0xff;
17070 			dev->dev_addr[0] = (hi >> 8) & 0xff;
17071 		}
17072 	}
17073 
17074 	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
17075 #ifdef CONFIG_SPARC
17076 		if (!tg3_get_default_macaddr_sparc(tp))
17077 			return 0;
17078 #endif
17079 		return -EINVAL;
17080 	}
17081 	return 0;
17082 }
17083 
17084 #define BOUNDARY_SINGLE_CACHELINE	1
17085 #define BOUNDARY_MULTI_CACHELINE	2
17086 
17087 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17088 {
17089 	int cacheline_size;
17090 	u8 byte;
17091 	int goal;
17092 
17093 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17094 	if (byte == 0)
17095 		cacheline_size = 1024;
17096 	else
17097 		cacheline_size = (int) byte * 4;
17098 
17099 	/* On 5703 and later chips, the boundary bits have no
17100 	 * effect.
17101 	 */
17102 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17103 	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
17104 	    !tg3_flag(tp, PCI_EXPRESS))
17105 		goto out;
17106 
17107 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17108 	goal = BOUNDARY_MULTI_CACHELINE;
17109 #else
17110 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17111 	goal = BOUNDARY_SINGLE_CACHELINE;
17112 #else
17113 	goal = 0;
17114 #endif
17115 #endif
17116 
17117 	if (tg3_flag(tp, 57765_PLUS)) {
17118 		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17119 		goto out;
17120 	}
17121 
17122 	if (!goal)
17123 		goto out;
17124 
17125 	/* PCI controllers on most RISC systems tend to disconnect
17126 	 * when a device tries to burst across a cache-line boundary.
17127 	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
17128 	 *
17129 	 * Unfortunately, for PCI-E there are only limited
17130 	 * write-side controls for this, and thus for reads
17131 	 * we will still get the disconnects.  We'll also waste
17132 	 * these PCI cycles for both read and write for chips
17133 	 * other than 5700 and 5701 which do not implement the
17134 	 * boundary bits.
17135 	 */
17136 	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17137 		switch (cacheline_size) {
17138 		case 16:
17139 		case 32:
17140 		case 64:
17141 		case 128:
17142 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17143 				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17144 					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17145 			} else {
17146 				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17147 					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17148 			}
17149 			break;
17150 
17151 		case 256:
17152 			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17153 				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17154 			break;
17155 
17156 		default:
17157 			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17158 				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17159 			break;
17160 		}
17161 	} else if (tg3_flag(tp, PCI_EXPRESS)) {
17162 		switch (cacheline_size) {
17163 		case 16:
17164 		case 32:
17165 		case 64:
17166 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17167 				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17168 				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17169 				break;
17170 			}
17171 			/* fallthrough */
17172 		case 128:
17173 		default:
17174 			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17175 			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17176 			break;
17177 		}
17178 	} else {
17179 		switch (cacheline_size) {
17180 		case 16:
17181 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17182 				val |= (DMA_RWCTRL_READ_BNDRY_16 |
17183 					DMA_RWCTRL_WRITE_BNDRY_16);
17184 				break;
17185 			}
17186 			/* fallthrough */
17187 		case 32:
17188 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17189 				val |= (DMA_RWCTRL_READ_BNDRY_32 |
17190 					DMA_RWCTRL_WRITE_BNDRY_32);
17191 				break;
17192 			}
17193 			/* fallthrough */
17194 		case 64:
17195 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17196 				val |= (DMA_RWCTRL_READ_BNDRY_64 |
17197 					DMA_RWCTRL_WRITE_BNDRY_64);
17198 				break;
17199 			}
17200 			/* fallthrough */
17201 		case 128:
17202 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17203 				val |= (DMA_RWCTRL_READ_BNDRY_128 |
17204 					DMA_RWCTRL_WRITE_BNDRY_128);
17205 				break;
17206 			}
17207 			/* fallthrough */
17208 		case 256:
17209 			val |= (DMA_RWCTRL_READ_BNDRY_256 |
17210 				DMA_RWCTRL_WRITE_BNDRY_256);
17211 			break;
17212 		case 512:
17213 			val |= (DMA_RWCTRL_READ_BNDRY_512 |
17214 				DMA_RWCTRL_WRITE_BNDRY_512);
17215 			break;
17216 		case 1024:
17217 		default:
17218 			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17219 				DMA_RWCTRL_WRITE_BNDRY_1024);
17220 			break;
17221 		}
17222 	}
17223 
17224 out:
17225 	return val;
17226 }
17227 
17228 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17229 			   int size, bool to_device)
17230 {
17231 	struct tg3_internal_buffer_desc test_desc;
17232 	u32 sram_dma_descs;
17233 	int i, ret;
17234 
17235 	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17236 
17237 	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17238 	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17239 	tw32(RDMAC_STATUS, 0);
17240 	tw32(WDMAC_STATUS, 0);
17241 
17242 	tw32(BUFMGR_MODE, 0);
17243 	tw32(FTQ_RESET, 0);
17244 
17245 	test_desc.addr_hi = ((u64) buf_dma) >> 32;
17246 	test_desc.addr_lo = buf_dma & 0xffffffff;
17247 	test_desc.nic_mbuf = 0x00002100;
17248 	test_desc.len = size;
17249 
17250 	/*
17251 	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
17252 	 * the *second* time the tg3 driver was getting loaded after an
17253 	 * initial scan.
17254 	 *
17255 	 * Broadcom tells me:
17256 	 *   ...the DMA engine is connected to the GRC block and a DMA
17257 	 *   reset may affect the GRC block in some unpredictable way...
17258 	 *   The behavior of resets to individual blocks has not been tested.
17259 	 *
17260 	 * Broadcom noted the GRC reset will also reset all sub-components.
17261 	 */
17262 	if (to_device) {
17263 		test_desc.cqid_sqid = (13 << 8) | 2;
17264 
17265 		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17266 		udelay(40);
17267 	} else {
17268 		test_desc.cqid_sqid = (16 << 8) | 7;
17269 
17270 		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17271 		udelay(40);
17272 	}
17273 	test_desc.flags = 0x00000005;
17274 
17275 	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17276 		u32 val;
17277 
17278 		val = *(((u32 *)&test_desc) + i);
17279 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17280 				       sram_dma_descs + (i * sizeof(u32)));
17281 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17282 	}
17283 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17284 
17285 	if (to_device)
17286 		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17287 	else
17288 		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17289 
17290 	ret = -ENODEV;
17291 	for (i = 0; i < 40; i++) {
17292 		u32 val;
17293 
17294 		if (to_device)
17295 			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17296 		else
17297 			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17298 		if ((val & 0xffff) == sram_dma_descs) {
17299 			ret = 0;
17300 			break;
17301 		}
17302 
17303 		udelay(100);
17304 	}
17305 
17306 	return ret;
17307 }
17308 
17309 #define TEST_BUFFER_SIZE	0x2000
17310 
17311 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17312 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17313 	{ },
17314 };
17315 
17316 static int tg3_test_dma(struct tg3 *tp)
17317 {
17318 	dma_addr_t buf_dma;
17319 	u32 *buf, saved_dma_rwctrl;
17320 	int ret = 0;
17321 
17322 	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17323 				 &buf_dma, GFP_KERNEL);
17324 	if (!buf) {
17325 		ret = -ENOMEM;
17326 		goto out_nofree;
17327 	}
17328 
17329 	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17330 			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17331 
17332 	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17333 
17334 	if (tg3_flag(tp, 57765_PLUS))
17335 		goto out;
17336 
17337 	if (tg3_flag(tp, PCI_EXPRESS)) {
17338 		/* DMA read watermark not used on PCIE */
17339 		tp->dma_rwctrl |= 0x00180000;
17340 	} else if (!tg3_flag(tp, PCIX_MODE)) {
17341 		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17342 		    tg3_asic_rev(tp) == ASIC_REV_5750)
17343 			tp->dma_rwctrl |= 0x003f0000;
17344 		else
17345 			tp->dma_rwctrl |= 0x003f000f;
17346 	} else {
17347 		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17348 		    tg3_asic_rev(tp) == ASIC_REV_5704) {
17349 			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17350 			u32 read_water = 0x7;
17351 
17352 			/* If the 5704 is behind the EPB bridge, we can
17353 			 * do the less restrictive ONE_DMA workaround for
17354 			 * better performance.
17355 			 */
17356 			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17357 			    tg3_asic_rev(tp) == ASIC_REV_5704)
17358 				tp->dma_rwctrl |= 0x8000;
17359 			else if (ccval == 0x6 || ccval == 0x7)
17360 				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17361 
17362 			if (tg3_asic_rev(tp) == ASIC_REV_5703)
17363 				read_water = 4;
17364 			/* Set bit 23 to enable PCIX hw bug fix */
17365 			tp->dma_rwctrl |=
17366 				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17367 				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17368 				(1 << 23);
17369 		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17370 			/* 5780 always in PCIX mode */
17371 			tp->dma_rwctrl |= 0x00144000;
17372 		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17373 			/* 5714 always in PCIX mode */
17374 			tp->dma_rwctrl |= 0x00148000;
17375 		} else {
17376 			tp->dma_rwctrl |= 0x001b000f;
17377 		}
17378 	}
17379 	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17380 		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17381 
17382 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17383 	    tg3_asic_rev(tp) == ASIC_REV_5704)
17384 		tp->dma_rwctrl &= 0xfffffff0;
17385 
17386 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17387 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
17388 		/* Remove this if it causes problems for some boards. */
17389 		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17390 
17391 		/* On 5700/5701 chips, we need to set this bit.
17392 		 * Otherwise the chip will issue cacheline transactions
17393 		 * to streamable DMA memory with not all the byte
17394 		 * enables turned on.  This is an error on several
17395 		 * RISC PCI controllers, in particular sparc64.
17396 		 *
17397 		 * On 5703/5704 chips, this bit has been reassigned
17398 		 * a different meaning.  In particular, it is used
17399 		 * on those chips to enable a PCI-X workaround.
17400 		 */
17401 		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17402 	}
17403 
17404 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17405 
17406 
17407 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17408 	    tg3_asic_rev(tp) != ASIC_REV_5701)
17409 		goto out;
17410 
17411 	/* It is best to perform DMA test with maximum write burst size
17412 	 * to expose the 5700/5701 write DMA bug.
17413 	 */
17414 	saved_dma_rwctrl = tp->dma_rwctrl;
17415 	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17416 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17417 
17418 	while (1) {
17419 		u32 *p = buf, i;
17420 
17421 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17422 			p[i] = i;
17423 
17424 		/* Send the buffer to the chip. */
17425 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17426 		if (ret) {
17427 			dev_err(&tp->pdev->dev,
17428 				"%s: Buffer write failed. err = %d\n",
17429 				__func__, ret);
17430 			break;
17431 		}
17432 
17433 		/* Now read it back. */
17434 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17435 		if (ret) {
17436 			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17437 				"err = %d\n", __func__, ret);
17438 			break;
17439 		}
17440 
17441 		/* Verify it. */
17442 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17443 			if (p[i] == i)
17444 				continue;
17445 
17446 			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17447 			    DMA_RWCTRL_WRITE_BNDRY_16) {
17448 				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17449 				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17450 				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17451 				break;
17452 			} else {
17453 				dev_err(&tp->pdev->dev,
17454 					"%s: Buffer corrupted on read back! "
17455 					"(%d != %d)\n", __func__, p[i], i);
17456 				ret = -ENODEV;
17457 				goto out;
17458 			}
17459 		}
17460 
17461 		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17462 			/* Success. */
17463 			ret = 0;
17464 			break;
17465 		}
17466 	}
17467 	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17468 	    DMA_RWCTRL_WRITE_BNDRY_16) {
17469 		/* DMA test passed without adjusting DMA boundary,
17470 		 * now look for chipsets that are known to expose the
17471 		 * DMA bug without failing the test.
17472 		 */
17473 		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17474 			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17475 			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17476 		} else {
17477 			/* Safe to use the calculated DMA boundary. */
17478 			tp->dma_rwctrl = saved_dma_rwctrl;
17479 		}
17480 
17481 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17482 	}
17483 
17484 out:
17485 	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17486 out_nofree:
17487 	return ret;
17488 }
17489 
17490 static void tg3_init_bufmgr_config(struct tg3 *tp)
17491 {
17492 	if (tg3_flag(tp, 57765_PLUS)) {
17493 		tp->bufmgr_config.mbuf_read_dma_low_water =
17494 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17495 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17496 			DEFAULT_MB_MACRX_LOW_WATER_57765;
17497 		tp->bufmgr_config.mbuf_high_water =
17498 			DEFAULT_MB_HIGH_WATER_57765;
17499 
17500 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17501 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17502 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17503 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17504 		tp->bufmgr_config.mbuf_high_water_jumbo =
17505 			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17506 	} else if (tg3_flag(tp, 5705_PLUS)) {
17507 		tp->bufmgr_config.mbuf_read_dma_low_water =
17508 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17509 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17510 			DEFAULT_MB_MACRX_LOW_WATER_5705;
17511 		tp->bufmgr_config.mbuf_high_water =
17512 			DEFAULT_MB_HIGH_WATER_5705;
17513 		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17514 			tp->bufmgr_config.mbuf_mac_rx_low_water =
17515 				DEFAULT_MB_MACRX_LOW_WATER_5906;
17516 			tp->bufmgr_config.mbuf_high_water =
17517 				DEFAULT_MB_HIGH_WATER_5906;
17518 		}
17519 
17520 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17521 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17522 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17523 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17524 		tp->bufmgr_config.mbuf_high_water_jumbo =
17525 			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17526 	} else {
17527 		tp->bufmgr_config.mbuf_read_dma_low_water =
17528 			DEFAULT_MB_RDMA_LOW_WATER;
17529 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17530 			DEFAULT_MB_MACRX_LOW_WATER;
17531 		tp->bufmgr_config.mbuf_high_water =
17532 			DEFAULT_MB_HIGH_WATER;
17533 
17534 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17535 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17536 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17537 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17538 		tp->bufmgr_config.mbuf_high_water_jumbo =
17539 			DEFAULT_MB_HIGH_WATER_JUMBO;
17540 	}
17541 
17542 	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17543 	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17544 }
17545 
17546 static char *tg3_phy_string(struct tg3 *tp)
17547 {
17548 	switch (tp->phy_id & TG3_PHY_ID_MASK) {
17549 	case TG3_PHY_ID_BCM5400:	return "5400";
17550 	case TG3_PHY_ID_BCM5401:	return "5401";
17551 	case TG3_PHY_ID_BCM5411:	return "5411";
17552 	case TG3_PHY_ID_BCM5701:	return "5701";
17553 	case TG3_PHY_ID_BCM5703:	return "5703";
17554 	case TG3_PHY_ID_BCM5704:	return "5704";
17555 	case TG3_PHY_ID_BCM5705:	return "5705";
17556 	case TG3_PHY_ID_BCM5750:	return "5750";
17557 	case TG3_PHY_ID_BCM5752:	return "5752";
17558 	case TG3_PHY_ID_BCM5714:	return "5714";
17559 	case TG3_PHY_ID_BCM5780:	return "5780";
17560 	case TG3_PHY_ID_BCM5755:	return "5755";
17561 	case TG3_PHY_ID_BCM5787:	return "5787";
17562 	case TG3_PHY_ID_BCM5784:	return "5784";
17563 	case TG3_PHY_ID_BCM5756:	return "5722/5756";
17564 	case TG3_PHY_ID_BCM5906:	return "5906";
17565 	case TG3_PHY_ID_BCM5761:	return "5761";
17566 	case TG3_PHY_ID_BCM5718C:	return "5718C";
17567 	case TG3_PHY_ID_BCM5718S:	return "5718S";
17568 	case TG3_PHY_ID_BCM57765:	return "57765";
17569 	case TG3_PHY_ID_BCM5719C:	return "5719C";
17570 	case TG3_PHY_ID_BCM5720C:	return "5720C";
17571 	case TG3_PHY_ID_BCM5762:	return "5762C";
17572 	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
17573 	case 0:			return "serdes";
17574 	default:		return "unknown";
17575 	}
17576 }
17577 
17578 static char *tg3_bus_string(struct tg3 *tp, char *str)
17579 {
17580 	if (tg3_flag(tp, PCI_EXPRESS)) {
17581 		strcpy(str, "PCI Express");
17582 		return str;
17583 	} else if (tg3_flag(tp, PCIX_MODE)) {
17584 		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17585 
17586 		strcpy(str, "PCIX:");
17587 
17588 		if ((clock_ctrl == 7) ||
17589 		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17590 		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17591 			strcat(str, "133MHz");
17592 		else if (clock_ctrl == 0)
17593 			strcat(str, "33MHz");
17594 		else if (clock_ctrl == 2)
17595 			strcat(str, "50MHz");
17596 		else if (clock_ctrl == 4)
17597 			strcat(str, "66MHz");
17598 		else if (clock_ctrl == 6)
17599 			strcat(str, "100MHz");
17600 	} else {
17601 		strcpy(str, "PCI:");
17602 		if (tg3_flag(tp, PCI_HIGH_SPEED))
17603 			strcat(str, "66MHz");
17604 		else
17605 			strcat(str, "33MHz");
17606 	}
17607 	if (tg3_flag(tp, PCI_32BIT))
17608 		strcat(str, ":32-bit");
17609 	else
17610 		strcat(str, ":64-bit");
17611 	return str;
17612 }
17613 
17614 static void tg3_init_coal(struct tg3 *tp)
17615 {
17616 	struct ethtool_coalesce *ec = &tp->coal;
17617 
17618 	memset(ec, 0, sizeof(*ec));
17619 	ec->cmd = ETHTOOL_GCOALESCE;
17620 	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17621 	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17622 	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17623 	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17624 	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17625 	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17626 	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17627 	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17628 	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17629 
17630 	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17631 				 HOSTCC_MODE_CLRTICK_TXBD)) {
17632 		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17633 		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17634 		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17635 		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17636 	}
17637 
17638 	if (tg3_flag(tp, 5705_PLUS)) {
17639 		ec->rx_coalesce_usecs_irq = 0;
17640 		ec->tx_coalesce_usecs_irq = 0;
17641 		ec->stats_block_coalesce_usecs = 0;
17642 	}
17643 }
17644 
17645 static int tg3_init_one(struct pci_dev *pdev,
17646 				  const struct pci_device_id *ent)
17647 {
17648 	struct net_device *dev;
17649 	struct tg3 *tp;
17650 	int i, err;
17651 	u32 sndmbx, rcvmbx, intmbx;
17652 	char str[40];
17653 	u64 dma_mask, persist_dma_mask;
17654 	netdev_features_t features = 0;
17655 
17656 	printk_once(KERN_INFO "%s\n", version);
17657 
17658 	err = pci_enable_device(pdev);
17659 	if (err) {
17660 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17661 		return err;
17662 	}
17663 
17664 	err = pci_request_regions(pdev, DRV_MODULE_NAME);
17665 	if (err) {
17666 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17667 		goto err_out_disable_pdev;
17668 	}
17669 
17670 	pci_set_master(pdev);
17671 
17672 	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17673 	if (!dev) {
17674 		err = -ENOMEM;
17675 		goto err_out_free_res;
17676 	}
17677 
17678 	SET_NETDEV_DEV(dev, &pdev->dev);
17679 
17680 	tp = netdev_priv(dev);
17681 	tp->pdev = pdev;
17682 	tp->dev = dev;
17683 	tp->rx_mode = TG3_DEF_RX_MODE;
17684 	tp->tx_mode = TG3_DEF_TX_MODE;
17685 	tp->irq_sync = 1;
17686 	tp->pcierr_recovery = false;
17687 
17688 	if (tg3_debug > 0)
17689 		tp->msg_enable = tg3_debug;
17690 	else
17691 		tp->msg_enable = TG3_DEF_MSG_ENABLE;
17692 
17693 	if (pdev_is_ssb_gige_core(pdev)) {
17694 		tg3_flag_set(tp, IS_SSB_CORE);
17695 		if (ssb_gige_must_flush_posted_writes(pdev))
17696 			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17697 		if (ssb_gige_one_dma_at_once(pdev))
17698 			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17699 		if (ssb_gige_have_roboswitch(pdev)) {
17700 			tg3_flag_set(tp, USE_PHYLIB);
17701 			tg3_flag_set(tp, ROBOSWITCH);
17702 		}
17703 		if (ssb_gige_is_rgmii(pdev))
17704 			tg3_flag_set(tp, RGMII_MODE);
17705 	}
17706 
17707 	/* The word/byte swap controls here control register access byte
17708 	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17709 	 * setting below.
17710 	 */
17711 	tp->misc_host_ctrl =
17712 		MISC_HOST_CTRL_MASK_PCI_INT |
17713 		MISC_HOST_CTRL_WORD_SWAP |
17714 		MISC_HOST_CTRL_INDIR_ACCESS |
17715 		MISC_HOST_CTRL_PCISTATE_RW;
17716 
17717 	/* The NONFRM (non-frame) byte/word swap controls take effect
17718 	 * on descriptor entries, anything which isn't packet data.
17719 	 *
17720 	 * The StrongARM chips on the board (one for tx, one for rx)
17721 	 * are running in big-endian mode.
17722 	 */
17723 	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17724 			GRC_MODE_WSWAP_NONFRM_DATA);
17725 #ifdef __BIG_ENDIAN
17726 	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17727 #endif
17728 	spin_lock_init(&tp->lock);
17729 	spin_lock_init(&tp->indirect_lock);
17730 	INIT_WORK(&tp->reset_task, tg3_reset_task);
17731 
17732 	tp->regs = pci_ioremap_bar(pdev, BAR_0);
17733 	if (!tp->regs) {
17734 		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17735 		err = -ENOMEM;
17736 		goto err_out_free_dev;
17737 	}
17738 
17739 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17740 	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17741 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17742 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17743 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17744 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17745 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17746 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17747 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17748 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17749 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17750 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17751 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17752 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17753 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17754 		tg3_flag_set(tp, ENABLE_APE);
17755 		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17756 		if (!tp->aperegs) {
17757 			dev_err(&pdev->dev,
17758 				"Cannot map APE registers, aborting\n");
17759 			err = -ENOMEM;
17760 			goto err_out_iounmap;
17761 		}
17762 	}
17763 
17764 	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17765 	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17766 
17767 	dev->ethtool_ops = &tg3_ethtool_ops;
17768 	dev->watchdog_timeo = TG3_TX_TIMEOUT;
17769 	dev->netdev_ops = &tg3_netdev_ops;
17770 	dev->irq = pdev->irq;
17771 
17772 	err = tg3_get_invariants(tp, ent);
17773 	if (err) {
17774 		dev_err(&pdev->dev,
17775 			"Problem fetching invariants of chip, aborting\n");
17776 		goto err_out_apeunmap;
17777 	}
17778 
17779 	/* The EPB bridge inside 5714, 5715, and 5780 and any
17780 	 * device behind the EPB cannot support DMA addresses > 40-bit.
17781 	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17782 	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17783 	 * do DMA address check in tg3_start_xmit().
17784 	 */
17785 	if (tg3_flag(tp, IS_5788))
17786 		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17787 	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17788 		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17789 #ifdef CONFIG_HIGHMEM
17790 		dma_mask = DMA_BIT_MASK(64);
17791 #endif
17792 	} else
17793 		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17794 
17795 	/* Configure DMA attributes. */
17796 	if (dma_mask > DMA_BIT_MASK(32)) {
17797 		err = pci_set_dma_mask(pdev, dma_mask);
17798 		if (!err) {
17799 			features |= NETIF_F_HIGHDMA;
17800 			err = pci_set_consistent_dma_mask(pdev,
17801 							  persist_dma_mask);
17802 			if (err < 0) {
17803 				dev_err(&pdev->dev, "Unable to obtain 64 bit "
17804 					"DMA for consistent allocations\n");
17805 				goto err_out_apeunmap;
17806 			}
17807 		}
17808 	}
17809 	if (err || dma_mask == DMA_BIT_MASK(32)) {
17810 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17811 		if (err) {
17812 			dev_err(&pdev->dev,
17813 				"No usable DMA configuration, aborting\n");
17814 			goto err_out_apeunmap;
17815 		}
17816 	}
17817 
17818 	tg3_init_bufmgr_config(tp);
17819 
17820 	/* 5700 B0 chips do not support checksumming correctly due
17821 	 * to hardware bugs.
17822 	 */
17823 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17824 		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17825 
17826 		if (tg3_flag(tp, 5755_PLUS))
17827 			features |= NETIF_F_IPV6_CSUM;
17828 	}
17829 
17830 	/* TSO is on by default on chips that support hardware TSO.
17831 	 * Firmware TSO on older chips gives lower performance, so it
17832 	 * is off by default, but can be enabled using ethtool.
17833 	 */
17834 	if ((tg3_flag(tp, HW_TSO_1) ||
17835 	     tg3_flag(tp, HW_TSO_2) ||
17836 	     tg3_flag(tp, HW_TSO_3)) &&
17837 	    (features & NETIF_F_IP_CSUM))
17838 		features |= NETIF_F_TSO;
17839 	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17840 		if (features & NETIF_F_IPV6_CSUM)
17841 			features |= NETIF_F_TSO6;
17842 		if (tg3_flag(tp, HW_TSO_3) ||
17843 		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
17844 		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17845 		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17846 		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
17847 		    tg3_asic_rev(tp) == ASIC_REV_57780)
17848 			features |= NETIF_F_TSO_ECN;
17849 	}
17850 
17851 	dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17852 			 NETIF_F_HW_VLAN_CTAG_RX;
17853 	dev->vlan_features |= features;
17854 
17855 	/*
17856 	 * Add loopback capability only for a subset of devices that support
17857 	 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
17858 	 * loopback for the remaining devices.
17859 	 */
17860 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17861 	    !tg3_flag(tp, CPMU_PRESENT))
17862 		/* Add the loopback capability */
17863 		features |= NETIF_F_LOOPBACK;
17864 
17865 	dev->hw_features |= features;
17866 	dev->priv_flags |= IFF_UNICAST_FLT;
17867 
17868 	/* MTU range: 60 - 9000 or 1500, depending on hardware */
17869 	dev->min_mtu = TG3_MIN_MTU;
17870 	dev->max_mtu = TG3_MAX_MTU(tp);
17871 
17872 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17873 	    !tg3_flag(tp, TSO_CAPABLE) &&
17874 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17875 		tg3_flag_set(tp, MAX_RXPEND_64);
17876 		tp->rx_pending = 63;
17877 	}
17878 
17879 	err = tg3_get_device_address(tp);
17880 	if (err) {
17881 		dev_err(&pdev->dev,
17882 			"Could not obtain valid ethernet address, aborting\n");
17883 		goto err_out_apeunmap;
17884 	}
17885 
17886 	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17887 	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17888 	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17889 	for (i = 0; i < tp->irq_max; i++) {
17890 		struct tg3_napi *tnapi = &tp->napi[i];
17891 
17892 		tnapi->tp = tp;
17893 		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17894 
17895 		tnapi->int_mbox = intmbx;
17896 		if (i <= 4)
17897 			intmbx += 0x8;
17898 		else
17899 			intmbx += 0x4;
17900 
17901 		tnapi->consmbox = rcvmbx;
17902 		tnapi->prodmbox = sndmbx;
17903 
17904 		if (i)
17905 			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17906 		else
17907 			tnapi->coal_now = HOSTCC_MODE_NOW;
17908 
17909 		if (!tg3_flag(tp, SUPPORT_MSIX))
17910 			break;
17911 
17912 		/*
17913 		 * If we support MSIX, we'll be using RSS.  If we're using
17914 		 * RSS, the first vector only handles link interrupts and the
17915 		 * remaining vectors handle rx and tx interrupts.  Reuse the
17916 		 * mailbox values for the next iteration.  The values we setup
17917 		 * above are still useful for the single vectored mode.
17918 		 */
17919 		if (!i)
17920 			continue;
17921 
17922 		rcvmbx += 0x8;
17923 
17924 		if (sndmbx & 0x4)
17925 			sndmbx -= 0x4;
17926 		else
17927 			sndmbx += 0xc;
17928 	}
17929 
17930 	/*
17931 	 * Reset chip in case UNDI or EFI driver did not shutdown
17932 	 * DMA self test will enable WDMAC and we'll see (spurious)
17933 	 * pending DMA on the PCI bus at that point.
17934 	 */
17935 	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17936 	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17937 		tg3_full_lock(tp, 0);
17938 		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17939 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17940 		tg3_full_unlock(tp);
17941 	}
17942 
17943 	err = tg3_test_dma(tp);
17944 	if (err) {
17945 		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17946 		goto err_out_apeunmap;
17947 	}
17948 
17949 	tg3_init_coal(tp);
17950 
17951 	pci_set_drvdata(pdev, dev);
17952 
17953 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17954 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
17955 	    tg3_asic_rev(tp) == ASIC_REV_5762)
17956 		tg3_flag_set(tp, PTP_CAPABLE);
17957 
17958 	tg3_timer_init(tp);
17959 
17960 	tg3_carrier_off(tp);
17961 
17962 	err = register_netdev(dev);
17963 	if (err) {
17964 		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17965 		goto err_out_apeunmap;
17966 	}
17967 
17968 	if (tg3_flag(tp, PTP_CAPABLE)) {
17969 		tg3_ptp_init(tp);
17970 		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17971 						   &tp->pdev->dev);
17972 		if (IS_ERR(tp->ptp_clock))
17973 			tp->ptp_clock = NULL;
17974 	}
17975 
17976 	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17977 		    tp->board_part_number,
17978 		    tg3_chip_rev_id(tp),
17979 		    tg3_bus_string(tp, str),
17980 		    dev->dev_addr);
17981 
17982 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17983 		char *ethtype;
17984 
17985 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17986 			ethtype = "10/100Base-TX";
17987 		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17988 			ethtype = "1000Base-SX";
17989 		else
17990 			ethtype = "10/100/1000Base-T";
17991 
17992 		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17993 			    "(WireSpeed[%d], EEE[%d])\n",
17994 			    tg3_phy_string(tp), ethtype,
17995 			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17996 			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17997 	}
17998 
17999 	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
18000 		    (dev->features & NETIF_F_RXCSUM) != 0,
18001 		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
18002 		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
18003 		    tg3_flag(tp, ENABLE_ASF) != 0,
18004 		    tg3_flag(tp, TSO_CAPABLE) != 0);
18005 	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
18006 		    tp->dma_rwctrl,
18007 		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
18008 		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
18009 
18010 	pci_save_state(pdev);
18011 
18012 	return 0;
18013 
18014 err_out_apeunmap:
18015 	if (tp->aperegs) {
18016 		iounmap(tp->aperegs);
18017 		tp->aperegs = NULL;
18018 	}
18019 
18020 err_out_iounmap:
18021 	if (tp->regs) {
18022 		iounmap(tp->regs);
18023 		tp->regs = NULL;
18024 	}
18025 
18026 err_out_free_dev:
18027 	free_netdev(dev);
18028 
18029 err_out_free_res:
18030 	pci_release_regions(pdev);
18031 
18032 err_out_disable_pdev:
18033 	if (pci_is_enabled(pdev))
18034 		pci_disable_device(pdev);
18035 	return err;
18036 }
18037 
18038 static void tg3_remove_one(struct pci_dev *pdev)
18039 {
18040 	struct net_device *dev = pci_get_drvdata(pdev);
18041 
18042 	if (dev) {
18043 		struct tg3 *tp = netdev_priv(dev);
18044 
18045 		tg3_ptp_fini(tp);
18046 
18047 		release_firmware(tp->fw);
18048 
18049 		tg3_reset_task_cancel(tp);
18050 
18051 		if (tg3_flag(tp, USE_PHYLIB)) {
18052 			tg3_phy_fini(tp);
18053 			tg3_mdio_fini(tp);
18054 		}
18055 
18056 		unregister_netdev(dev);
18057 		if (tp->aperegs) {
18058 			iounmap(tp->aperegs);
18059 			tp->aperegs = NULL;
18060 		}
18061 		if (tp->regs) {
18062 			iounmap(tp->regs);
18063 			tp->regs = NULL;
18064 		}
18065 		free_netdev(dev);
18066 		pci_release_regions(pdev);
18067 		pci_disable_device(pdev);
18068 	}
18069 }
18070 
18071 #ifdef CONFIG_PM_SLEEP
18072 static int tg3_suspend(struct device *device)
18073 {
18074 	struct pci_dev *pdev = to_pci_dev(device);
18075 	struct net_device *dev = pci_get_drvdata(pdev);
18076 	struct tg3 *tp = netdev_priv(dev);
18077 	int err = 0;
18078 
18079 	rtnl_lock();
18080 
18081 	if (!netif_running(dev))
18082 		goto unlock;
18083 
18084 	tg3_reset_task_cancel(tp);
18085 	tg3_phy_stop(tp);
18086 	tg3_netif_stop(tp);
18087 
18088 	tg3_timer_stop(tp);
18089 
18090 	tg3_full_lock(tp, 1);
18091 	tg3_disable_ints(tp);
18092 	tg3_full_unlock(tp);
18093 
18094 	netif_device_detach(dev);
18095 
18096 	tg3_full_lock(tp, 0);
18097 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
18098 	tg3_flag_clear(tp, INIT_COMPLETE);
18099 	tg3_full_unlock(tp);
18100 
18101 	err = tg3_power_down_prepare(tp);
18102 	if (err) {
18103 		int err2;
18104 
18105 		tg3_full_lock(tp, 0);
18106 
18107 		tg3_flag_set(tp, INIT_COMPLETE);
18108 		err2 = tg3_restart_hw(tp, true);
18109 		if (err2)
18110 			goto out;
18111 
18112 		tg3_timer_start(tp);
18113 
18114 		netif_device_attach(dev);
18115 		tg3_netif_start(tp);
18116 
18117 out:
18118 		tg3_full_unlock(tp);
18119 
18120 		if (!err2)
18121 			tg3_phy_start(tp);
18122 	}
18123 
18124 unlock:
18125 	rtnl_unlock();
18126 	return err;
18127 }
18128 
18129 static int tg3_resume(struct device *device)
18130 {
18131 	struct pci_dev *pdev = to_pci_dev(device);
18132 	struct net_device *dev = pci_get_drvdata(pdev);
18133 	struct tg3 *tp = netdev_priv(dev);
18134 	int err = 0;
18135 
18136 	rtnl_lock();
18137 
18138 	if (!netif_running(dev))
18139 		goto unlock;
18140 
18141 	netif_device_attach(dev);
18142 
18143 	tg3_full_lock(tp, 0);
18144 
18145 	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18146 
18147 	tg3_flag_set(tp, INIT_COMPLETE);
18148 	err = tg3_restart_hw(tp,
18149 			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18150 	if (err)
18151 		goto out;
18152 
18153 	tg3_timer_start(tp);
18154 
18155 	tg3_netif_start(tp);
18156 
18157 out:
18158 	tg3_full_unlock(tp);
18159 
18160 	if (!err)
18161 		tg3_phy_start(tp);
18162 
18163 unlock:
18164 	rtnl_unlock();
18165 	return err;
18166 }
18167 #endif /* CONFIG_PM_SLEEP */
18168 
18169 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18170 
18171 static void tg3_shutdown(struct pci_dev *pdev)
18172 {
18173 	struct net_device *dev = pci_get_drvdata(pdev);
18174 	struct tg3 *tp = netdev_priv(dev);
18175 
18176 	rtnl_lock();
18177 	netif_device_detach(dev);
18178 
18179 	if (netif_running(dev))
18180 		dev_close(dev);
18181 
18182 	if (system_state == SYSTEM_POWER_OFF)
18183 		tg3_power_down(tp);
18184 
18185 	rtnl_unlock();
18186 }
18187 
18188 /**
18189  * tg3_io_error_detected - called when PCI error is detected
18190  * @pdev: Pointer to PCI device
18191  * @state: The current pci connection state
18192  *
18193  * This function is called after a PCI bus error affecting
18194  * this device has been detected.
18195  */
18196 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18197 					      pci_channel_state_t state)
18198 {
18199 	struct net_device *netdev = pci_get_drvdata(pdev);
18200 	struct tg3 *tp = netdev_priv(netdev);
18201 	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18202 
18203 	netdev_info(netdev, "PCI I/O error detected\n");
18204 
18205 	rtnl_lock();
18206 
18207 	/* We probably don't have netdev yet */
18208 	if (!netdev || !netif_running(netdev))
18209 		goto done;
18210 
18211 	/* We needn't recover from permanent error */
18212 	if (state == pci_channel_io_frozen)
18213 		tp->pcierr_recovery = true;
18214 
18215 	tg3_phy_stop(tp);
18216 
18217 	tg3_netif_stop(tp);
18218 
18219 	tg3_timer_stop(tp);
18220 
18221 	/* Want to make sure that the reset task doesn't run */
18222 	tg3_reset_task_cancel(tp);
18223 
18224 	netif_device_detach(netdev);
18225 
18226 	/* Clean up software state, even if MMIO is blocked */
18227 	tg3_full_lock(tp, 0);
18228 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18229 	tg3_full_unlock(tp);
18230 
18231 done:
18232 	if (state == pci_channel_io_perm_failure) {
18233 		if (netdev) {
18234 			tg3_napi_enable(tp);
18235 			dev_close(netdev);
18236 		}
18237 		err = PCI_ERS_RESULT_DISCONNECT;
18238 	} else {
18239 		pci_disable_device(pdev);
18240 	}
18241 
18242 	rtnl_unlock();
18243 
18244 	return err;
18245 }
18246 
18247 /**
18248  * tg3_io_slot_reset - called after the pci bus has been reset.
18249  * @pdev: Pointer to PCI device
18250  *
18251  * Restart the card from scratch, as if from a cold-boot.
18252  * At this point, the card has exprienced a hard reset,
18253  * followed by fixups by BIOS, and has its config space
18254  * set up identically to what it was at cold boot.
18255  */
18256 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18257 {
18258 	struct net_device *netdev = pci_get_drvdata(pdev);
18259 	struct tg3 *tp = netdev_priv(netdev);
18260 	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18261 	int err;
18262 
18263 	rtnl_lock();
18264 
18265 	if (pci_enable_device(pdev)) {
18266 		dev_err(&pdev->dev,
18267 			"Cannot re-enable PCI device after reset.\n");
18268 		goto done;
18269 	}
18270 
18271 	pci_set_master(pdev);
18272 	pci_restore_state(pdev);
18273 	pci_save_state(pdev);
18274 
18275 	if (!netdev || !netif_running(netdev)) {
18276 		rc = PCI_ERS_RESULT_RECOVERED;
18277 		goto done;
18278 	}
18279 
18280 	err = tg3_power_up(tp);
18281 	if (err)
18282 		goto done;
18283 
18284 	rc = PCI_ERS_RESULT_RECOVERED;
18285 
18286 done:
18287 	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18288 		tg3_napi_enable(tp);
18289 		dev_close(netdev);
18290 	}
18291 	rtnl_unlock();
18292 
18293 	return rc;
18294 }
18295 
18296 /**
18297  * tg3_io_resume - called when traffic can start flowing again.
18298  * @pdev: Pointer to PCI device
18299  *
18300  * This callback is called when the error recovery driver tells
18301  * us that its OK to resume normal operation.
18302  */
18303 static void tg3_io_resume(struct pci_dev *pdev)
18304 {
18305 	struct net_device *netdev = pci_get_drvdata(pdev);
18306 	struct tg3 *tp = netdev_priv(netdev);
18307 	int err;
18308 
18309 	rtnl_lock();
18310 
18311 	if (!netdev || !netif_running(netdev))
18312 		goto done;
18313 
18314 	tg3_full_lock(tp, 0);
18315 	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18316 	tg3_flag_set(tp, INIT_COMPLETE);
18317 	err = tg3_restart_hw(tp, true);
18318 	if (err) {
18319 		tg3_full_unlock(tp);
18320 		netdev_err(netdev, "Cannot restart hardware after reset.\n");
18321 		goto done;
18322 	}
18323 
18324 	netif_device_attach(netdev);
18325 
18326 	tg3_timer_start(tp);
18327 
18328 	tg3_netif_start(tp);
18329 
18330 	tg3_full_unlock(tp);
18331 
18332 	tg3_phy_start(tp);
18333 
18334 done:
18335 	tp->pcierr_recovery = false;
18336 	rtnl_unlock();
18337 }
18338 
18339 static const struct pci_error_handlers tg3_err_handler = {
18340 	.error_detected	= tg3_io_error_detected,
18341 	.slot_reset	= tg3_io_slot_reset,
18342 	.resume		= tg3_io_resume
18343 };
18344 
18345 static struct pci_driver tg3_driver = {
18346 	.name		= DRV_MODULE_NAME,
18347 	.id_table	= tg3_pci_tbl,
18348 	.probe		= tg3_init_one,
18349 	.remove		= tg3_remove_one,
18350 	.err_handler	= &tg3_err_handler,
18351 	.driver.pm	= &tg3_pm_ops,
18352 	.shutdown	= tg3_shutdown,
18353 };
18354 
18355 module_pci_driver(tg3_driver);
18356