/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2016 Broadcom Corporation.
 * Copyright (C) 2016-2017 Broadcom Limited.
 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 * refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2016 Broadcom Corporation.
 *	Copyright (C) 2016-2017 Broadcom Ltd.
 *	Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 *	refers to Broadcom Inc. and/or its subsidiaries.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/crc32poly.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
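
/* The flag name is token-pasted onto TG3_FLAG_, so e.g.
 * tg3_flag(tp, ENABLE_APE) tests TG3_FLAG_ENABLE_APE in the
 * tp->tg3_flags bitmap; the set/clear variants update the same bit
 * atomically via the generic bitop helpers above.
 */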

#define DRV_MODULE_NAME		"tg3"
/* DO NOT UPDATE TG3_*_NUM defines */
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			137

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			ETH_ZLEN
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

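/* Indirect mailbox writes: the RX return-ring consumer and standard
 * ring producer mailboxes have dedicated PCI config-space aliases and
 * are written directly; everything else goes through the indirect
 * register window at offset off + 0x5600.
 */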
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver isn't holding any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

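/* Acquire an APE lock: post the requester's bit in the request
 * register, then poll the matching grant register for up to 1 ms
 * (100 x 10 us).  If the grant never shows exactly our bit, revoke
 * the request and return -EBUSY.
 */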
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

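/* Grab TG3_APE_LOCK_MEM and wait, up to timeout_us, for the APE to
 * finish servicing any previously posted event.  On success the MEM
 * lock is still held by the caller; on timeout it has been dropped.
 */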
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

#ifdef CONFIG_TIGON3_HWMON
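/* Returns nonzero if the APE never cleared EVENT_PENDING within
 * timeout_us, i.e. a truthy return means timeout here, not success.
 */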
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
#endif

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait up to 20 ms for the APE to service the previous event. */
	err = tg3_ape_event_lock(tp, 20000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

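/* Tell the APE firmware about driver state transitions.  On init the
 * host segment signature, driver ID and behavior flags are
 * (re)published before signalling STATE_START; on shutdown either
 * WOL or UNLOAD is advertised depending on wake-up configuration.
 */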
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_send_ape_heartbeat(struct tg3 *tp,
				   unsigned long interval)
{
	/* Check whether the heartbeat interval has elapsed */
	if (!tg3_flag(tp, ENABLE_APE) ||
	    time_before(jiffies, tp->ape_hb_jiffies + interval))
		return;

	tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
	tp->ape_hb_jiffies = jiffies;
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

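/* Sequence the core clock back to its default configuration; the
 * 44 MHz/ALTCLK bits are dropped in two steps, which appears to be
 * the required ordering.  Chips with a CPMU and the 5780 class
 * manage their clocks elsewhere, so this is a no-op for them.
 */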
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

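/* Read a PHY register through the MAC's MI (MDIO) interface: PHY
 * autopolling is paused while the frame is shuttled through
 * MAC_MI_COM, and the busy bit is polled for up to PHY_BUSY_LOOPS
 * iterations before giving up with -EBUSY.
 */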
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

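/* Clause 45 access tunneled through Clause 22: the MMD device and
 * register address are programmed first, then the data is moved with
 * the control register switched to no-post-increment data mode; this
 * is the standard MDIO register 13/14 indirection.
 */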
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

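/* mii_bus accessors handed to phylib; they simply bridge to the
 * driver's own MDIO routines under tp->lock.
 */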
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
1553 	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1554 		tg3_bmcr_reset(tp);
1555 
1556 	i = mdiobus_register(tp->mdio_bus);
1557 	if (i) {
1558 		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1559 		mdiobus_free(tp->mdio_bus);
1560 		return i;
1561 	}
1562 
1563 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1564 
1565 	if (!phydev || !phydev->drv) {
1566 		dev_warn(&tp->pdev->dev, "No PHY devices\n");
1567 		mdiobus_unregister(tp->mdio_bus);
1568 		mdiobus_free(tp->mdio_bus);
1569 		return -ENODEV;
1570 	}
1571 
1572 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1573 	case PHY_ID_BCM57780:
1574 		phydev->interface = PHY_INTERFACE_MODE_GMII;
1575 		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1576 		break;
1577 	case PHY_ID_BCM50610:
1578 	case PHY_ID_BCM50610M:
1579 		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1580 				     PHY_BRCM_RX_REFCLK_UNUSED |
1581 				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
1582 				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
1583 		fallthrough;
1584 	case PHY_ID_RTL8211C:
1585 		phydev->interface = PHY_INTERFACE_MODE_RGMII;
1586 		break;
1587 	case PHY_ID_RTL8201E:
1588 	case PHY_ID_BCMAC131:
1589 		phydev->interface = PHY_INTERFACE_MODE_MII;
1590 		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1591 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
1592 		break;
1593 	}
1594 
1595 	tg3_flag_set(tp, MDIOBUS_INITED);
1596 
1597 	if (tg3_asic_rev(tp) == ASIC_REV_5785)
1598 		tg3_mdio_config_5785(tp);
1599 
1600 	return 0;
1601 }
1602 
1603 static void tg3_mdio_fini(struct tg3 *tp)
1604 {
1605 	if (tg3_flag(tp, MDIOBUS_INITED)) {
1606 		tg3_flag_clear(tp, MDIOBUS_INITED);
1607 		mdiobus_unregister(tp->mdio_bus);
1608 		mdiobus_free(tp->mdio_bus);
1609 	}
1610 }
1611 
1612 /* tp->lock is held. */
1613 static inline void tg3_generate_fw_event(struct tg3 *tp)
1614 {
1615 	u32 val;
1616 
1617 	val = tr32(GRC_RX_CPU_EVENT);
1618 	val |= GRC_RX_CPU_DRIVER_EVENT;
1619 	tw32_f(GRC_RX_CPU_EVENT, val);
1620 
1621 	tp->last_event_jiffies = jiffies;
1622 }
1623 
1624 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1625 
1626 /* tp->lock is held. */
1627 static void tg3_wait_for_event_ack(struct tg3 *tp)
1628 {
1629 	int i;
1630 	unsigned int delay_cnt;
1631 	long time_remain;
1632 
1633 	/* If enough time has passed, no wait is necessary. */
1634 	time_remain = (long)(tp->last_event_jiffies + 1 +
1635 		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1636 		      (long)jiffies;
1637 	if (time_remain < 0)
1638 		return;
1639 
1640 	/* Check if we can shorten the wait time. */
1641 	delay_cnt = jiffies_to_usecs(time_remain);
1642 	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1643 		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1644 	delay_cnt = (delay_cnt >> 3) + 1;
1645 
1646 	for (i = 0; i < delay_cnt; i++) {
1647 		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1648 			break;
1649 		if (pci_channel_offline(tp->pdev))
1650 			break;
1651 
1652 		udelay(8);
1653 	}
1654 }
1655 
1656 /* tp->lock is held. */
1657 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1658 {
1659 	u32 reg, val;
1660 
1661 	val = 0;
1662 	if (!tg3_readphy(tp, MII_BMCR, &reg))
1663 		val = reg << 16;
1664 	if (!tg3_readphy(tp, MII_BMSR, &reg))
1665 		val |= (reg & 0xffff);
1666 	*data++ = val;
1667 
1668 	val = 0;
1669 	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1670 		val = reg << 16;
1671 	if (!tg3_readphy(tp, MII_LPA, &reg))
1672 		val |= (reg & 0xffff);
1673 	*data++ = val;
1674 
1675 	val = 0;
1676 	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1677 		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1678 			val = reg << 16;
1679 		if (!tg3_readphy(tp, MII_STAT1000, &reg))
1680 			val |= (reg & 0xffff);
1681 	}
1682 	*data++ = val;
1683 
1684 	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1685 		val = reg << 16;
1686 	else
1687 		val = 0;
1688 	*data++ = val;
1689 }
1690 
1691 /* tp->lock is held. */
1692 static void tg3_ump_link_report(struct tg3 *tp)
1693 {
1694 	u32 data[4];
1695 
1696 	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1697 		return;
1698 
1699 	tg3_phy_gather_ump_data(tp, data);
1700 
1701 	tg3_wait_for_event_ack(tp);
1702 
1703 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1704 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1705 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1706 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1707 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1708 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1709 
1710 	tg3_generate_fw_event(tp);
1711 }
1712 
1713 /* tp->lock is held. */
1714 static void tg3_stop_fw(struct tg3 *tp)
1715 {
1716 	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1717 		/* Wait for RX cpu to ACK the previous event. */
1718 		tg3_wait_for_event_ack(tp);
1719 
1720 		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1721 
1722 		tg3_generate_fw_event(tp);
1723 
1724 		/* Wait for RX cpu to ACK this event. */
1725 		tg3_wait_for_event_ack(tp);
1726 	}
1727 }
1728 
1729 /* tp->lock is held. */
1730 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1731 {
1732 	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1733 		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1734 
1735 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1736 		switch (kind) {
1737 		case RESET_KIND_INIT:
1738 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1739 				      DRV_STATE_START);
1740 			break;
1741 
1742 		case RESET_KIND_SHUTDOWN:
1743 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1744 				      DRV_STATE_UNLOAD);
1745 			break;
1746 
1747 		case RESET_KIND_SUSPEND:
1748 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1749 				      DRV_STATE_SUSPEND);
1750 			break;
1751 
1752 		default:
1753 			break;
1754 		}
1755 	}
1756 }
1757 
1758 /* tp->lock is held. */
1759 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1760 {
1761 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1762 		switch (kind) {
1763 		case RESET_KIND_INIT:
1764 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1765 				      DRV_STATE_START_DONE);
1766 			break;
1767 
1768 		case RESET_KIND_SHUTDOWN:
1769 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1770 				      DRV_STATE_UNLOAD_DONE);
1771 			break;
1772 
1773 		default:
1774 			break;
1775 		}
1776 	}
1777 }
1778 
1779 /* tp->lock is held. */
1780 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1781 {
1782 	if (tg3_flag(tp, ENABLE_ASF)) {
1783 		switch (kind) {
1784 		case RESET_KIND_INIT:
1785 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1786 				      DRV_STATE_START);
1787 			break;
1788 
1789 		case RESET_KIND_SHUTDOWN:
1790 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1791 				      DRV_STATE_UNLOAD);
1792 			break;
1793 
1794 		case RESET_KIND_SUSPEND:
1795 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1796 				      DRV_STATE_SUSPEND);
1797 			break;
1798 
1799 		default:
1800 			break;
1801 		}
1802 	}
1803 }
1804 
1805 static int tg3_poll_fw(struct tg3 *tp)
1806 {
1807 	int i;
1808 	u32 val;
1809 
1810 	if (tg3_flag(tp, NO_FWARE_REPORTED))
1811 		return 0;
1812 
1813 	if (tg3_flag(tp, IS_SSB_CORE)) {
1814 		/* We don't use firmware. */
1815 		return 0;
1816 	}
1817 
1818 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1819 		/* Wait up to 20ms for init done. */
1820 		for (i = 0; i < 200; i++) {
1821 			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1822 				return 0;
1823 			if (pci_channel_offline(tp->pdev))
1824 				return -ENODEV;
1825 
1826 			udelay(100);
1827 		}
1828 		return -ENODEV;
1829 	}
1830 
	/* Wait for firmware initialization to complete; bootcode signals
	 * completion by writing back the one's complement of the magic
	 * value the driver stored in the mailbox before the reset.
	 */
1832 	for (i = 0; i < 100000; i++) {
1833 		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1834 		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1835 			break;
1836 		if (pci_channel_offline(tp->pdev)) {
1837 			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1838 				tg3_flag_set(tp, NO_FWARE_REPORTED);
1839 				netdev_info(tp->dev, "No firmware running\n");
1840 			}
1841 
1842 			break;
1843 		}
1844 
1845 		udelay(10);
1846 	}
1847 
1848 	/* Chip might not be fitted with firmware.  Some Sun onboard
1849 	 * parts are configured like that.  So don't signal the timeout
1850 	 * of the above loop as an error, but do report the lack of
1851 	 * running firmware once.
1852 	 */
1853 	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1854 		tg3_flag_set(tp, NO_FWARE_REPORTED);
1855 
1856 		netdev_info(tp->dev, "No firmware running\n");
1857 	}
1858 
1859 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1860 		/* The 57765 A0 needs a little more
1861 		 * time to do some important work.
1862 		 */
1863 		mdelay(10);
1864 	}
1865 
1866 	return 0;
1867 }
1868 
1869 static void tg3_link_report(struct tg3 *tp)
1870 {
1871 	if (!netif_carrier_ok(tp->dev)) {
1872 		netif_info(tp, link, tp->dev, "Link is down\n");
1873 		tg3_ump_link_report(tp);
1874 	} else if (netif_msg_link(tp)) {
1875 		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1876 			    (tp->link_config.active_speed == SPEED_1000 ?
1877 			     1000 :
1878 			     (tp->link_config.active_speed == SPEED_100 ?
1879 			      100 : 10)),
1880 			    (tp->link_config.active_duplex == DUPLEX_FULL ?
1881 			     "full" : "half"));
1882 
1883 		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1884 			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1885 			    "on" : "off",
1886 			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1887 			    "on" : "off");
1888 
1889 		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1890 			netdev_info(tp->dev, "EEE is %s\n",
1891 				    tp->setlpicnt ? "enabled" : "disabled");
1892 
1893 		tg3_ump_link_report(tp);
1894 	}
1895 
1896 	tp->link_up = netif_carrier_ok(tp->dev);
1897 }
1898 
1899 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1900 {
1901 	u32 flowctrl = 0;
1902 
1903 	if (adv & ADVERTISE_PAUSE_CAP) {
1904 		flowctrl |= FLOW_CTRL_RX;
1905 		if (!(adv & ADVERTISE_PAUSE_ASYM))
1906 			flowctrl |= FLOW_CTRL_TX;
1907 	} else if (adv & ADVERTISE_PAUSE_ASYM)
1908 		flowctrl |= FLOW_CTRL_TX;
1909 
1910 	return flowctrl;
1911 }
1912 
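/* Map FLOW_CTRL_{TX,RX} onto the 1000BASE-X pause advertisement bits
 * (IEEE 802.3 Annex 28B): TX+RX advertises symmetric pause, TX-only
 * advertises asymmetric pause only, and RX-only advertises both bits.
 */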
1913 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1914 {
1915 	u16 miireg;
1916 
1917 	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1918 		miireg = ADVERTISE_1000XPAUSE;
1919 	else if (flow_ctrl & FLOW_CTRL_TX)
1920 		miireg = ADVERTISE_1000XPSE_ASYM;
1921 	else if (flow_ctrl & FLOW_CTRL_RX)
1922 		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1923 	else
1924 		miireg = 0;
1925 
1926 	return miireg;
1927 }
1928 
1929 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1930 {
1931 	u32 flowctrl = 0;
1932 
1933 	if (adv & ADVERTISE_1000XPAUSE) {
1934 		flowctrl |= FLOW_CTRL_RX;
1935 		if (!(adv & ADVERTISE_1000XPSE_ASYM))
1936 			flowctrl |= FLOW_CTRL_TX;
1937 	} else if (adv & ADVERTISE_1000XPSE_ASYM)
1938 		flowctrl |= FLOW_CTRL_TX;
1939 
1940 	return flowctrl;
1941 }
1942 
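/* Resolve the pause mode actually in effect from the local and
 * link-partner 1000BASE-X advertisements, following the IEEE 802.3
 * Annex 28B resolution table.
 */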
1943 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1944 {
1945 	u8 cap = 0;
1946 
1947 	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1948 		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1949 	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1950 		if (lcladv & ADVERTISE_1000XPAUSE)
1951 			cap = FLOW_CTRL_RX;
1952 		if (rmtadv & ADVERTISE_1000XPAUSE)
1953 			cap = FLOW_CTRL_TX;
1954 	}
1955 
1956 	return cap;
1957 }
1958 
1959 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1960 {
1961 	u8 autoneg;
1962 	u8 flowctrl = 0;
1963 	u32 old_rx_mode = tp->rx_mode;
1964 	u32 old_tx_mode = tp->tx_mode;
1965 
1966 	if (tg3_flag(tp, USE_PHYLIB))
1967 		autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1968 	else
1969 		autoneg = tp->link_config.autoneg;
1970 
1971 	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1972 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1973 			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1974 		else
1975 			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1976 	} else
1977 		flowctrl = tp->link_config.flowctrl;
1978 
1979 	tp->link_config.active_flowctrl = flowctrl;
1980 
1981 	if (flowctrl & FLOW_CTRL_RX)
1982 		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1983 	else
1984 		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1985 
1986 	if (old_rx_mode != tp->rx_mode)
1987 		tw32_f(MAC_RX_MODE, tp->rx_mode);
1988 
1989 	if (flowctrl & FLOW_CTRL_TX)
1990 		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1991 	else
1992 		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1993 
1994 	if (old_tx_mode != tp->tx_mode)
1995 		tw32_f(MAC_TX_MODE, tp->tx_mode);
1996 }
1997 
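/* phylib link-change callback (registered via phy_connect() below).
 * It runs without tp->lock, so take the lock here before touching
 * MAC state.
 */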
1998 static void tg3_adjust_link(struct net_device *dev)
1999 {
2000 	u8 oldflowctrl, linkmesg = 0;
2001 	u32 mac_mode, lcl_adv, rmt_adv;
2002 	struct tg3 *tp = netdev_priv(dev);
2003 	struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2004 
2005 	spin_lock_bh(&tp->lock);
2006 
2007 	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2008 				    MAC_MODE_HALF_DUPLEX);
2009 
2010 	oldflowctrl = tp->link_config.active_flowctrl;
2011 
2012 	if (phydev->link) {
2013 		lcl_adv = 0;
2014 		rmt_adv = 0;
2015 
2016 		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2017 			mac_mode |= MAC_MODE_PORT_MODE_MII;
2018 		else if (phydev->speed == SPEED_1000 ||
2019 			 tg3_asic_rev(tp) != ASIC_REV_5785)
2020 			mac_mode |= MAC_MODE_PORT_MODE_GMII;
2021 		else
2022 			mac_mode |= MAC_MODE_PORT_MODE_MII;
2023 
2024 		if (phydev->duplex == DUPLEX_HALF)
2025 			mac_mode |= MAC_MODE_HALF_DUPLEX;
2026 		else {
2027 			lcl_adv = mii_advertise_flowctrl(
2028 				  tp->link_config.flowctrl);
2029 
2030 			if (phydev->pause)
2031 				rmt_adv = LPA_PAUSE_CAP;
2032 			if (phydev->asym_pause)
2033 				rmt_adv |= LPA_PAUSE_ASYM;
2034 		}
2035 
2036 		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2037 	} else
2038 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
2039 
2040 	if (mac_mode != tp->mac_mode) {
2041 		tp->mac_mode = mac_mode;
2042 		tw32_f(MAC_MODE, tp->mac_mode);
2043 		udelay(40);
2044 	}
2045 
2046 	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2047 		if (phydev->speed == SPEED_10)
2048 			tw32(MAC_MI_STAT,
2049 			     MAC_MI_STAT_10MBPS_MODE |
2050 			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2051 		else
2052 			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2053 	}
2054 
2055 	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2056 		tw32(MAC_TX_LENGTHS,
2057 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2058 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2059 		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2060 	else
2061 		tw32(MAC_TX_LENGTHS,
2062 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2063 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2064 		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2065 
2066 	if (phydev->link != tp->old_link ||
2067 	    phydev->speed != tp->link_config.active_speed ||
2068 	    phydev->duplex != tp->link_config.active_duplex ||
2069 	    oldflowctrl != tp->link_config.active_flowctrl)
2070 		linkmesg = 1;
2071 
2072 	tp->old_link = phydev->link;
2073 	tp->link_config.active_speed = phydev->speed;
2074 	tp->link_config.active_duplex = phydev->duplex;
2075 
2076 	spin_unlock_bh(&tp->lock);
2077 
2078 	if (linkmesg)
2079 		tg3_link_report(tp);
2080 }
2081 
2082 static int tg3_phy_init(struct tg3 *tp)
2083 {
2084 	struct phy_device *phydev;
2085 
2086 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2087 		return 0;
2088 
2089 	/* Bring the PHY back to a known state. */
2090 	tg3_bmcr_reset(tp);
2091 
2092 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2093 
2094 	/* Attach the MAC to the PHY. */
2095 	phydev = phy_connect(tp->dev, phydev_name(phydev),
2096 			     tg3_adjust_link, phydev->interface);
2097 	if (IS_ERR(phydev)) {
2098 		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2099 		return PTR_ERR(phydev);
2100 	}
2101 
2102 	/* Mask with MAC supported features. */
2103 	switch (phydev->interface) {
2104 	case PHY_INTERFACE_MODE_GMII:
2105 	case PHY_INTERFACE_MODE_RGMII:
2106 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2107 			phy_set_max_speed(phydev, SPEED_1000);
2108 			phy_support_asym_pause(phydev);
2109 			break;
2110 		}
2111 		fallthrough;
2112 	case PHY_INTERFACE_MODE_MII:
2113 		phy_set_max_speed(phydev, SPEED_100);
2114 		phy_support_asym_pause(phydev);
2115 		break;
2116 	default:
2117 		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2118 		return -EINVAL;
2119 	}
2120 
2121 	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2122 
2123 	phy_attached_info(phydev);
2124 
2125 	return 0;
2126 }
2127 
2128 static void tg3_phy_start(struct tg3 *tp)
2129 {
2130 	struct phy_device *phydev;
2131 
2132 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2133 		return;
2134 
2135 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2136 
2137 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2138 		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2139 		phydev->speed = tp->link_config.speed;
2140 		phydev->duplex = tp->link_config.duplex;
2141 		phydev->autoneg = tp->link_config.autoneg;
2142 		ethtool_convert_legacy_u32_to_link_mode(
2143 			phydev->advertising, tp->link_config.advertising);
2144 	}
2145 
2146 	phy_start(phydev);
2147 
2148 	phy_start_aneg(phydev);
2149 }
2150 
2151 static void tg3_phy_stop(struct tg3 *tp)
2152 {
2153 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2154 		return;
2155 
2156 	phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2157 }
2158 
2159 static void tg3_phy_fini(struct tg3 *tp)
2160 {
2161 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2162 		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2163 		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2164 	}
2165 }
2166 
2167 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2168 {
2169 	int err;
2170 	u32 val;
2171 
2172 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2173 		return 0;
2174 
2175 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2176 		/* Cannot do read-modify-write on 5401 */
2177 		err = tg3_phy_auxctl_write(tp,
2178 					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2179 					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2180 					   0x4c20);
2181 		goto done;
2182 	}
2183 
2184 	err = tg3_phy_auxctl_read(tp,
2185 				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2186 	if (err)
2187 		return err;
2188 
2189 	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2190 	err = tg3_phy_auxctl_write(tp,
2191 				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2192 
2193 done:
2194 	return err;
2195 }
2196 
2197 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2198 {
2199 	u32 phytest;
2200 
2201 	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2202 		u32 phy;
2203 
2204 		tg3_writephy(tp, MII_TG3_FET_TEST,
2205 			     phytest | MII_TG3_FET_SHADOW_EN);
2206 		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2207 			if (enable)
2208 				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2209 			else
2210 				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2211 			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2212 		}
2213 		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2214 	}
2215 }
2216 
2217 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2218 {
2219 	u32 reg;
2220 
2221 	if (!tg3_flag(tp, 5705_PLUS) ||
2222 	    (tg3_flag(tp, 5717_PLUS) &&
2223 	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2224 		return;
2225 
2226 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2227 		tg3_phy_fet_toggle_apd(tp, enable);
2228 		return;
2229 	}
2230 
2231 	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2232 	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2233 	      MII_TG3_MISC_SHDW_SCR5_SDTL |
2234 	      MII_TG3_MISC_SHDW_SCR5_C125OE;
2235 	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2236 		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2237 
	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);

2241 	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2242 	if (enable)
2243 		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2244 
2245 	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2246 }
2247 
2248 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2249 {
2250 	u32 phy;
2251 
2252 	if (!tg3_flag(tp, 5705_PLUS) ||
2253 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2254 		return;
2255 
2256 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2257 		u32 ephy;
2258 
2259 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2260 			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2261 
2262 			tg3_writephy(tp, MII_TG3_FET_TEST,
2263 				     ephy | MII_TG3_FET_SHADOW_EN);
2264 			if (!tg3_readphy(tp, reg, &phy)) {
2265 				if (enable)
2266 					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2267 				else
2268 					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2269 				tg3_writephy(tp, reg, phy);
2270 			}
2271 			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2272 		}
2273 	} else {
2274 		int ret;
2275 
2276 		ret = tg3_phy_auxctl_read(tp,
2277 					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2278 		if (!ret) {
2279 			if (enable)
2280 				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2281 			else
2282 				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2283 			tg3_phy_auxctl_write(tp,
2284 					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2285 		}
2286 	}
2287 }
2288 
2289 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2290 {
2291 	int ret;
2292 	u32 val;
2293 
2294 	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2295 		return;
2296 
2297 	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2298 	if (!ret)
2299 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2300 				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2301 }
2302 
2303 static void tg3_phy_apply_otp(struct tg3 *tp)
2304 {
2305 	u32 otp, phy;
2306 
2307 	if (!tp->phy_otp)
2308 		return;
2309 
2310 	otp = tp->phy_otp;
2311 
2312 	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2313 		return;
2314 
2315 	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2316 	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2317 	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2318 
2319 	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2320 	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2321 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2322 
2323 	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2324 	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2325 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2326 
2327 	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2328 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2329 
2330 	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2331 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2332 
2333 	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2334 	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2335 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2336 
2337 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2338 }
2339 
2340 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2341 {
2342 	u32 val;
2343 	struct ethtool_eee *dest = &tp->eee;
2344 
2345 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2346 		return;
2347 
2348 	if (eee)
2349 		dest = eee;
2350 
2351 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2352 		return;
2353 
2354 	/* Pull eee_active */
	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
		dest->eee_active = 1;
	else
		dest->eee_active = 0;
2360 
2361 	/* Pull lp advertised settings */
2362 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2363 		return;
2364 	dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2365 
2366 	/* Pull advertised and eee_enabled settings */
2367 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2368 		return;
2369 	dest->eee_enabled = !!val;
2370 	dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2371 
2372 	/* Pull tx_lpi_enabled */
2373 	val = tr32(TG3_CPMU_EEE_MODE);
2374 	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2375 
2376 	/* Pull lpi timer value */
2377 	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2378 }
2379 
2380 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2381 {
2382 	u32 val;
2383 
2384 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2385 		return;
2386 
2387 	tp->setlpicnt = 0;
2388 
2389 	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2390 	    current_link_up &&
2391 	    tp->link_config.active_duplex == DUPLEX_FULL &&
2392 	    (tp->link_config.active_speed == SPEED_100 ||
2393 	     tp->link_config.active_speed == SPEED_1000)) {
2394 		u32 eeectl;
2395 
2396 		if (tp->link_config.active_speed == SPEED_1000)
2397 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2398 		else
2399 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2400 
2401 		tw32(TG3_CPMU_EEE_CTRL, eeectl);
2402 
2403 		tg3_eee_pull_config(tp, NULL);
2404 		if (tp->eee.eee_active)
2405 			tp->setlpicnt = 2;
2406 	}
2407 
2408 	if (!tp->setlpicnt) {
2409 		if (current_link_up &&
		    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2411 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2412 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2413 		}
2414 
2415 		val = tr32(TG3_CPMU_EEE_MODE);
2416 		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2417 	}
2418 }
2419 
2420 static void tg3_phy_eee_enable(struct tg3 *tp)
2421 {
2422 	u32 val;
2423 
2424 	if (tp->link_config.active_speed == SPEED_1000 &&
2425 	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2426 	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
2427 	     tg3_flag(tp, 57765_CLASS)) &&
2428 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2429 		val = MII_TG3_DSP_TAP26_ALNOKO |
2430 		      MII_TG3_DSP_TAP26_RMRXSTO;
2431 		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2432 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2433 	}
2434 
2435 	val = tr32(TG3_CPMU_EEE_MODE);
2436 	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2437 }
2438 
2439 static int tg3_wait_macro_done(struct tg3 *tp)
2440 {
2441 	int limit = 100;
2442 
2443 	while (limit--) {
2444 		u32 tmp32;
2445 
2446 		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2447 			if ((tmp32 & 0x1000) == 0)
2448 				break;
2449 		}
2450 	}
2451 	if (limit < 0)
2452 		return -EBUSY;
2453 
2454 	return 0;
2455 }
2456 
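/* Write a known test pattern to each of the four DSP channels and read
 * it back through the macro interface.  A macro timeout sets *resetp so
 * the caller resets the PHY before retrying; a data mismatch fails the
 * attempt with -EBUSY.
 */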
2457 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2458 {
2459 	static const u32 test_pat[4][6] = {
2460 	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2461 	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2462 	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2463 	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2464 	};
2465 	int chan;
2466 
2467 	for (chan = 0; chan < 4; chan++) {
2468 		int i;
2469 
2470 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2471 			     (chan * 0x2000) | 0x0200);
2472 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2473 
2474 		for (i = 0; i < 6; i++)
2475 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2476 				     test_pat[chan][i]);
2477 
2478 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2479 		if (tg3_wait_macro_done(tp)) {
2480 			*resetp = 1;
2481 			return -EBUSY;
2482 		}
2483 
2484 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2485 			     (chan * 0x2000) | 0x0200);
2486 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2487 		if (tg3_wait_macro_done(tp)) {
2488 			*resetp = 1;
2489 			return -EBUSY;
2490 		}
2491 
2492 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2493 		if (tg3_wait_macro_done(tp)) {
2494 			*resetp = 1;
2495 			return -EBUSY;
2496 		}
2497 
2498 		for (i = 0; i < 6; i += 2) {
2499 			u32 low, high;
2500 
2501 			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2502 			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2503 			    tg3_wait_macro_done(tp)) {
2504 				*resetp = 1;
2505 				return -EBUSY;
2506 			}
2507 			low &= 0x7fff;
2508 			high &= 0x000f;
2509 			if (low != test_pat[chan][i] ||
2510 			    high != test_pat[chan][i+1]) {
2511 				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2512 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2513 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2514 
2515 				return -EBUSY;
2516 			}
2517 		}
2518 	}
2519 
2520 	return 0;
2521 }
2522 
2523 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2524 {
2525 	int chan;
2526 
2527 	for (chan = 0; chan < 4; chan++) {
2528 		int i;
2529 
2530 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2531 			     (chan * 0x2000) | 0x0200);
2532 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2533 		for (i = 0; i < 6; i++)
2534 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2535 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2536 		if (tg3_wait_macro_done(tp))
2537 			return -EBUSY;
2538 	}
2539 
2540 	return 0;
2541 }
2542 
2543 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2544 {
2545 	u32 reg32, phy9_orig;
2546 	int retries, do_phy_reset, err;
2547 
2548 	retries = 10;
2549 	do_phy_reset = 1;
2550 	do {
2551 		if (do_phy_reset) {
2552 			err = tg3_bmcr_reset(tp);
2553 			if (err)
2554 				return err;
2555 			do_phy_reset = 0;
2556 		}
2557 
2558 		/* Disable transmitter and interrupt.  */
2559 		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2560 			continue;
2561 
2562 		reg32 |= 0x3000;
2563 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2564 
		/* Set full-duplex, 1000 Mbps.  */
2566 		tg3_writephy(tp, MII_BMCR,
2567 			     BMCR_FULLDPLX | BMCR_SPEED1000);
2568 
2569 		/* Set to master mode.  */
2570 		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2571 			continue;
2572 
2573 		tg3_writephy(tp, MII_CTRL1000,
2574 			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2575 
2576 		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2577 		if (err)
2578 			return err;
2579 
2580 		/* Block the PHY control access.  */
2581 		tg3_phydsp_write(tp, 0x8005, 0x0800);
2582 
2583 		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2584 		if (!err)
2585 			break;
2586 	} while (--retries);
2587 
2588 	err = tg3_phy_reset_chanpat(tp);
2589 	if (err)
2590 		return err;
2591 
2592 	tg3_phydsp_write(tp, 0x8005, 0x0000);
2593 
2594 	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2595 	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2596 
2597 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2598 
2599 	tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2600 
2601 	err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2602 	if (err)
2603 		return err;
2604 
2605 	reg32 &= ~0x3000;
2606 	tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2607 
2608 	return 0;
2609 }
2610 
2611 static void tg3_carrier_off(struct tg3 *tp)
2612 {
2613 	netif_carrier_off(tp->dev);
2614 	tp->link_up = false;
2615 }
2616 
2617 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2618 {
2619 	if (tg3_flag(tp, ENABLE_ASF))
2620 		netdev_warn(tp->dev,
2621 			    "Management side-band traffic will be interrupted during phy settings change\n");
2622 }
2623 
/* Reset the tigon3 PHY and bring it back to a fully operational state,
 * applying the chip- and PHY-specific workarounds that must follow a
 * reset.
 */
2627 static int tg3_phy_reset(struct tg3 *tp)
2628 {
2629 	u32 val, cpmuctrl;
2630 	int err;
2631 
2632 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2633 		val = tr32(GRC_MISC_CFG);
2634 		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2635 		udelay(40);
2636 	}
2637 	err  = tg3_readphy(tp, MII_BMSR, &val);
2638 	err |= tg3_readphy(tp, MII_BMSR, &val);
2639 	if (err != 0)
2640 		return -EBUSY;
2641 
2642 	if (netif_running(tp->dev) && tp->link_up) {
2643 		netif_carrier_off(tp->dev);
2644 		tg3_link_report(tp);
2645 	}
2646 
2647 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2648 	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
2649 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
2650 		err = tg3_phy_reset_5703_4_5(tp);
2651 		if (err)
2652 			return err;
2653 		goto out;
2654 	}
2655 
2656 	cpmuctrl = 0;
2657 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2658 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2659 		cpmuctrl = tr32(TG3_CPMU_CTRL);
2660 		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2661 			tw32(TG3_CPMU_CTRL,
2662 			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2663 	}
2664 
2665 	err = tg3_bmcr_reset(tp);
2666 	if (err)
2667 		return err;
2668 
2669 	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2670 		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2671 		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2672 
2673 		tw32(TG3_CPMU_CTRL, cpmuctrl);
2674 	}
2675 
2676 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2677 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2678 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2679 		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2680 		    CPMU_LSPD_1000MB_MACCLK_12_5) {
2681 			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2682 			udelay(40);
2683 			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2684 		}
2685 	}
2686 
2687 	if (tg3_flag(tp, 5717_PLUS) &&
2688 	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2689 		return 0;
2690 
2691 	tg3_phy_apply_otp(tp);
2692 
2693 	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2694 		tg3_phy_toggle_apd(tp, true);
2695 	else
2696 		tg3_phy_toggle_apd(tp, false);
2697 
2698 out:
2699 	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2700 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2701 		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2702 		tg3_phydsp_write(tp, 0x000a, 0x0323);
2703 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2704 	}
2705 
2706 	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2707 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2708 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2709 	}
2710 
2711 	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2712 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2713 			tg3_phydsp_write(tp, 0x000a, 0x310b);
2714 			tg3_phydsp_write(tp, 0x201f, 0x9506);
2715 			tg3_phydsp_write(tp, 0x401f, 0x14e2);
2716 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2717 		}
2718 	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2719 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2720 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2721 			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2722 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2723 				tg3_writephy(tp, MII_TG3_TEST1,
2724 					     MII_TG3_TEST1_TRIM_EN | 0x4);
2725 			} else
2726 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2727 
2728 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2729 		}
2730 	}
2731 
	/* Set Extended packet length bit (bit 14) on all chips that
	 * support jumbo frames.
	 */
2734 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2735 		/* Cannot do read-modify-write on 5401 */
2736 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2737 	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2738 		/* Set bit 14 with read-modify-write to preserve other bits */
2739 		err = tg3_phy_auxctl_read(tp,
2740 					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2741 		if (!err)
2742 			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2743 					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2744 	}
2745 
	/* Set PHY register 0x10 bit 0 to high FIFO elasticity to support
	 * jumbo frame transmission.
	 */
2749 	if (tg3_flag(tp, JUMBO_CAPABLE)) {
2750 		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2751 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
2752 				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2753 	}
2754 
2755 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2756 		/* adjust output voltage */
2757 		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2758 	}
2759 
2760 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2761 		tg3_phydsp_write(tp, 0xffb, 0x4000);
2762 
2763 	tg3_phy_toggle_automdix(tp, true);
2764 	tg3_phy_set_wirespeed(tp);
2765 	return 0;
2766 }
2767 
2768 #define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
2769 #define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
2770 #define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
2771 					  TG3_GPIO_MSG_NEED_VAUX)
2772 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2773 	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2774 	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2775 	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2776 	 (TG3_GPIO_MSG_DRVR_PRES << 12))
2777 
2778 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2779 	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2780 	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2781 	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2782 	 (TG3_GPIO_MSG_NEED_VAUX << 12))
2783 
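/* Each PCI function owns one 4-bit field of the shared GPIO message
 * status, so e.g. function 2 keeps its TG3_GPIO_MSG_* bits at
 * TG3_APE_GPIO_MSG_SHIFT + 8.
 */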
2784 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2785 {
2786 	u32 status, shift;
2787 
2788 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2789 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2790 		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2791 	else
2792 		status = tr32(TG3_CPMU_DRV_STATUS);
2793 
2794 	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2795 	status &= ~(TG3_GPIO_MSG_MASK << shift);
2796 	status |= (newstat << shift);
2797 
2798 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2799 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2800 		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2801 	else
2802 		tw32(TG3_CPMU_DRV_STATUS, status);
2803 
2804 	return status >> TG3_APE_GPIO_MSG_SHIFT;
2805 }
2806 
2807 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2808 {
2809 	if (!tg3_flag(tp, IS_NIC))
2810 		return 0;
2811 
2812 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2813 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2814 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2815 		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2816 			return -EIO;
2817 
2818 		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2819 
2820 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2821 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2822 
2823 		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2824 	} else {
2825 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2826 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2827 	}
2828 
2829 	return 0;
2830 }
2831 
2832 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2833 {
2834 	u32 grc_local_ctrl;
2835 
2836 	if (!tg3_flag(tp, IS_NIC) ||
2837 	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
2838 	    tg3_asic_rev(tp) == ASIC_REV_5701)
2839 		return;
2840 
2841 	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2842 
2843 	tw32_wait_f(GRC_LOCAL_CTRL,
2844 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2845 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2846 
2847 	tw32_wait_f(GRC_LOCAL_CTRL,
2848 		    grc_local_ctrl,
2849 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2850 
2851 	tw32_wait_f(GRC_LOCAL_CTRL,
2852 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2853 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2854 }
2855 
2856 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2857 {
2858 	if (!tg3_flag(tp, IS_NIC))
2859 		return;
2860 
2861 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2862 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
2863 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2864 			    (GRC_LCLCTRL_GPIO_OE0 |
2865 			     GRC_LCLCTRL_GPIO_OE1 |
2866 			     GRC_LCLCTRL_GPIO_OE2 |
2867 			     GRC_LCLCTRL_GPIO_OUTPUT0 |
2868 			     GRC_LCLCTRL_GPIO_OUTPUT1),
2869 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2870 	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2871 		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2872 		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2873 		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2874 				     GRC_LCLCTRL_GPIO_OE1 |
2875 				     GRC_LCLCTRL_GPIO_OE2 |
2876 				     GRC_LCLCTRL_GPIO_OUTPUT0 |
2877 				     GRC_LCLCTRL_GPIO_OUTPUT1 |
2878 				     tp->grc_local_ctrl;
2879 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2880 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2881 
2882 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2883 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2884 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2885 
2886 		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2887 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2888 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2889 	} else {
2890 		u32 no_gpio2;
2891 		u32 grc_local_ctrl = 0;
2892 
		/* Workaround to prevent drawing too much current. */
2894 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2895 			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2896 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2897 				    grc_local_ctrl,
2898 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2899 		}
2900 
2901 		/* On 5753 and variants, GPIO2 cannot be used. */
2902 		no_gpio2 = tp->nic_sram_data_cfg &
2903 			   NIC_SRAM_DATA_CFG_NO_GPIO2;
2904 
2905 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2906 				  GRC_LCLCTRL_GPIO_OE1 |
2907 				  GRC_LCLCTRL_GPIO_OE2 |
2908 				  GRC_LCLCTRL_GPIO_OUTPUT1 |
2909 				  GRC_LCLCTRL_GPIO_OUTPUT2;
2910 		if (no_gpio2) {
2911 			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2912 					    GRC_LCLCTRL_GPIO_OUTPUT2);
2913 		}
2914 		tw32_wait_f(GRC_LOCAL_CTRL,
2915 			    tp->grc_local_ctrl | grc_local_ctrl,
2916 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2917 
2918 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2919 
2920 		tw32_wait_f(GRC_LOCAL_CTRL,
2921 			    tp->grc_local_ctrl | grc_local_ctrl,
2922 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2923 
2924 		if (!no_gpio2) {
2925 			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2926 			tw32_wait_f(GRC_LOCAL_CTRL,
2927 				    tp->grc_local_ctrl | grc_local_ctrl,
2928 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2929 		}
2930 	}
2931 }
2932 
2933 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2934 {
2935 	u32 msg = 0;
2936 
2937 	/* Serialize power state transitions */
2938 	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2939 		return;
2940 
2941 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2942 		msg = TG3_GPIO_MSG_NEED_VAUX;
2943 
2944 	msg = tg3_set_function_status(tp, msg);
2945 
2946 	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2947 		goto done;
2948 
2949 	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2950 		tg3_pwrsrc_switch_to_vaux(tp);
2951 	else
2952 		tg3_pwrsrc_die_with_vmain(tp);
2953 
2954 done:
2955 	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2956 }
2957 
2958 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2959 {
2960 	bool need_vaux = false;
2961 
2962 	/* The GPIOs do something completely different on 57765. */
2963 	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2964 		return;
2965 
2966 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2967 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2968 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2969 		tg3_frob_aux_power_5717(tp, include_wol ?
2970 					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2971 		return;
2972 	}
2973 
2974 	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2975 		struct net_device *dev_peer;
2976 
2977 		dev_peer = pci_get_drvdata(tp->pdev_peer);
2978 
2979 		/* remove_one() may have been run on the peer. */
2980 		if (dev_peer) {
2981 			struct tg3 *tp_peer = netdev_priv(dev_peer);
2982 
2983 			if (tg3_flag(tp_peer, INIT_COMPLETE))
2984 				return;
2985 
2986 			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2987 			    tg3_flag(tp_peer, ENABLE_ASF))
2988 				need_vaux = true;
2989 		}
2990 	}
2991 
2992 	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2993 	    tg3_flag(tp, ENABLE_ASF))
2994 		need_vaux = true;
2995 
2996 	if (need_vaux)
2997 		tg3_pwrsrc_switch_to_vaux(tp);
2998 	else
2999 		tg3_pwrsrc_die_with_vmain(tp);
3000 }
3001 
3002 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3003 {
3004 	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3005 		return 1;
3006 	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3007 		if (speed != SPEED_10)
3008 			return 1;
3009 	} else if (speed == SPEED_10)
3010 		return 1;
3011 
3012 	return 0;
3013 }
3014 
3015 static bool tg3_phy_power_bug(struct tg3 *tp)
3016 {
3017 	switch (tg3_asic_rev(tp)) {
3018 	case ASIC_REV_5700:
3019 	case ASIC_REV_5704:
3020 		return true;
3021 	case ASIC_REV_5780:
3022 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3023 			return true;
3024 		return false;
3025 	case ASIC_REV_5717:
3026 		if (!tp->pci_fn)
3027 			return true;
3028 		return false;
3029 	case ASIC_REV_5719:
3030 	case ASIC_REV_5720:
3031 		if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3032 		    !tp->pci_fn)
3033 			return true;
3034 		return false;
3035 	}
3036 
3037 	return false;
3038 }
3039 
3040 static bool tg3_phy_led_bug(struct tg3 *tp)
3041 {
3042 	switch (tg3_asic_rev(tp)) {
3043 	case ASIC_REV_5719:
3044 	case ASIC_REV_5720:
3045 		if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3046 		    !tp->pci_fn)
3047 			return true;
3048 		return false;
3049 	}
3050 
3051 	return false;
3052 }
3053 
3054 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3055 {
3056 	u32 val;
3057 
3058 	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3059 		return;
3060 
3061 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3062 		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3063 			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3064 			u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3065 
3066 			sg_dig_ctrl |=
3067 				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3068 			tw32(SG_DIG_CTRL, sg_dig_ctrl);
3069 			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3070 		}
3071 		return;
3072 	}
3073 
3074 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3075 		tg3_bmcr_reset(tp);
3076 		val = tr32(GRC_MISC_CFG);
3077 		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3078 		udelay(40);
3079 		return;
3080 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3081 		u32 phytest;
3082 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3083 			u32 phy;
3084 
3085 			tg3_writephy(tp, MII_ADVERTISE, 0);
3086 			tg3_writephy(tp, MII_BMCR,
3087 				     BMCR_ANENABLE | BMCR_ANRESTART);
3088 
3089 			tg3_writephy(tp, MII_TG3_FET_TEST,
3090 				     phytest | MII_TG3_FET_SHADOW_EN);
3091 			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3092 				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3093 				tg3_writephy(tp,
3094 					     MII_TG3_FET_SHDW_AUXMODE4,
3095 					     phy);
3096 			}
3097 			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3098 		}
3099 		return;
3100 	} else if (do_low_power) {
3101 		if (!tg3_phy_led_bug(tp))
3102 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
3103 				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3104 
3105 		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3106 		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3107 		      MII_TG3_AUXCTL_PCTL_VREG_11V;
3108 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3109 	}
3110 
3111 	/* The PHY should not be powered down on some chips because
3112 	 * of bugs.
3113 	 */
3114 	if (tg3_phy_power_bug(tp))
3115 		return;
3116 
3117 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3118 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3119 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3120 		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3121 		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3122 		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3123 	}
3124 
3125 	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3126 }
3127 
3128 /* tp->lock is held. */
3129 static int tg3_nvram_lock(struct tg3 *tp)
3130 {
3131 	if (tg3_flag(tp, NVRAM)) {
3132 		int i;
3133 
3134 		if (tp->nvram_lock_cnt == 0) {
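			/* Request software arbitration slot 1 and poll
			 * up to ~160 msec (8000 * 20 usec) for the grant.
			 */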
3135 			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3136 			for (i = 0; i < 8000; i++) {
3137 				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3138 					break;
3139 				udelay(20);
3140 			}
3141 			if (i == 8000) {
3142 				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3143 				return -ENODEV;
3144 			}
3145 		}
3146 		tp->nvram_lock_cnt++;
3147 	}
3148 	return 0;
3149 }
3150 
3151 /* tp->lock is held. */
3152 static void tg3_nvram_unlock(struct tg3 *tp)
3153 {
3154 	if (tg3_flag(tp, NVRAM)) {
3155 		if (tp->nvram_lock_cnt > 0)
3156 			tp->nvram_lock_cnt--;
3157 		if (tp->nvram_lock_cnt == 0)
3158 			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3159 	}
3160 }
3161 
3162 /* tp->lock is held. */
3163 static void tg3_enable_nvram_access(struct tg3 *tp)
3164 {
3165 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3166 		u32 nvaccess = tr32(NVRAM_ACCESS);
3167 
3168 		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3169 	}
3170 }
3171 
3172 /* tp->lock is held. */
3173 static void tg3_disable_nvram_access(struct tg3 *tp)
3174 {
3175 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3176 		u32 nvaccess = tr32(NVRAM_ACCESS);
3177 
3178 		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3179 	}
3180 }
3181 
3182 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3183 					u32 offset, u32 *val)
3184 {
3185 	u32 tmp;
3186 	int i;
3187 
3188 	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3189 		return -EINVAL;
3190 
3191 	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3192 					EEPROM_ADDR_DEVID_MASK |
3193 					EEPROM_ADDR_READ);
3194 	tw32(GRC_EEPROM_ADDR,
3195 	     tmp |
3196 	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
3197 	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3198 	      EEPROM_ADDR_ADDR_MASK) |
3199 	     EEPROM_ADDR_READ | EEPROM_ADDR_START);
3200 
3201 	for (i = 0; i < 1000; i++) {
3202 		tmp = tr32(GRC_EEPROM_ADDR);
3203 
3204 		if (tmp & EEPROM_ADDR_COMPLETE)
3205 			break;
3206 		msleep(1);
3207 	}
3208 	if (!(tmp & EEPROM_ADDR_COMPLETE))
3209 		return -EBUSY;
3210 
3211 	tmp = tr32(GRC_EEPROM_DATA);
3212 
3213 	/*
3214 	 * The data will always be opposite the native endian
3215 	 * format.  Perform a blind byteswap to compensate.
3216 	 */
3217 	*val = swab32(tmp);
3218 
3219 	return 0;
3220 }
3221 
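/* Poll budget for tg3_nvram_exec_cmd(); at 10-40 usec per iteration
 * this bounds a single NVRAM command to roughly 100-400 msec.
 */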
3222 #define NVRAM_CMD_TIMEOUT 10000
3223 
3224 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3225 {
3226 	int i;
3227 
3228 	tw32(NVRAM_CMD, nvram_cmd);
3229 	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3230 		usleep_range(10, 40);
3231 		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3232 			udelay(10);
3233 			break;
3234 		}
3235 	}
3236 
3237 	if (i == NVRAM_CMD_TIMEOUT)
3238 		return -EBUSY;
3239 
3240 	return 0;
3241 }
3242 
3243 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3244 {
3245 	if (tg3_flag(tp, NVRAM) &&
3246 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3247 	    tg3_flag(tp, FLASH) &&
3248 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3249 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3250 
3251 		addr = ((addr / tp->nvram_pagesize) <<
3252 			ATMEL_AT45DB0X1B_PAGE_POS) +
3253 		       (addr % tp->nvram_pagesize);
3254 
3255 	return addr;
3256 }
3257 
3258 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3259 {
3260 	if (tg3_flag(tp, NVRAM) &&
3261 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3262 	    tg3_flag(tp, FLASH) &&
3263 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3264 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3265 
3266 		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3267 			tp->nvram_pagesize) +
3268 		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3269 
3270 	return addr;
3271 }
3272 
3273 /* NOTE: Data read in from NVRAM is byteswapped according to
3274  * the byteswapping settings for all other register accesses.
3275  * tg3 devices are BE devices, so on a BE machine, the data
3276  * returned will be exactly as it is seen in NVRAM.  On a LE
3277  * machine, the 32-bit value will be byteswapped.
3278  */
3279 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3280 {
3281 	int ret;
3282 
3283 	if (!tg3_flag(tp, NVRAM))
3284 		return tg3_nvram_read_using_eeprom(tp, offset, val);
3285 
3286 	offset = tg3_nvram_phys_addr(tp, offset);
3287 
3288 	if (offset > NVRAM_ADDR_MSK)
3289 		return -EINVAL;
3290 
3291 	ret = tg3_nvram_lock(tp);
3292 	if (ret)
3293 		return ret;
3294 
3295 	tg3_enable_nvram_access(tp);
3296 
3297 	tw32(NVRAM_ADDR, offset);
3298 	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3299 		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3300 
3301 	if (ret == 0)
3302 		*val = tr32(NVRAM_RDDATA);
3303 
3304 	tg3_disable_nvram_access(tp);
3305 
3306 	tg3_nvram_unlock(tp);
3307 
3308 	return ret;
3309 }
3310 
3311 /* Ensures NVRAM data is in bytestream format. */
3312 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3313 {
3314 	u32 v;
3315 	int res = tg3_nvram_read(tp, offset, &v);
3316 	if (!res)
3317 		*val = cpu_to_be32(v);
3318 	return res;
3319 }
3320 
3321 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3322 				    u32 offset, u32 len, u8 *buf)
3323 {
3324 	int i, j, rc = 0;
3325 	u32 val;
3326 
3327 	for (i = 0; i < len; i += 4) {
3328 		u32 addr;
3329 		__be32 data;
3330 
3331 		addr = offset + i;
3332 
3333 		memcpy(&data, buf + i, 4);
3334 
3335 		/*
3336 		 * The SEEPROM interface expects the data to always be opposite
3337 		 * the native endian format.  We accomplish this by reversing
3338 		 * all the operations that would have been performed on the
3339 		 * data from a call to tg3_nvram_read_be32().
3340 		 */
3341 		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3342 
3343 		val = tr32(GRC_EEPROM_ADDR);
3344 		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3345 
3346 		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3347 			EEPROM_ADDR_READ);
3348 		tw32(GRC_EEPROM_ADDR, val |
3349 			(0 << EEPROM_ADDR_DEVID_SHIFT) |
3350 			(addr & EEPROM_ADDR_ADDR_MASK) |
3351 			EEPROM_ADDR_START |
3352 			EEPROM_ADDR_WRITE);
3353 
3354 		for (j = 0; j < 1000; j++) {
3355 			val = tr32(GRC_EEPROM_ADDR);
3356 
3357 			if (val & EEPROM_ADDR_COMPLETE)
3358 				break;
3359 			msleep(1);
3360 		}
3361 		if (!(val & EEPROM_ADDR_COMPLETE)) {
3362 			rc = -EBUSY;
3363 			break;
3364 		}
3365 	}
3366 
3367 	return rc;
3368 }
3369 
/* offset and length are dword aligned.  Unbuffered flash parts need a
 * read-modify-write of each whole page: read the page into a scratch
 * buffer, patch in the new data, erase the page, then rewrite it word
 * by word.
 */
3371 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3372 		u8 *buf)
3373 {
3374 	int ret = 0;
3375 	u32 pagesize = tp->nvram_pagesize;
3376 	u32 pagemask = pagesize - 1;
3377 	u32 nvram_cmd;
3378 	u8 *tmp;
3379 
3380 	tmp = kmalloc(pagesize, GFP_KERNEL);
3381 	if (tmp == NULL)
3382 		return -ENOMEM;
3383 
3384 	while (len) {
3385 		int j;
3386 		u32 phy_addr, page_off, size;
3387 
3388 		phy_addr = offset & ~pagemask;
3389 
3390 		for (j = 0; j < pagesize; j += 4) {
3391 			ret = tg3_nvram_read_be32(tp, phy_addr + j,
3392 						  (__be32 *) (tmp + j));
3393 			if (ret)
3394 				break;
3395 		}
3396 		if (ret)
3397 			break;
3398 
3399 		page_off = offset & pagemask;
3400 		size = pagesize;
3401 		if (len < size)
3402 			size = len;
3403 
3404 		len -= size;
3405 
3406 		memcpy(tmp + page_off, buf, size);
3407 
3408 		offset = offset + (pagesize - page_off);
3409 
3410 		tg3_enable_nvram_access(tp);
3411 
3412 		/*
3413 		 * Before we can erase the flash page, we need
3414 		 * to issue a special "write enable" command.
3415 		 */
3416 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3417 
3418 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3419 			break;
3420 
3421 		/* Erase the target page */
3422 		tw32(NVRAM_ADDR, phy_addr);
3423 
3424 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3425 			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3426 
3427 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3428 			break;
3429 
3430 		/* Issue another write enable to start the write. */
3431 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3432 
3433 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3434 			break;
3435 
3436 		for (j = 0; j < pagesize; j += 4) {
3437 			__be32 data;
3438 
3439 			data = *((__be32 *) (tmp + j));
3440 
3441 			tw32(NVRAM_WRDATA, be32_to_cpu(data));
3442 
3443 			tw32(NVRAM_ADDR, phy_addr + j);
3444 
3445 			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3446 				NVRAM_CMD_WR;
3447 
3448 			if (j == 0)
3449 				nvram_cmd |= NVRAM_CMD_FIRST;
3450 			else if (j == (pagesize - 4))
3451 				nvram_cmd |= NVRAM_CMD_LAST;
3452 
3453 			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3454 			if (ret)
3455 				break;
3456 		}
3457 		if (ret)
3458 			break;
3459 	}
3460 
3461 	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3462 	tg3_nvram_exec_cmd(tp, nvram_cmd);
3463 
3464 	kfree(tmp);
3465 
3466 	return ret;
3467 }
3468 
3469 /* offset and length are dword aligned */
3470 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3471 		u8 *buf)
3472 {
3473 	int i, ret = 0;
3474 
3475 	for (i = 0; i < len; i += 4, offset += 4) {
3476 		u32 page_off, phy_addr, nvram_cmd;
3477 		__be32 data;
3478 
3479 		memcpy(&data, buf + i, 4);
3480 		tw32(NVRAM_WRDATA, be32_to_cpu(data));
3481 
3482 		page_off = offset % tp->nvram_pagesize;
3483 
3484 		phy_addr = tg3_nvram_phys_addr(tp, offset);
3485 
3486 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3487 
3488 		if (page_off == 0 || i == 0)
3489 			nvram_cmd |= NVRAM_CMD_FIRST;
3490 		if (page_off == (tp->nvram_pagesize - 4))
3491 			nvram_cmd |= NVRAM_CMD_LAST;
3492 
3493 		if (i == (len - 4))
3494 			nvram_cmd |= NVRAM_CMD_LAST;
3495 
3496 		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3497 		    !tg3_flag(tp, FLASH) ||
3498 		    !tg3_flag(tp, 57765_PLUS))
3499 			tw32(NVRAM_ADDR, phy_addr);
3500 
3501 		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3502 		    !tg3_flag(tp, 5755_PLUS) &&
3503 		    (tp->nvram_jedecnum == JEDEC_ST) &&
3504 		    (nvram_cmd & NVRAM_CMD_FIRST)) {
3505 			u32 cmd;
3506 
3507 			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3508 			ret = tg3_nvram_exec_cmd(tp, cmd);
3509 			if (ret)
3510 				break;
3511 		}
3512 		if (!tg3_flag(tp, FLASH)) {
3513 			/* We always do complete word writes to eeprom. */
3514 			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3515 		}
3516 
3517 		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3518 		if (ret)
3519 			break;
3520 	}
3521 	return ret;
3522 }
3523 
3524 /* offset and length are dword aligned */
3525 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3526 {
3527 	int ret;
3528 
3529 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3530 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3531 		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
3532 		udelay(40);
3533 	}
3534 
3535 	if (!tg3_flag(tp, NVRAM)) {
3536 		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3537 	} else {
3538 		u32 grc_mode;
3539 
3540 		ret = tg3_nvram_lock(tp);
3541 		if (ret)
3542 			return ret;
3543 
3544 		tg3_enable_nvram_access(tp);
3545 		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3546 			tw32(NVRAM_WRITE1, 0x406);
3547 
3548 		grc_mode = tr32(GRC_MODE);
3549 		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3550 
3551 		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3552 			ret = tg3_nvram_write_block_buffered(tp, offset, len,
3553 				buf);
3554 		} else {
3555 			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3556 				buf);
3557 		}
3558 
3559 		grc_mode = tr32(GRC_MODE);
3560 		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3561 
3562 		tg3_disable_nvram_access(tp);
3563 		tg3_nvram_unlock(tp);
3564 	}
3565 
3566 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3567 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3568 		udelay(40);
3569 	}
3570 
3571 	return ret;
3572 }
3573 
3574 #define RX_CPU_SCRATCH_BASE	0x30000
3575 #define RX_CPU_SCRATCH_SIZE	0x04000
3576 #define TX_CPU_SCRATCH_BASE	0x34000
3577 #define TX_CPU_SCRATCH_SIZE	0x04000
3578 
3579 /* tp->lock is held. */
3580 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3581 {
3582 	int i;
3583 	const int iters = 10000;
3584 
3585 	for (i = 0; i < iters; i++) {
3586 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3587 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3588 		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3589 			break;
3590 		if (pci_channel_offline(tp->pdev))
3591 			return -EBUSY;
3592 	}
3593 
3594 	return (i == iters) ? -EBUSY : 0;
3595 }
3596 
3597 /* tp->lock is held. */
3598 static int tg3_rxcpu_pause(struct tg3 *tp)
3599 {
3600 	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3601 
3602 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3603 	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3604 	udelay(10);
3605 
3606 	return rc;
3607 }
3608 
3609 /* tp->lock is held. */
3610 static int tg3_txcpu_pause(struct tg3 *tp)
3611 {
3612 	return tg3_pause_cpu(tp, TX_CPU_BASE);
3613 }
3614 
3615 /* tp->lock is held. */
3616 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3617 {
3618 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3619 	tw32_f(cpu_base + CPU_MODE,  0x00000000);
3620 }
3621 
3622 /* tp->lock is held. */
3623 static void tg3_rxcpu_resume(struct tg3 *tp)
3624 {
3625 	tg3_resume_cpu(tp, RX_CPU_BASE);
3626 }
3627 
3628 /* tp->lock is held. */
3629 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3630 {
3631 	int rc;
3632 
3633 	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3634 
3635 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3636 		u32 val = tr32(GRC_VCPU_EXT_CTRL);
3637 
3638 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3639 		return 0;
3640 	}
3641 	if (cpu_base == RX_CPU_BASE) {
3642 		rc = tg3_rxcpu_pause(tp);
3643 	} else {
3644 		/*
3645 		 * There is only an Rx CPU for the 5750 derivative in the
3646 		 * BCM4785.
3647 		 */
3648 		if (tg3_flag(tp, IS_SSB_CORE))
3649 			return 0;
3650 
3651 		rc = tg3_txcpu_pause(tp);
3652 	}
3653 
3654 	if (rc) {
3655 		netdev_err(tp->dev, "%s timed out, %s CPU\n",
3656 			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3657 		return -ENODEV;
3658 	}
3659 
3660 	/* Clear firmware's nvram arbitration. */
3661 	if (tg3_flag(tp, NVRAM))
3662 		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3663 	return 0;
3664 }
3665 
3666 static int tg3_fw_data_len(struct tg3 *tp,
3667 			   const struct tg3_firmware_hdr *fw_hdr)
3668 {
3669 	int fw_len;
3670 
	/* Non-fragmented firmware has one firmware header followed by a
	 * contiguous chunk of data to be written.  The length field in
	 * that header is not the length of data to be written but the
	 * complete length of the bss.  The data length is determined
	 * from tp->fw->size minus the header.
	 *
	 * Fragmented firmware has a main header followed by multiple
	 * fragments.  Each fragment is identical to non-fragmented
	 * firmware, with a firmware header followed by a contiguous
	 * chunk of data.  In the main header, the length field is
	 * unused and set to 0xffffffff.  In each fragment header the
	 * length is the entire size of that fragment, i.e. fragment
	 * data plus header length.  The data length is therefore the
	 * length field in the header minus TG3_FW_HDR_LEN.
	 */
3685 	if (tp->fw_len == 0xffffffff)
3686 		fw_len = be32_to_cpu(fw_hdr->len);
3687 	else
3688 		fw_len = tp->fw->size;
3689 
3690 	return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3691 }
3692 
3693 /* tp->lock is held. */
3694 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3695 				 u32 cpu_scratch_base, int cpu_scratch_size,
3696 				 const struct tg3_firmware_hdr *fw_hdr)
3697 {
3698 	int err, i;
3699 	void (*write_op)(struct tg3 *, u32, u32);
3700 	int total_len = tp->fw->size;
3701 
3702 	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3703 		netdev_err(tp->dev,
3704 			   "%s: Trying to load TX cpu firmware which is 5705\n",
3705 			   __func__);
3706 		return -EINVAL;
3707 	}
3708 
3709 	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3710 		write_op = tg3_write_mem;
3711 	else
3712 		write_op = tg3_write_indirect_reg32;
3713 
3714 	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3715 		/* It is possible that bootcode is still loading at this point.
3716 		 * Get the nvram lock first before halting the cpu.
3717 		 */
3718 		int lock_err = tg3_nvram_lock(tp);
3719 		err = tg3_halt_cpu(tp, cpu_base);
3720 		if (!lock_err)
3721 			tg3_nvram_unlock(tp);
3722 		if (err)
3723 			goto out;
3724 
3725 		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3726 			write_op(tp, cpu_scratch_base + i, 0);
3727 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3728 		tw32(cpu_base + CPU_MODE,
3729 		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3730 	} else {
		/* Subtract the additional main header for fragmented
		 * firmware and advance to the first fragment.
		 */
3734 		total_len -= TG3_FW_HDR_LEN;
3735 		fw_hdr++;
3736 	}
3737 
3738 	do {
3739 		u32 *fw_data = (u32 *)(fw_hdr + 1);
3740 		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3741 			write_op(tp, cpu_scratch_base +
3742 				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3743 				     (i * sizeof(u32)),
3744 				 be32_to_cpu(fw_data[i]));
3745 
3746 		total_len -= be32_to_cpu(fw_hdr->len);
3747 
3748 		/* Advance to next fragment */
3749 		fw_hdr = (struct tg3_firmware_hdr *)
3750 			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3751 	} while (total_len > 0);
3752 
3753 	err = 0;
3754 
3755 out:
3756 	return err;
3757 }
3758 
3759 /* tp->lock is held. */
3760 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3761 {
3762 	int i;
3763 	const int iters = 5;
3764 
3765 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3766 	tw32_f(cpu_base + CPU_PC, pc);
3767 
3768 	for (i = 0; i < iters; i++) {
3769 		if (tr32(cpu_base + CPU_PC) == pc)
3770 			break;
3771 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3772 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3773 		tw32_f(cpu_base + CPU_PC, pc);
3774 		udelay(1000);
3775 	}
3776 
3777 	return (i == iters) ? -EBUSY : 0;
3778 }
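
/* The loop above makes up to 5 attempts, re-halting the CPU and
 * rewriting the PC with a 1 ms delay between tries, so a CPU that never
 * latches the PC costs roughly 5 ms before -EBUSY is returned.
 */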
3779 
3780 /* tp->lock is held. */
3781 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3782 {
3783 	const struct tg3_firmware_hdr *fw_hdr;
3784 	int err;
3785 
3786 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3787 
	/* The firmware blob starts with version numbers, followed by the
	 * start address and length. We are setting the complete length:
	 * length = end_address_of_bss - start_address_of_text.
	 * The remainder is the blob to be loaded contiguously from the
	 * start address.
	 */
3793 
3794 	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3795 				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3796 				    fw_hdr);
3797 	if (err)
3798 		return err;
3799 
3800 	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3801 				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3802 				    fw_hdr);
3803 	if (err)
3804 		return err;
3805 
3806 	/* Now startup only the RX cpu. */
3807 	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3808 				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev,
			   "%s fails to set RX CPU PC, is %08x should be %08x\n",
			   __func__, tr32(RX_CPU_BASE + CPU_PC),
			   be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}
3816 
3817 	tg3_rxcpu_resume(tp);
3818 
3819 	return 0;
3820 }
3821 
3822 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3823 {
3824 	const int iters = 1000;
3825 	int i;
3826 	u32 val;
3827 
	/* Wait for the boot code to complete initialization and enter the
	 * service loop. It is then safe to download service patches.
	 */
3831 	for (i = 0; i < iters; i++) {
3832 		if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3833 			break;
3834 
3835 		udelay(10);
3836 	}
3837 
3838 	if (i == iters) {
3839 		netdev_err(tp->dev, "Boot code not ready for service patches\n");
3840 		return -EBUSY;
3841 	}
3842 
3843 	val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3844 	if (val & 0xff) {
3845 		netdev_warn(tp->dev,
3846 			    "Other patches exist. Not downloading EEE patch\n");
3847 		return -EEXIST;
3848 	}
3849 
3850 	return 0;
3851 }
3852 
3853 /* tp->lock is held. */
3854 static void tg3_load_57766_firmware(struct tg3 *tp)
3855 {
3856 	struct tg3_firmware_hdr *fw_hdr;
3857 
3858 	if (!tg3_flag(tp, NO_NVRAM))
3859 		return;
3860 
3861 	if (tg3_validate_rxcpu_state(tp))
3862 		return;
3863 
3864 	if (!tp->fw)
3865 		return;
3866 
	/* This firmware blob has a different format than older firmware
	 * releases, as described below. The main difference is that we
	 * have fragmented data to be written to non-contiguous locations.
	 *
	 * In the beginning there is a firmware header identical to other
	 * firmware, consisting of version, base addr and length. The
	 * length here is unused and set to 0xffffffff.
	 *
	 * This is followed by a series of firmware fragments which are
	 * individually identical to previous firmware, i.e. a firmware
	 * header followed by the data for that fragment. The version field
	 * of the individual fragment header is unused.
	 */
3880 
3881 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3882 	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3883 		return;
3884 
3885 	if (tg3_rxcpu_pause(tp))
3886 		return;
3887 
3888 	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
3889 	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3890 
3891 	tg3_rxcpu_resume(tp);
3892 }
3893 
3894 /* tp->lock is held. */
3895 static int tg3_load_tso_firmware(struct tg3 *tp)
3896 {
3897 	const struct tg3_firmware_hdr *fw_hdr;
3898 	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3899 	int err;
3900 
3901 	if (!tg3_flag(tp, FW_TSO))
3902 		return 0;
3903 
3904 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3905 
	/* The firmware blob starts with version numbers, followed by the
	 * start address and length. We are setting the complete length:
	 * length = end_address_of_bss - start_address_of_text.
	 * The remainder is the blob to be loaded contiguously from the
	 * start address.
	 */
3911 
3912 	cpu_scratch_size = tp->fw_len;
3913 
3914 	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3915 		cpu_base = RX_CPU_BASE;
3916 		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3917 	} else {
3918 		cpu_base = TX_CPU_BASE;
3919 		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3920 		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3921 	}
3922 
3923 	err = tg3_load_firmware_cpu(tp, cpu_base,
3924 				    cpu_scratch_base, cpu_scratch_size,
3925 				    fw_hdr);
3926 	if (err)
3927 		return err;
3928 
3929 	/* Now startup the cpu. */
3930 	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3931 				       be32_to_cpu(fw_hdr->base_addr));
3932 	if (err) {
3933 		netdev_err(tp->dev,
3934 			   "%s fails to set CPU PC, is %08x should be %08x\n",
3935 			   __func__, tr32(cpu_base + CPU_PC),
3936 			   be32_to_cpu(fw_hdr->base_addr));
3937 		return -ENODEV;
3938 	}
3939 
3940 	tg3_resume_cpu(tp, cpu_base);
3941 	return 0;
3942 }
3943 
3944 /* tp->lock is held. */
3945 static void __tg3_set_one_mac_addr(struct tg3 *tp, const u8 *mac_addr,
3946 				   int index)
3947 {
3948 	u32 addr_high, addr_low;
3949 
3950 	addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3951 	addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3952 		    (mac_addr[4] <<  8) | mac_addr[5]);
3953 
3954 	if (index < 4) {
3955 		tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3956 		tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3957 	} else {
3958 		index -= 4;
3959 		tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3960 		tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3961 	}
3962 }
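
/* A minimal sketch of the register packing above, using a made-up MAC
 * address (illustrative only, compiled out):
 */
#if 0
static void tg3_mac_pack_example(void)
{
	const u8 mac[6] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };
	u32 addr_high = (mac[0] << 8) | mac[1];		/* 0x00000010 */
	u32 addr_low = (mac[2] << 24) | (mac[3] << 16) |
		       (mac[4] << 8) | mac[5];		/* 0x18aabbcc */

	/* Indices 0-3 select MAC_ADDR_0..3_{HIGH,LOW}; indices 4 and up
	 * select the extended MAC_EXTADDR registers.
	 */
	(void)addr_high;
	(void)addr_low;
}
#endif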
3963 
3964 /* tp->lock is held. */
3965 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3966 {
3967 	u32 addr_high;
3968 	int i;
3969 
3970 	for (i = 0; i < 4; i++) {
3971 		if (i == 1 && skip_mac_1)
3972 			continue;
3973 		__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3974 	}
3975 
3976 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3977 	    tg3_asic_rev(tp) == ASIC_REV_5704) {
3978 		for (i = 4; i < 16; i++)
3979 			__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3980 	}
3981 
3982 	addr_high = (tp->dev->dev_addr[0] +
3983 		     tp->dev->dev_addr[1] +
3984 		     tp->dev->dev_addr[2] +
3985 		     tp->dev->dev_addr[3] +
3986 		     tp->dev->dev_addr[4] +
3987 		     tp->dev->dev_addr[5]) &
3988 		TX_BACKOFF_SEED_MASK;
3989 	tw32(MAC_TX_BACKOFF_SEED, addr_high);
3990 }
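
/* Worked example of the backoff seed above, reusing the made-up address
 * from the sketch after __tg3_set_one_mac_addr(): the byte sum
 * 0x00 + 0x10 + 0x18 + 0xaa + 0xbb + 0xcc = 0x259 is masked with
 * TX_BACKOFF_SEED_MASK and written to MAC_TX_BACKOFF_SEED.
 */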
3991 
3992 static void tg3_enable_register_access(struct tg3 *tp)
3993 {
3994 	/*
3995 	 * Make sure register accesses (indirect or otherwise) will function
3996 	 * correctly.
3997 	 */
3998 	pci_write_config_dword(tp->pdev,
3999 			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4000 }
4001 
4002 static int tg3_power_up(struct tg3 *tp)
4003 {
4004 	int err;
4005 
4006 	tg3_enable_register_access(tp);
4007 
4008 	err = pci_set_power_state(tp->pdev, PCI_D0);
4009 	if (!err) {
4010 		/* Switch out of Vaux if it is a NIC */
4011 		tg3_pwrsrc_switch_to_vmain(tp);
4012 	} else {
4013 		netdev_err(tp->dev, "Transition to D0 failed\n");
4014 	}
4015 
4016 	return err;
4017 }
4018 
4019 static int tg3_setup_phy(struct tg3 *, bool);
4020 
4021 static int tg3_power_down_prepare(struct tg3 *tp)
4022 {
4023 	u32 misc_host_ctrl;
4024 	bool device_should_wake, do_low_power;
4025 
4026 	tg3_enable_register_access(tp);
4027 
4028 	/* Restore the CLKREQ setting. */
4029 	if (tg3_flag(tp, CLKREQ_BUG))
4030 		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4031 					 PCI_EXP_LNKCTL_CLKREQ_EN);
4032 
4033 	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4034 	tw32(TG3PCI_MISC_HOST_CTRL,
4035 	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4036 
4037 	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4038 			     tg3_flag(tp, WOL_ENABLE);
4039 
4040 	if (tg3_flag(tp, USE_PHYLIB)) {
4041 		do_low_power = false;
4042 		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4043 		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4044 			__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
4045 			struct phy_device *phydev;
4046 			u32 phyid;
4047 
4048 			phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4049 
4050 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4051 
4052 			tp->link_config.speed = phydev->speed;
4053 			tp->link_config.duplex = phydev->duplex;
4054 			tp->link_config.autoneg = phydev->autoneg;
4055 			ethtool_convert_link_mode_to_legacy_u32(
4056 				&tp->link_config.advertising,
4057 				phydev->advertising);
4058 
4059 			linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
4060 			linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
4061 					 advertising);
4062 			linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
4063 					 advertising);
4064 			linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
4065 					 advertising);
4066 
4067 			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4068 				if (tg3_flag(tp, WOL_SPEED_100MB)) {
4069 					linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
4070 							 advertising);
4071 					linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
4072 							 advertising);
4073 					linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4074 							 advertising);
4075 				} else {
4076 					linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4077 							 advertising);
4078 				}
4079 			}
4080 
4081 			linkmode_copy(phydev->advertising, advertising);
4082 			phy_start_aneg(phydev);
4083 
4084 			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4085 			if (phyid != PHY_ID_BCMAC131) {
4086 				phyid &= PHY_BCM_OUI_MASK;
4087 				if (phyid == PHY_BCM_OUI_1 ||
4088 				    phyid == PHY_BCM_OUI_2 ||
4089 				    phyid == PHY_BCM_OUI_3)
4090 					do_low_power = true;
4091 			}
4092 		}
4093 	} else {
4094 		do_low_power = true;
4095 
4096 		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4097 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4098 
4099 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4100 			tg3_setup_phy(tp, false);
4101 	}
4102 
4103 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4104 		u32 val;
4105 
4106 		val = tr32(GRC_VCPU_EXT_CTRL);
4107 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4108 	} else if (!tg3_flag(tp, ENABLE_ASF)) {
4109 		int i;
4110 		u32 val;
4111 
4112 		for (i = 0; i < 200; i++) {
4113 			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4114 			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4115 				break;
4116 			msleep(1);
4117 		}
4118 	}
4119 	if (tg3_flag(tp, WOL_CAP))
4120 		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4121 						     WOL_DRV_STATE_SHUTDOWN |
4122 						     WOL_DRV_WOL |
4123 						     WOL_SET_MAGIC_PKT);
4124 
4125 	if (device_should_wake) {
4126 		u32 mac_mode;
4127 
4128 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4129 			if (do_low_power &&
4130 			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4131 				tg3_phy_auxctl_write(tp,
4132 					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4133 					       MII_TG3_AUXCTL_PCTL_WOL_EN |
4134 					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4135 					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4136 				udelay(40);
4137 			}
4138 
4139 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4140 				mac_mode = MAC_MODE_PORT_MODE_GMII;
4141 			else if (tp->phy_flags &
4142 				 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4143 				if (tp->link_config.active_speed == SPEED_1000)
4144 					mac_mode = MAC_MODE_PORT_MODE_GMII;
4145 				else
4146 					mac_mode = MAC_MODE_PORT_MODE_MII;
4147 			} else
4148 				mac_mode = MAC_MODE_PORT_MODE_MII;
4149 
4150 			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4151 			if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4152 				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4153 					     SPEED_100 : SPEED_10;
4154 				if (tg3_5700_link_polarity(tp, speed))
4155 					mac_mode |= MAC_MODE_LINK_POLARITY;
4156 				else
4157 					mac_mode &= ~MAC_MODE_LINK_POLARITY;
4158 			}
4159 		} else {
4160 			mac_mode = MAC_MODE_PORT_MODE_TBI;
4161 		}
4162 
4163 		if (!tg3_flag(tp, 5750_PLUS))
4164 			tw32(MAC_LED_CTRL, tp->led_ctrl);
4165 
4166 		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4167 		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4168 		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4169 			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4170 
4171 		if (tg3_flag(tp, ENABLE_APE))
4172 			mac_mode |= MAC_MODE_APE_TX_EN |
4173 				    MAC_MODE_APE_RX_EN |
4174 				    MAC_MODE_TDE_ENABLE;
4175 
4176 		tw32_f(MAC_MODE, mac_mode);
4177 		udelay(100);
4178 
4179 		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4180 		udelay(10);
4181 	}
4182 
4183 	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4184 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4185 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
4186 		u32 base_val;
4187 
4188 		base_val = tp->pci_clock_ctrl;
4189 		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4190 			     CLOCK_CTRL_TXCLK_DISABLE);
4191 
4192 		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4193 			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
4194 	} else if (tg3_flag(tp, 5780_CLASS) ||
4195 		   tg3_flag(tp, CPMU_PRESENT) ||
4196 		   tg3_asic_rev(tp) == ASIC_REV_5906) {
4197 		/* do nothing */
4198 	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4199 		u32 newbits1, newbits2;
4200 
4201 		if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4202 		    tg3_asic_rev(tp) == ASIC_REV_5701) {
4203 			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4204 				    CLOCK_CTRL_TXCLK_DISABLE |
4205 				    CLOCK_CTRL_ALTCLK);
4206 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4207 		} else if (tg3_flag(tp, 5705_PLUS)) {
4208 			newbits1 = CLOCK_CTRL_625_CORE;
4209 			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4210 		} else {
4211 			newbits1 = CLOCK_CTRL_ALTCLK;
4212 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4213 		}
4214 
4215 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4216 			    40);
4217 
4218 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4219 			    40);
4220 
4221 		if (!tg3_flag(tp, 5705_PLUS)) {
4222 			u32 newbits3;
4223 
4224 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4225 			    tg3_asic_rev(tp) == ASIC_REV_5701) {
4226 				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4227 					    CLOCK_CTRL_TXCLK_DISABLE |
4228 					    CLOCK_CTRL_44MHZ_CORE);
4229 			} else {
4230 				newbits3 = CLOCK_CTRL_44MHZ_CORE;
4231 			}
4232 
4233 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
4234 				    tp->pci_clock_ctrl | newbits3, 40);
4235 		}
4236 	}
4237 
4238 	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4239 		tg3_power_down_phy(tp, do_low_power);
4240 
4241 	tg3_frob_aux_power(tp, true);
4242 
4243 	/* Workaround for unstable PLL clock */
4244 	if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4245 	    ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4246 	     (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4247 		u32 val = tr32(0x7d00);
4248 
4249 		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4250 		tw32(0x7d00, val);
4251 		if (!tg3_flag(tp, ENABLE_ASF)) {
4252 			int err;
4253 
4254 			err = tg3_nvram_lock(tp);
4255 			tg3_halt_cpu(tp, RX_CPU_BASE);
4256 			if (!err)
4257 				tg3_nvram_unlock(tp);
4258 		}
4259 	}
4260 
4261 	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4262 
4263 	tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4264 
4265 	return 0;
4266 }
4267 
4268 static void tg3_power_down(struct tg3 *tp)
4269 {
4270 	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4271 	pci_set_power_state(tp->pdev, PCI_D3hot);
4272 }
4273 
4274 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
4275 {
4276 	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4277 	case MII_TG3_AUX_STAT_10HALF:
4278 		*speed = SPEED_10;
4279 		*duplex = DUPLEX_HALF;
4280 		break;
4281 
4282 	case MII_TG3_AUX_STAT_10FULL:
4283 		*speed = SPEED_10;
4284 		*duplex = DUPLEX_FULL;
4285 		break;
4286 
4287 	case MII_TG3_AUX_STAT_100HALF:
4288 		*speed = SPEED_100;
4289 		*duplex = DUPLEX_HALF;
4290 		break;
4291 
4292 	case MII_TG3_AUX_STAT_100FULL:
4293 		*speed = SPEED_100;
4294 		*duplex = DUPLEX_FULL;
4295 		break;
4296 
4297 	case MII_TG3_AUX_STAT_1000HALF:
4298 		*speed = SPEED_1000;
4299 		*duplex = DUPLEX_HALF;
4300 		break;
4301 
4302 	case MII_TG3_AUX_STAT_1000FULL:
4303 		*speed = SPEED_1000;
4304 		*duplex = DUPLEX_FULL;
4305 		break;
4306 
4307 	default:
4308 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4309 			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4310 				 SPEED_10;
4311 			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4312 				  DUPLEX_HALF;
4313 			break;
4314 		}
4315 		*speed = SPEED_UNKNOWN;
4316 		*duplex = DUPLEX_UNKNOWN;
4317 		break;
4318 	}
4319 }
4320 
4321 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4322 {
4323 	int err = 0;
4324 	u32 val, new_adv;
4325 
4326 	new_adv = ADVERTISE_CSMA;
4327 	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4328 	new_adv |= mii_advertise_flowctrl(flowctrl);
4329 
4330 	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4331 	if (err)
4332 		goto done;
4333 
4334 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4335 		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4336 
4337 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4338 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4339 			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4340 
4341 		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4342 		if (err)
4343 			goto done;
4344 	}
4345 
4346 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4347 		goto done;
4348 
4349 	tw32(TG3_CPMU_EEE_MODE,
4350 	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4351 
4352 	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4353 	if (!err) {
4354 		u32 err2;
4355 
4356 		val = 0;
4357 		/* Advertise 100-BaseTX EEE ability */
4358 		if (advertise & ADVERTISED_100baseT_Full)
4359 			val |= MDIO_AN_EEE_ADV_100TX;
4360 		/* Advertise 1000-BaseT EEE ability */
4361 		if (advertise & ADVERTISED_1000baseT_Full)
4362 			val |= MDIO_AN_EEE_ADV_1000T;
4363 
4364 		if (!tp->eee.eee_enabled) {
4365 			val = 0;
4366 			tp->eee.advertised = 0;
4367 		} else {
4368 			tp->eee.advertised = advertise &
4369 					     (ADVERTISED_100baseT_Full |
4370 					      ADVERTISED_1000baseT_Full);
4371 		}
4372 
4373 		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4374 		if (err)
4375 			val = 0;
4376 
4377 		switch (tg3_asic_rev(tp)) {
4378 		case ASIC_REV_5717:
4379 		case ASIC_REV_57765:
4380 		case ASIC_REV_57766:
4381 		case ASIC_REV_5719:
			/* If we advertised any EEE modes above... */
4383 			if (val)
4384 				val = MII_TG3_DSP_TAP26_ALNOKO |
4385 				      MII_TG3_DSP_TAP26_RMRXSTO |
4386 				      MII_TG3_DSP_TAP26_OPCSINPT;
4387 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4388 			fallthrough;
4389 		case ASIC_REV_5720:
4390 		case ASIC_REV_5762:
4391 			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4392 				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4393 						 MII_TG3_DSP_CH34TP2_HIBW01);
4394 		}
4395 
4396 		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4397 		if (!err)
4398 			err = err2;
4399 	}
4400 
4401 done:
4402 	return err;
4403 }
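
/* A minimal sketch of the ethtool-to-MII translation performed above,
 * using a hypothetical advertisement mask (illustrative only, compiled
 * out):
 */
#if 0
static void tg3_adv_translate_example(void)
{
	u32 advertise = ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Full;
	u32 adv;

	adv = ADVERTISE_CSMA |
	      (ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL) |
	      mii_advertise_flowctrl(FLOW_CTRL_TX | FLOW_CTRL_RX);

	/* adv == ADVERTISE_CSMA | ADVERTISE_10FULL | ADVERTISE_100FULL |
	 *	  ADVERTISE_PAUSE_CAP (symmetric pause)
	 */
	(void)adv;
}
#endif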
4404 
4405 static void tg3_phy_copper_begin(struct tg3 *tp)
4406 {
4407 	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4408 	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4409 		u32 adv, fc;
4410 
4411 		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4412 		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4413 			adv = ADVERTISED_10baseT_Half |
4414 			      ADVERTISED_10baseT_Full;
4415 			if (tg3_flag(tp, WOL_SPEED_100MB))
4416 				adv |= ADVERTISED_100baseT_Half |
4417 				       ADVERTISED_100baseT_Full;
4418 			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4419 				if (!(tp->phy_flags &
4420 				      TG3_PHYFLG_DISABLE_1G_HD_ADV))
4421 					adv |= ADVERTISED_1000baseT_Half;
4422 				adv |= ADVERTISED_1000baseT_Full;
4423 			}
4424 
4425 			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4426 		} else {
4427 			adv = tp->link_config.advertising;
4428 			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4429 				adv &= ~(ADVERTISED_1000baseT_Half |
4430 					 ADVERTISED_1000baseT_Full);
4431 
4432 			fc = tp->link_config.flowctrl;
4433 		}
4434 
4435 		tg3_phy_autoneg_cfg(tp, adv, fc);
4436 
4437 		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4438 		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4439 			/* Normally during power down we want to autonegotiate
4440 			 * the lowest possible speed for WOL. However, to avoid
4441 			 * link flap, we leave it untouched.
4442 			 */
4443 			return;
4444 		}
4445 
4446 		tg3_writephy(tp, MII_BMCR,
4447 			     BMCR_ANENABLE | BMCR_ANRESTART);
4448 	} else {
4449 		int i;
4450 		u32 bmcr, orig_bmcr;
4451 
4452 		tp->link_config.active_speed = tp->link_config.speed;
4453 		tp->link_config.active_duplex = tp->link_config.duplex;
4454 
4455 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* With autoneg disabled, the 5714/5715 family only
			 * links up when the advertisement register has the
			 * configured speed enabled.
			 */
4460 			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4461 		}
4462 
4463 		bmcr = 0;
4464 		switch (tp->link_config.speed) {
4465 		default:
4466 		case SPEED_10:
4467 			break;
4468 
4469 		case SPEED_100:
4470 			bmcr |= BMCR_SPEED100;
4471 			break;
4472 
4473 		case SPEED_1000:
4474 			bmcr |= BMCR_SPEED1000;
4475 			break;
4476 		}
4477 
4478 		if (tp->link_config.duplex == DUPLEX_FULL)
4479 			bmcr |= BMCR_FULLDPLX;
4480 
4481 		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4482 		    (bmcr != orig_bmcr)) {
4483 			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4484 			for (i = 0; i < 1500; i++) {
4485 				u32 tmp;
4486 
4487 				udelay(10);
4488 				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4489 				    tg3_readphy(tp, MII_BMSR, &tmp))
4490 					continue;
4491 				if (!(tmp & BMSR_LSTATUS)) {
4492 					udelay(40);
4493 					break;
4494 				}
4495 			}
4496 			tg3_writephy(tp, MII_BMCR, bmcr);
4497 			udelay(40);
4498 		}
4499 	}
4500 }
4501 
4502 static int tg3_phy_pull_config(struct tg3 *tp)
4503 {
4504 	int err;
4505 	u32 val;
4506 
4507 	err = tg3_readphy(tp, MII_BMCR, &val);
4508 	if (err)
4509 		goto done;
4510 
4511 	if (!(val & BMCR_ANENABLE)) {
4512 		tp->link_config.autoneg = AUTONEG_DISABLE;
4513 		tp->link_config.advertising = 0;
4514 		tg3_flag_clear(tp, PAUSE_AUTONEG);
4515 
4516 		err = -EIO;
4517 
4518 		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4519 		case 0:
4520 			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4521 				goto done;
4522 
4523 			tp->link_config.speed = SPEED_10;
4524 			break;
4525 		case BMCR_SPEED100:
4526 			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4527 				goto done;
4528 
4529 			tp->link_config.speed = SPEED_100;
4530 			break;
4531 		case BMCR_SPEED1000:
4532 			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4533 				tp->link_config.speed = SPEED_1000;
4534 				break;
4535 			}
4536 			fallthrough;
4537 		default:
4538 			goto done;
4539 		}
4540 
4541 		if (val & BMCR_FULLDPLX)
4542 			tp->link_config.duplex = DUPLEX_FULL;
4543 		else
4544 			tp->link_config.duplex = DUPLEX_HALF;
4545 
4546 		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4547 
4548 		err = 0;
4549 		goto done;
4550 	}
4551 
4552 	tp->link_config.autoneg = AUTONEG_ENABLE;
4553 	tp->link_config.advertising = ADVERTISED_Autoneg;
4554 	tg3_flag_set(tp, PAUSE_AUTONEG);
4555 
4556 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4557 		u32 adv;
4558 
4559 		err = tg3_readphy(tp, MII_ADVERTISE, &val);
4560 		if (err)
4561 			goto done;
4562 
4563 		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4564 		tp->link_config.advertising |= adv | ADVERTISED_TP;
4565 
4566 		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4567 	} else {
4568 		tp->link_config.advertising |= ADVERTISED_FIBRE;
4569 	}
4570 
4571 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4572 		u32 adv;
4573 
4574 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4575 			err = tg3_readphy(tp, MII_CTRL1000, &val);
4576 			if (err)
4577 				goto done;
4578 
4579 			adv = mii_ctrl1000_to_ethtool_adv_t(val);
4580 		} else {
4581 			err = tg3_readphy(tp, MII_ADVERTISE, &val);
4582 			if (err)
4583 				goto done;
4584 
4585 			adv = tg3_decode_flowctrl_1000X(val);
4586 			tp->link_config.flowctrl = adv;
4587 
4588 			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4589 			adv = mii_adv_to_ethtool_adv_x(val);
4590 		}
4591 
4592 		tp->link_config.advertising |= adv;
4593 	}
4594 
4595 done:
4596 	return err;
4597 }
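
/* For reference, the standard MII BMCR encoding decoded above: neither
 * BMCR_SPEED100 nor BMCR_SPEED1000 set selects 10 Mb/s, BMCR_SPEED100
 * alone selects 100 Mb/s, BMCR_SPEED1000 alone selects 1000 Mb/s, and
 * BMCR_FULLDPLX selects full duplex.
 */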
4598 
4599 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4600 {
4601 	int err;
4602 
4603 	/* Turn off tap power management. */
4604 	/* Set Extended packet length bit */
4605 	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4606 
4607 	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4608 	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4609 	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4610 	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4611 	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4612 
4613 	udelay(40);
4614 
4615 	return err;
4616 }
4617 
4618 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4619 {
4620 	struct ethtool_eee eee;
4621 
4622 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4623 		return true;
4624 
4625 	tg3_eee_pull_config(tp, &eee);
4626 
4627 	if (tp->eee.eee_enabled) {
4628 		if (tp->eee.advertised != eee.advertised ||
4629 		    tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4630 		    tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4631 			return false;
4632 	} else {
4633 		/* EEE is disabled but we're advertising */
4634 		if (eee.advertised)
4635 			return false;
4636 	}
4637 
4638 	return true;
4639 }
4640 
4641 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4642 {
4643 	u32 advmsk, tgtadv, advertising;
4644 
4645 	advertising = tp->link_config.advertising;
4646 	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4647 
4648 	advmsk = ADVERTISE_ALL;
4649 	if (tp->link_config.active_duplex == DUPLEX_FULL) {
4650 		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4651 		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4652 	}
4653 
4654 	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4655 		return false;
4656 
4657 	if ((*lcladv & advmsk) != tgtadv)
4658 		return false;
4659 
4660 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4661 		u32 tg3_ctrl;
4662 
4663 		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4664 
4665 		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4666 			return false;
4667 
4668 		if (tgtadv &&
4669 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4670 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4671 			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4672 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4673 				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4674 		} else {
4675 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4676 		}
4677 
4678 		if (tg3_ctrl != tgtadv)
4679 			return false;
4680 	}
4681 
4682 	return true;
4683 }
4684 
4685 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4686 {
4687 	u32 lpeth = 0;
4688 
4689 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4690 		u32 val;
4691 
4692 		if (tg3_readphy(tp, MII_STAT1000, &val))
4693 			return false;
4694 
4695 		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4696 	}
4697 
4698 	if (tg3_readphy(tp, MII_LPA, rmtadv))
4699 		return false;
4700 
4701 	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4702 	tp->link_config.rmt_adv = lpeth;
4703 
4704 	return true;
4705 }
4706 
4707 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4708 {
4709 	if (curr_link_up != tp->link_up) {
4710 		if (curr_link_up) {
4711 			netif_carrier_on(tp->dev);
4712 		} else {
4713 			netif_carrier_off(tp->dev);
4714 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4715 				tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4716 		}
4717 
4718 		tg3_link_report(tp);
4719 		return true;
4720 	}
4721 
4722 	return false;
4723 }
4724 
4725 static void tg3_clear_mac_status(struct tg3 *tp)
4726 {
4727 	tw32(MAC_EVENT, 0);
4728 
4729 	tw32_f(MAC_STATUS,
4730 	       MAC_STATUS_SYNC_CHANGED |
4731 	       MAC_STATUS_CFG_CHANGED |
4732 	       MAC_STATUS_MI_COMPLETION |
4733 	       MAC_STATUS_LNKSTATE_CHANGED);
4734 	udelay(40);
4735 }
4736 
4737 static void tg3_setup_eee(struct tg3 *tp)
4738 {
4739 	u32 val;
4740 
4741 	val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4742 	      TG3_CPMU_EEE_LNKIDL_UART_IDL;
4743 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4744 		val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4745 
4746 	tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4747 
4748 	tw32_f(TG3_CPMU_EEE_CTRL,
4749 	       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4750 
4751 	val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4752 	      (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4753 	      TG3_CPMU_EEEMD_LPI_IN_RX |
4754 	      TG3_CPMU_EEEMD_EEE_ENABLE;
4755 
4756 	if (tg3_asic_rev(tp) != ASIC_REV_5717)
4757 		val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4758 
4759 	if (tg3_flag(tp, ENABLE_APE))
4760 		val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4761 
4762 	tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4763 
4764 	tw32_f(TG3_CPMU_EEE_DBTMR1,
4765 	       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4766 	       (tp->eee.tx_lpi_timer & 0xffff));
4767 
4768 	tw32_f(TG3_CPMU_EEE_DBTMR2,
4769 	       TG3_CPMU_DBTMR2_APE_TX_2047US |
4770 	       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4771 }
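
/* Example of the DBTMR1 packing above, with a hypothetical LPI timer
 * value (illustrative only): tx_lpi_timer = 0x07ff would yield
 * TG3_CPMU_DBTMR1_PCIEXIT_2047US | 0x07ff, i.e. the timer occupies the
 * low 16 bits of the register.
 */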
4772 
4773 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4774 {
4775 	bool current_link_up;
4776 	u32 bmsr, val;
4777 	u32 lcl_adv, rmt_adv;
4778 	u32 current_speed;
4779 	u8 current_duplex;
4780 	int i, err;
4781 
4782 	tg3_clear_mac_status(tp);
4783 
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}
4789 
4790 	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4791 
4792 	/* Some third-party PHYs need to be reset on link going
4793 	 * down.
4794 	 */
4795 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4796 	     tg3_asic_rev(tp) == ASIC_REV_5704 ||
4797 	     tg3_asic_rev(tp) == ASIC_REV_5705) &&
4798 	    tp->link_up) {
4799 		tg3_readphy(tp, MII_BMSR, &bmsr);
4800 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4801 		    !(bmsr & BMSR_LSTATUS))
4802 			force_reset = true;
4803 	}
4804 	if (force_reset)
4805 		tg3_phy_reset(tp);
4806 
4807 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4808 		tg3_readphy(tp, MII_BMSR, &bmsr);
4809 		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4810 		    !tg3_flag(tp, INIT_COMPLETE))
4811 			bmsr = 0;
4812 
4813 		if (!(bmsr & BMSR_LSTATUS)) {
4814 			err = tg3_init_5401phy_dsp(tp);
4815 			if (err)
4816 				return err;
4817 
4818 			tg3_readphy(tp, MII_BMSR, &bmsr);
4819 			for (i = 0; i < 1000; i++) {
4820 				udelay(10);
4821 				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4822 				    (bmsr & BMSR_LSTATUS)) {
4823 					udelay(40);
4824 					break;
4825 				}
4826 			}
4827 
4828 			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4829 			    TG3_PHY_REV_BCM5401_B0 &&
4830 			    !(bmsr & BMSR_LSTATUS) &&
4831 			    tp->link_config.active_speed == SPEED_1000) {
4832 				err = tg3_phy_reset(tp);
4833 				if (!err)
4834 					err = tg3_init_5401phy_dsp(tp);
4835 				if (err)
4836 					return err;
4837 			}
4838 		}
4839 	} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4840 		   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4841 		/* 5701 {A0,B0} CRC bug workaround */
4842 		tg3_writephy(tp, 0x15, 0x0a75);
4843 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4844 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4845 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4846 	}
4847 
4848 	/* Clear pending interrupts... */
4849 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4850 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4851 
4852 	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4853 		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4854 	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4855 		tg3_writephy(tp, MII_TG3_IMASK, ~0);
4856 
4857 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4858 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
4859 		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4860 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
4861 				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4862 		else
4863 			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4864 	}
4865 
4866 	current_link_up = false;
4867 	current_speed = SPEED_UNKNOWN;
4868 	current_duplex = DUPLEX_UNKNOWN;
4869 	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4870 	tp->link_config.rmt_adv = 0;
4871 
4872 	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4873 		err = tg3_phy_auxctl_read(tp,
4874 					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4875 					  &val);
4876 		if (!err && !(val & (1 << 10))) {
4877 			tg3_phy_auxctl_write(tp,
4878 					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4879 					     val | (1 << 10));
4880 			goto relink;
4881 		}
4882 	}
4883 
4884 	bmsr = 0;
4885 	for (i = 0; i < 100; i++) {
4886 		tg3_readphy(tp, MII_BMSR, &bmsr);
4887 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4888 		    (bmsr & BMSR_LSTATUS))
4889 			break;
4890 		udelay(40);
4891 	}
4892 
4893 	if (bmsr & BMSR_LSTATUS) {
4894 		u32 aux_stat, bmcr;
4895 
4896 		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4897 		for (i = 0; i < 2000; i++) {
4898 			udelay(10);
4899 			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4900 			    aux_stat)
4901 				break;
4902 		}
4903 
4904 		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4905 					     &current_speed,
4906 					     &current_duplex);
4907 
4908 		bmcr = 0;
4909 		for (i = 0; i < 200; i++) {
4910 			tg3_readphy(tp, MII_BMCR, &bmcr);
4911 			if (tg3_readphy(tp, MII_BMCR, &bmcr))
4912 				continue;
4913 			if (bmcr && bmcr != 0x7fff)
4914 				break;
4915 			udelay(10);
4916 		}
4917 
4918 		lcl_adv = 0;
4919 		rmt_adv = 0;
4920 
4921 		tp->link_config.active_speed = current_speed;
4922 		tp->link_config.active_duplex = current_duplex;
4923 
4924 		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4925 			bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4926 
4927 			if ((bmcr & BMCR_ANENABLE) &&
4928 			    eee_config_ok &&
4929 			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4930 			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4931 				current_link_up = true;
4932 
			/* Changes to the EEE settings take effect only after
			 * a PHY reset.  If we have skipped a reset due to Link
			 * Flap Avoidance being enabled, do it now.
			 */
4937 			if (!eee_config_ok &&
4938 			    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4939 			    !force_reset) {
4940 				tg3_setup_eee(tp);
4941 				tg3_phy_reset(tp);
4942 			}
4943 		} else {
4944 			if (!(bmcr & BMCR_ANENABLE) &&
4945 			    tp->link_config.speed == current_speed &&
4946 			    tp->link_config.duplex == current_duplex) {
4947 				current_link_up = true;
4948 			}
4949 		}
4950 
4951 		if (current_link_up &&
4952 		    tp->link_config.active_duplex == DUPLEX_FULL) {
4953 			u32 reg, bit;
4954 
4955 			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4956 				reg = MII_TG3_FET_GEN_STAT;
4957 				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4958 			} else {
4959 				reg = MII_TG3_EXT_STAT;
4960 				bit = MII_TG3_EXT_STAT_MDIX;
4961 			}
4962 
4963 			if (!tg3_readphy(tp, reg, &val) && (val & bit))
4964 				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4965 
4966 			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4967 		}
4968 	}
4969 
4970 relink:
4971 	if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4972 		tg3_phy_copper_begin(tp);
4973 
4974 		if (tg3_flag(tp, ROBOSWITCH)) {
4975 			current_link_up = true;
4976 			/* FIXME: when BCM5325 switch is used use 100 MBit/s */
4977 			current_speed = SPEED_1000;
4978 			current_duplex = DUPLEX_FULL;
4979 			tp->link_config.active_speed = current_speed;
4980 			tp->link_config.active_duplex = current_duplex;
4981 		}
4982 
4983 		tg3_readphy(tp, MII_BMSR, &bmsr);
4984 		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4985 		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4986 			current_link_up = true;
4987 	}
4988 
4989 	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4990 	if (current_link_up) {
4991 		if (tp->link_config.active_speed == SPEED_100 ||
4992 		    tp->link_config.active_speed == SPEED_10)
4993 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4994 		else
4995 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4996 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4997 		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4998 	else
4999 		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5000 
	/* In order for the 5750 core in the BCM4785 chip to work properly
	 * in RGMII mode, the LED Control Register must be set up.
	 */
5004 	if (tg3_flag(tp, RGMII_MODE)) {
5005 		u32 led_ctrl = tr32(MAC_LED_CTRL);
5006 		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5007 
5008 		if (tp->link_config.active_speed == SPEED_10)
5009 			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5010 		else if (tp->link_config.active_speed == SPEED_100)
5011 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5012 				     LED_CTRL_100MBPS_ON);
5013 		else if (tp->link_config.active_speed == SPEED_1000)
5014 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5015 				     LED_CTRL_1000MBPS_ON);
5016 
5017 		tw32(MAC_LED_CTRL, led_ctrl);
5018 		udelay(40);
5019 	}
5020 
5021 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5022 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5023 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5024 
5025 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5026 		if (current_link_up &&
5027 		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5028 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5029 		else
5030 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5031 	}
5032 
5033 	/* ??? Without this setting Netgear GA302T PHY does not
5034 	 * ??? send/receive packets...
5035 	 */
5036 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5037 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5038 		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5039 		tw32_f(MAC_MI_MODE, tp->mi_mode);
5040 		udelay(80);
5041 	}
5042 
5043 	tw32_f(MAC_MODE, tp->mac_mode);
5044 	udelay(40);
5045 
5046 	tg3_phy_eee_adjust(tp, current_link_up);
5047 
5048 	if (tg3_flag(tp, USE_LINKCHG_REG)) {
5049 		/* Polled via timer. */
5050 		tw32_f(MAC_EVENT, 0);
5051 	} else {
5052 		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5053 	}
5054 	udelay(40);
5055 
5056 	if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5057 	    current_link_up &&
5058 	    tp->link_config.active_speed == SPEED_1000 &&
5059 	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5060 		udelay(120);
5061 		tw32_f(MAC_STATUS,
5062 		     (MAC_STATUS_SYNC_CHANGED |
5063 		      MAC_STATUS_CFG_CHANGED));
5064 		udelay(40);
5065 		tg3_write_mem(tp,
5066 			      NIC_SRAM_FIRMWARE_MBOX,
5067 			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5068 	}
5069 
5070 	/* Prevent send BD corruption. */
5071 	if (tg3_flag(tp, CLKREQ_BUG)) {
5072 		if (tp->link_config.active_speed == SPEED_100 ||
5073 		    tp->link_config.active_speed == SPEED_10)
5074 			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5075 						   PCI_EXP_LNKCTL_CLKREQ_EN);
5076 		else
5077 			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5078 						 PCI_EXP_LNKCTL_CLKREQ_EN);
5079 	}
5080 
5081 	tg3_test_and_report_link_chg(tp, current_link_up);
5082 
5083 	return 0;
5084 }
5085 
5086 struct tg3_fiber_aneginfo {
5087 	int state;
5088 #define ANEG_STATE_UNKNOWN		0
5089 #define ANEG_STATE_AN_ENABLE		1
5090 #define ANEG_STATE_RESTART_INIT		2
5091 #define ANEG_STATE_RESTART		3
5092 #define ANEG_STATE_DISABLE_LINK_OK	4
5093 #define ANEG_STATE_ABILITY_DETECT_INIT	5
5094 #define ANEG_STATE_ABILITY_DETECT	6
5095 #define ANEG_STATE_ACK_DETECT_INIT	7
5096 #define ANEG_STATE_ACK_DETECT		8
5097 #define ANEG_STATE_COMPLETE_ACK_INIT	9
5098 #define ANEG_STATE_COMPLETE_ACK		10
5099 #define ANEG_STATE_IDLE_DETECT_INIT	11
5100 #define ANEG_STATE_IDLE_DETECT		12
5101 #define ANEG_STATE_LINK_OK		13
5102 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
5103 #define ANEG_STATE_NEXT_PAGE_WAIT	15
5104 
5105 	u32 flags;
5106 #define MR_AN_ENABLE		0x00000001
5107 #define MR_RESTART_AN		0x00000002
5108 #define MR_AN_COMPLETE		0x00000004
5109 #define MR_PAGE_RX		0x00000008
5110 #define MR_NP_LOADED		0x00000010
5111 #define MR_TOGGLE_TX		0x00000020
5112 #define MR_LP_ADV_FULL_DUPLEX	0x00000040
5113 #define MR_LP_ADV_HALF_DUPLEX	0x00000080
5114 #define MR_LP_ADV_SYM_PAUSE	0x00000100
5115 #define MR_LP_ADV_ASYM_PAUSE	0x00000200
5116 #define MR_LP_ADV_REMOTE_FAULT1	0x00000400
5117 #define MR_LP_ADV_REMOTE_FAULT2	0x00000800
5118 #define MR_LP_ADV_NEXT_PAGE	0x00001000
5119 #define MR_TOGGLE_RX		0x00002000
5120 #define MR_NP_RX		0x00004000
5121 
5122 #define MR_LINK_OK		0x80000000
5123 
5124 	unsigned long link_time, cur_time;
5125 
5126 	u32 ability_match_cfg;
5127 	int ability_match_count;
5128 
5129 	char ability_match, idle_match, ack_match;
5130 
5131 	u32 txconfig, rxconfig;
5132 #define ANEG_CFG_NP		0x00000080
5133 #define ANEG_CFG_ACK		0x00000040
5134 #define ANEG_CFG_RF2		0x00000020
5135 #define ANEG_CFG_RF1		0x00000010
5136 #define ANEG_CFG_PS2		0x00000001
5137 #define ANEG_CFG_PS1		0x00008000
5138 #define ANEG_CFG_HD		0x00004000
5139 #define ANEG_CFG_FD		0x00002000
5140 #define ANEG_CFG_INVAL		0x00001f06
5141 
5142 };
5143 #define ANEG_OK		0
5144 #define ANEG_DONE	1
5145 #define ANEG_TIMER_ENAB	2
5146 #define ANEG_FAILED	-1
5147 
5148 #define ANEG_STATE_SETTLE_TIME	10000
5149 
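/* Rough flow of the software 1000BASE-X autoneg state machine below,
 * summarizing the case arms (see IEEE 802.3 clause 37 for the full
 * specification):
 *
 *   AN_ENABLE -> RESTART_INIT -> RESTART -> ABILITY_DETECT_INIT ->
 *   ABILITY_DETECT -> ACK_DETECT_INIT -> ACK_DETECT ->
 *   COMPLETE_ACK_INIT -> COMPLETE_ACK -> IDLE_DETECT_INIT ->
 *   IDLE_DETECT -> LINK_OK
 *
 * A change or loss of the received config words at most points drops
 * the machine back to AN_ENABLE.
 */
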
5150 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5151 				   struct tg3_fiber_aneginfo *ap)
5152 {
5153 	u16 flowctrl;
5154 	unsigned long delta;
5155 	u32 rx_cfg_reg;
5156 	int ret;
5157 
5158 	if (ap->state == ANEG_STATE_UNKNOWN) {
5159 		ap->rxconfig = 0;
5160 		ap->link_time = 0;
5161 		ap->cur_time = 0;
5162 		ap->ability_match_cfg = 0;
5163 		ap->ability_match_count = 0;
5164 		ap->ability_match = 0;
5165 		ap->idle_match = 0;
5166 		ap->ack_match = 0;
5167 	}
5168 	ap->cur_time++;
5169 
5170 	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5171 		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5172 
5173 		if (rx_cfg_reg != ap->ability_match_cfg) {
5174 			ap->ability_match_cfg = rx_cfg_reg;
5175 			ap->ability_match = 0;
5176 			ap->ability_match_count = 0;
5177 		} else {
5178 			if (++ap->ability_match_count > 1) {
5179 				ap->ability_match = 1;
5180 				ap->ability_match_cfg = rx_cfg_reg;
5181 			}
5182 		}
5183 		if (rx_cfg_reg & ANEG_CFG_ACK)
5184 			ap->ack_match = 1;
5185 		else
5186 			ap->ack_match = 0;
5187 
5188 		ap->idle_match = 0;
5189 	} else {
5190 		ap->idle_match = 1;
5191 		ap->ability_match_cfg = 0;
5192 		ap->ability_match_count = 0;
5193 		ap->ability_match = 0;
5194 		ap->ack_match = 0;
5195 
5196 		rx_cfg_reg = 0;
5197 	}
5198 
5199 	ap->rxconfig = rx_cfg_reg;
5200 	ret = ANEG_OK;
5201 
5202 	switch (ap->state) {
5203 	case ANEG_STATE_UNKNOWN:
5204 		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5205 			ap->state = ANEG_STATE_AN_ENABLE;
5206 
5207 		fallthrough;
5208 	case ANEG_STATE_AN_ENABLE:
5209 		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5210 		if (ap->flags & MR_AN_ENABLE) {
5211 			ap->link_time = 0;
5212 			ap->cur_time = 0;
5213 			ap->ability_match_cfg = 0;
5214 			ap->ability_match_count = 0;
5215 			ap->ability_match = 0;
5216 			ap->idle_match = 0;
5217 			ap->ack_match = 0;
5218 
5219 			ap->state = ANEG_STATE_RESTART_INIT;
5220 		} else {
5221 			ap->state = ANEG_STATE_DISABLE_LINK_OK;
5222 		}
5223 		break;
5224 
5225 	case ANEG_STATE_RESTART_INIT:
5226 		ap->link_time = ap->cur_time;
5227 		ap->flags &= ~(MR_NP_LOADED);
5228 		ap->txconfig = 0;
5229 		tw32(MAC_TX_AUTO_NEG, 0);
5230 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5231 		tw32_f(MAC_MODE, tp->mac_mode);
5232 		udelay(40);
5233 
5234 		ret = ANEG_TIMER_ENAB;
5235 		ap->state = ANEG_STATE_RESTART;
5236 
5237 		fallthrough;
5238 	case ANEG_STATE_RESTART:
5239 		delta = ap->cur_time - ap->link_time;
5240 		if (delta > ANEG_STATE_SETTLE_TIME)
5241 			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5242 		else
5243 			ret = ANEG_TIMER_ENAB;
5244 		break;
5245 
5246 	case ANEG_STATE_DISABLE_LINK_OK:
5247 		ret = ANEG_DONE;
5248 		break;
5249 
5250 	case ANEG_STATE_ABILITY_DETECT_INIT:
5251 		ap->flags &= ~(MR_TOGGLE_TX);
5252 		ap->txconfig = ANEG_CFG_FD;
5253 		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5254 		if (flowctrl & ADVERTISE_1000XPAUSE)
5255 			ap->txconfig |= ANEG_CFG_PS1;
5256 		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5257 			ap->txconfig |= ANEG_CFG_PS2;
5258 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5259 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5260 		tw32_f(MAC_MODE, tp->mac_mode);
5261 		udelay(40);
5262 
5263 		ap->state = ANEG_STATE_ABILITY_DETECT;
5264 		break;
5265 
5266 	case ANEG_STATE_ABILITY_DETECT:
5267 		if (ap->ability_match != 0 && ap->rxconfig != 0)
5268 			ap->state = ANEG_STATE_ACK_DETECT_INIT;
5269 		break;
5270 
5271 	case ANEG_STATE_ACK_DETECT_INIT:
5272 		ap->txconfig |= ANEG_CFG_ACK;
5273 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5274 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5275 		tw32_f(MAC_MODE, tp->mac_mode);
5276 		udelay(40);
5277 
5278 		ap->state = ANEG_STATE_ACK_DETECT;
5279 
5280 		fallthrough;
5281 	case ANEG_STATE_ACK_DETECT:
5282 		if (ap->ack_match != 0) {
5283 			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5284 			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5285 				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5286 			} else {
5287 				ap->state = ANEG_STATE_AN_ENABLE;
5288 			}
5289 		} else if (ap->ability_match != 0 &&
5290 			   ap->rxconfig == 0) {
5291 			ap->state = ANEG_STATE_AN_ENABLE;
5292 		}
5293 		break;
5294 
5295 	case ANEG_STATE_COMPLETE_ACK_INIT:
5296 		if (ap->rxconfig & ANEG_CFG_INVAL) {
5297 			ret = ANEG_FAILED;
5298 			break;
5299 		}
5300 		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5301 			       MR_LP_ADV_HALF_DUPLEX |
5302 			       MR_LP_ADV_SYM_PAUSE |
5303 			       MR_LP_ADV_ASYM_PAUSE |
5304 			       MR_LP_ADV_REMOTE_FAULT1 |
5305 			       MR_LP_ADV_REMOTE_FAULT2 |
5306 			       MR_LP_ADV_NEXT_PAGE |
5307 			       MR_TOGGLE_RX |
5308 			       MR_NP_RX);
5309 		if (ap->rxconfig & ANEG_CFG_FD)
5310 			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5311 		if (ap->rxconfig & ANEG_CFG_HD)
5312 			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5313 		if (ap->rxconfig & ANEG_CFG_PS1)
5314 			ap->flags |= MR_LP_ADV_SYM_PAUSE;
5315 		if (ap->rxconfig & ANEG_CFG_PS2)
5316 			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5317 		if (ap->rxconfig & ANEG_CFG_RF1)
5318 			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5319 		if (ap->rxconfig & ANEG_CFG_RF2)
5320 			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5321 		if (ap->rxconfig & ANEG_CFG_NP)
5322 			ap->flags |= MR_LP_ADV_NEXT_PAGE;
5323 
5324 		ap->link_time = ap->cur_time;
5325 
5326 		ap->flags ^= (MR_TOGGLE_TX);
5327 		if (ap->rxconfig & 0x0008)
5328 			ap->flags |= MR_TOGGLE_RX;
5329 		if (ap->rxconfig & ANEG_CFG_NP)
5330 			ap->flags |= MR_NP_RX;
5331 		ap->flags |= MR_PAGE_RX;
5332 
5333 		ap->state = ANEG_STATE_COMPLETE_ACK;
5334 		ret = ANEG_TIMER_ENAB;
5335 		break;
5336 
5337 	case ANEG_STATE_COMPLETE_ACK:
5338 		if (ap->ability_match != 0 &&
5339 		    ap->rxconfig == 0) {
5340 			ap->state = ANEG_STATE_AN_ENABLE;
5341 			break;
5342 		}
5343 		delta = ap->cur_time - ap->link_time;
5344 		if (delta > ANEG_STATE_SETTLE_TIME) {
5345 			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5346 				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5347 			} else {
5348 				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5349 				    !(ap->flags & MR_NP_RX)) {
5350 					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5351 				} else {
5352 					ret = ANEG_FAILED;
5353 				}
5354 			}
5355 		}
5356 		break;
5357 
5358 	case ANEG_STATE_IDLE_DETECT_INIT:
5359 		ap->link_time = ap->cur_time;
5360 		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5361 		tw32_f(MAC_MODE, tp->mac_mode);
5362 		udelay(40);
5363 
5364 		ap->state = ANEG_STATE_IDLE_DETECT;
5365 		ret = ANEG_TIMER_ENAB;
5366 		break;
5367 
5368 	case ANEG_STATE_IDLE_DETECT:
5369 		if (ap->ability_match != 0 &&
5370 		    ap->rxconfig == 0) {
5371 			ap->state = ANEG_STATE_AN_ENABLE;
5372 			break;
5373 		}
5374 		delta = ap->cur_time - ap->link_time;
5375 		if (delta > ANEG_STATE_SETTLE_TIME) {
5376 			/* XXX another gem from the Broadcom driver :( */
5377 			ap->state = ANEG_STATE_LINK_OK;
5378 		}
5379 		break;
5380 
5381 	case ANEG_STATE_LINK_OK:
5382 		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5383 		ret = ANEG_DONE;
5384 		break;
5385 
5386 	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5387 		/* ??? unimplemented */
5388 		break;
5389 
5390 	case ANEG_STATE_NEXT_PAGE_WAIT:
5391 		/* ??? unimplemented */
5392 		break;
5393 
5394 	default:
5395 		ret = ANEG_FAILED;
5396 		break;
5397 	}
5398 
5399 	return ret;
5400 }
5401 
5402 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5403 {
5404 	int res = 0;
5405 	struct tg3_fiber_aneginfo aninfo;
5406 	int status = ANEG_FAILED;
5407 	unsigned int tick;
5408 	u32 tmp;
5409 
5410 	tw32_f(MAC_TX_AUTO_NEG, 0);
5411 
5412 	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5413 	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5414 	udelay(40);
5415 
5416 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5417 	udelay(40);
5418 
5419 	memset(&aninfo, 0, sizeof(aninfo));
5420 	aninfo.flags |= MR_AN_ENABLE;
5421 	aninfo.state = ANEG_STATE_UNKNOWN;
5422 	aninfo.cur_time = 0;
5423 	tick = 0;
5424 	while (++tick < 195000) {
5425 		status = tg3_fiber_aneg_smachine(tp, &aninfo);
5426 		if (status == ANEG_DONE || status == ANEG_FAILED)
5427 			break;
5428 
5429 		udelay(1);
5430 	}
5431 
5432 	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5433 	tw32_f(MAC_MODE, tp->mac_mode);
5434 	udelay(40);
5435 
5436 	*txflags = aninfo.txconfig;
5437 	*rxflags = aninfo.flags;
5438 
5439 	if (status == ANEG_DONE &&
5440 	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5441 			     MR_LP_ADV_FULL_DUPLEX)))
5442 		res = 1;
5443 
5444 	return res;
5445 }
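
/* The polling loop above advances the state machine roughly once per
 * microsecond for up to ~195 ms, which comfortably covers the 10 ms
 * settle window (ANEG_STATE_SETTLE_TIME ticks at 1 us each) that the
 * machine waits out in its RESTART, COMPLETE_ACK and IDLE_DETECT
 * states.
 */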
5446 
5447 static void tg3_init_bcm8002(struct tg3 *tp)
5448 {
5449 	u32 mac_status = tr32(MAC_STATUS);
5450 	int i;
5451 
	/* Reset when initializing the first time or when we have a link. */
5453 	if (tg3_flag(tp, INIT_COMPLETE) &&
5454 	    !(mac_status & MAC_STATUS_PCS_SYNCED))
5455 		return;
5456 
5457 	/* Set PLL lock range. */
5458 	tg3_writephy(tp, 0x16, 0x8007);
5459 
5460 	/* SW reset */
5461 	tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5462 
5463 	/* Wait for reset to complete. */
5464 	/* XXX schedule_timeout() ... */
5465 	for (i = 0; i < 500; i++)
5466 		udelay(10);
5467 
5468 	/* Config mode; select PMA/Ch 1 regs. */
5469 	tg3_writephy(tp, 0x10, 0x8411);
5470 
5471 	/* Enable auto-lock and comdet, select txclk for tx. */
5472 	tg3_writephy(tp, 0x11, 0x0a10);
5473 
5474 	tg3_writephy(tp, 0x18, 0x00a0);
5475 	tg3_writephy(tp, 0x16, 0x41ff);
5476 
5477 	/* Assert and deassert POR. */
5478 	tg3_writephy(tp, 0x13, 0x0400);
5479 	udelay(40);
5480 	tg3_writephy(tp, 0x13, 0x0000);
5481 
5482 	tg3_writephy(tp, 0x11, 0x0a50);
5483 	udelay(40);
5484 	tg3_writephy(tp, 0x11, 0x0a10);
5485 
5486 	/* Wait for signal to stabilize */
5487 	/* XXX schedule_timeout() ... */
5488 	for (i = 0; i < 15000; i++)
5489 		udelay(10);
5490 
5491 	/* Deselect the channel register so we can read the PHYID
5492 	 * later.
5493 	 */
5494 	tg3_writephy(tp, 0x10, 0x8011);
5495 }
5496 
5497 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5498 {
5499 	u16 flowctrl;
5500 	bool current_link_up;
5501 	u32 sg_dig_ctrl, sg_dig_status;
5502 	u32 serdes_cfg, expected_sg_dig_ctrl;
5503 	int workaround, port_a;
5504 
5505 	serdes_cfg = 0;
5506 	workaround = 0;
5507 	port_a = 1;
5508 	current_link_up = false;
5509 
5510 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5511 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5512 		workaround = 1;
5513 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5514 			port_a = 0;
5515 
		/* Preserve bits 0-11, 13 and 14 for signal pre-emphasis,
		 * and bits 20-23 for the voltage regulator.
		 */
5518 		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5519 	}
5520 
5521 	sg_dig_ctrl = tr32(SG_DIG_CTRL);
5522 
5523 	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5524 		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5525 			if (workaround) {
5526 				u32 val = serdes_cfg;
5527 
5528 				if (port_a)
5529 					val |= 0xc010000;
5530 				else
5531 					val |= 0x4010000;
5532 				tw32_f(MAC_SERDES_CFG, val);
5533 			}
5534 
5535 			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5536 		}
5537 		if (mac_status & MAC_STATUS_PCS_SYNCED) {
5538 			tg3_setup_flow_control(tp, 0, 0);
5539 			current_link_up = true;
5540 		}
5541 		goto out;
5542 	}
5543 
5544 	/* Want auto-negotiation.  */
5545 	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5546 
5547 	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5548 	if (flowctrl & ADVERTISE_1000XPAUSE)
5549 		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5550 	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5551 		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5552 
5553 	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5554 		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5555 		    tp->serdes_counter &&
5556 		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
5557 				    MAC_STATUS_RCVD_CFG)) ==
5558 		     MAC_STATUS_PCS_SYNCED)) {
5559 			tp->serdes_counter--;
5560 			current_link_up = true;
5561 			goto out;
5562 		}
5563 restart_autoneg:
5564 		if (workaround)
5565 			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5566 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5567 		udelay(5);
5568 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5569 
5570 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5571 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5572 	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5573 				 MAC_STATUS_SIGNAL_DET)) {
5574 		sg_dig_status = tr32(SG_DIG_STATUS);
5575 		mac_status = tr32(MAC_STATUS);
5576 
5577 		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5578 		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
5579 			u32 local_adv = 0, remote_adv = 0;
5580 
5581 			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5582 				local_adv |= ADVERTISE_1000XPAUSE;
5583 			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5584 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5585 
5586 			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5587 				remote_adv |= LPA_1000XPAUSE;
5588 			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5589 				remote_adv |= LPA_1000XPAUSE_ASYM;
5590 
5591 			tp->link_config.rmt_adv =
5592 					   mii_adv_to_ethtool_adv_x(remote_adv);
5593 
5594 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5595 			current_link_up = true;
5596 			tp->serdes_counter = 0;
5597 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5598 		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5599 			if (tp->serdes_counter)
5600 				tp->serdes_counter--;
5601 			else {
5602 				if (workaround) {
5603 					u32 val = serdes_cfg;
5604 
5605 					if (port_a)
5606 						val |= 0xc010000;
5607 					else
5608 						val |= 0x4010000;
5609 
5610 					tw32_f(MAC_SERDES_CFG, val);
5611 				}
5612 
5613 				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5614 				udelay(40);
5615 
				/* Link parallel detection: the link is up
				 * only if we have PCS_SYNC and are not
				 * receiving config code words.
				 */
5619 				mac_status = tr32(MAC_STATUS);
5620 				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5621 				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
5622 					tg3_setup_flow_control(tp, 0, 0);
5623 					current_link_up = true;
5624 					tp->phy_flags |=
5625 						TG3_PHYFLG_PARALLEL_DETECT;
5626 					tp->serdes_counter =
5627 						SERDES_PARALLEL_DET_TIMEOUT;
5628 				} else
5629 					goto restart_autoneg;
5630 			}
5631 		}
5632 	} else {
5633 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5634 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5635 	}
5636 
5637 out:
5638 	return current_link_up;
5639 }
5640 
5641 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5642 {
5643 	bool current_link_up = false;
5644 
5645 	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5646 		goto out;
5647 
5648 	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5649 		u32 txflags, rxflags;
5650 		int i;
5651 
5652 		if (fiber_autoneg(tp, &txflags, &rxflags)) {
5653 			u32 local_adv = 0, remote_adv = 0;
5654 
5655 			if (txflags & ANEG_CFG_PS1)
5656 				local_adv |= ADVERTISE_1000XPAUSE;
5657 			if (txflags & ANEG_CFG_PS2)
5658 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5659 
5660 			if (rxflags & MR_LP_ADV_SYM_PAUSE)
5661 				remote_adv |= LPA_1000XPAUSE;
5662 			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5663 				remote_adv |= LPA_1000XPAUSE_ASYM;
5664 
5665 			tp->link_config.rmt_adv =
5666 					   mii_adv_to_ethtool_adv_x(remote_adv);
5667 
5668 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5669 
5670 			current_link_up = true;
5671 		}
5672 		for (i = 0; i < 30; i++) {
5673 			udelay(20);
5674 			tw32_f(MAC_STATUS,
5675 			       (MAC_STATUS_SYNC_CHANGED |
5676 				MAC_STATUS_CFG_CHANGED));
5677 			udelay(40);
5678 			if ((tr32(MAC_STATUS) &
5679 			     (MAC_STATUS_SYNC_CHANGED |
5680 			      MAC_STATUS_CFG_CHANGED)) == 0)
5681 				break;
5682 		}
5683 
5684 		mac_status = tr32(MAC_STATUS);
5685 		if (!current_link_up &&
5686 		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
5687 		    !(mac_status & MAC_STATUS_RCVD_CFG))
5688 			current_link_up = true;
5689 	} else {
5690 		tg3_setup_flow_control(tp, 0, 0);
5691 
5692 		/* Forcing 1000FD link up. */
5693 		current_link_up = true;
5694 
5695 		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5696 		udelay(40);
5697 
5698 		tw32_f(MAC_MODE, tp->mac_mode);
5699 		udelay(40);
5700 	}
5701 
5702 out:
5703 	return current_link_up;
5704 }
5705 
5706 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5707 {
5708 	u32 orig_pause_cfg;
5709 	u32 orig_active_speed;
5710 	u8 orig_active_duplex;
5711 	u32 mac_status;
5712 	bool current_link_up;
5713 	int i;
5714 
5715 	orig_pause_cfg = tp->link_config.active_flowctrl;
5716 	orig_active_speed = tp->link_config.active_speed;
5717 	orig_active_duplex = tp->link_config.active_duplex;
5718 
5719 	if (!tg3_flag(tp, HW_AUTONEG) &&
5720 	    tp->link_up &&
5721 	    tg3_flag(tp, INIT_COMPLETE)) {
5722 		mac_status = tr32(MAC_STATUS);
5723 		mac_status &= (MAC_STATUS_PCS_SYNCED |
5724 			       MAC_STATUS_SIGNAL_DET |
5725 			       MAC_STATUS_CFG_CHANGED |
5726 			       MAC_STATUS_RCVD_CFG);
5727 		if (mac_status == (MAC_STATUS_PCS_SYNCED |
5728 				   MAC_STATUS_SIGNAL_DET)) {
5729 			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5730 					    MAC_STATUS_CFG_CHANGED));
5731 			return 0;
5732 		}
5733 	}
5734 
5735 	tw32_f(MAC_TX_AUTO_NEG, 0);
5736 
5737 	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5738 	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5739 	tw32_f(MAC_MODE, tp->mac_mode);
5740 	udelay(40);
5741 
5742 	if (tp->phy_id == TG3_PHY_ID_BCM8002)
5743 		tg3_init_bcm8002(tp);
5744 
	/* Enable link change events even when polling the serdes. */
5746 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5747 	udelay(40);
5748 
5749 	tp->link_config.rmt_adv = 0;
5750 	mac_status = tr32(MAC_STATUS);
5751 
5752 	if (tg3_flag(tp, HW_AUTONEG))
5753 		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5754 	else
5755 		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5756 
5757 	tp->napi[0].hw_status->status =
5758 		(SD_STATUS_UPDATED |
5759 		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5760 
5761 	for (i = 0; i < 100; i++) {
5762 		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5763 				    MAC_STATUS_CFG_CHANGED));
5764 		udelay(5);
5765 		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5766 					 MAC_STATUS_CFG_CHANGED |
5767 					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5768 			break;
5769 	}
5770 
5771 	mac_status = tr32(MAC_STATUS);
5772 	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5773 		current_link_up = false;
5774 		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5775 		    tp->serdes_counter == 0) {
5776 			tw32_f(MAC_MODE, (tp->mac_mode |
5777 					  MAC_MODE_SEND_CONFIGS));
5778 			udelay(1);
5779 			tw32_f(MAC_MODE, tp->mac_mode);
5780 		}
5781 	}
5782 
5783 	if (current_link_up) {
5784 		tp->link_config.active_speed = SPEED_1000;
5785 		tp->link_config.active_duplex = DUPLEX_FULL;
5786 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5787 				    LED_CTRL_LNKLED_OVERRIDE |
5788 				    LED_CTRL_1000MBPS_ON));
5789 	} else {
5790 		tp->link_config.active_speed = SPEED_UNKNOWN;
5791 		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5792 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5793 				    LED_CTRL_LNKLED_OVERRIDE |
5794 				    LED_CTRL_TRAFFIC_OVERRIDE));
5795 	}
5796 
5797 	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5798 		u32 now_pause_cfg = tp->link_config.active_flowctrl;
5799 		if (orig_pause_cfg != now_pause_cfg ||
5800 		    orig_active_speed != tp->link_config.active_speed ||
5801 		    orig_active_duplex != tp->link_config.active_duplex)
5802 			tg3_link_report(tp);
5803 	}
5804 
5805 	return 0;
5806 }
5807 
5808 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5809 {
5810 	int err = 0;
5811 	u32 bmsr, bmcr;
5812 	u32 current_speed = SPEED_UNKNOWN;
5813 	u8 current_duplex = DUPLEX_UNKNOWN;
5814 	bool current_link_up = false;
	/* local_adv and remote_adv are zero-initialized because the
	 * SGMII fast path below can jump to fiber_setup_done without
	 * negotiating, and tg3_setup_flow_control() must not see
	 * uninitialized values there.
	 */
	u32 local_adv = 0, remote_adv = 0, sgsr;
5816 
5817 	if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5818 	     tg3_asic_rev(tp) == ASIC_REV_5720) &&
5819 	     !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5820 	     (sgsr & SERDES_TG3_SGMII_MODE)) {
5821 
5822 		if (force_reset)
5823 			tg3_phy_reset(tp);
5824 
5825 		tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5826 
5827 		if (!(sgsr & SERDES_TG3_LINK_UP)) {
5828 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5829 		} else {
5830 			current_link_up = true;
5831 			if (sgsr & SERDES_TG3_SPEED_1000) {
5832 				current_speed = SPEED_1000;
5833 				tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5834 			} else if (sgsr & SERDES_TG3_SPEED_100) {
5835 				current_speed = SPEED_100;
5836 				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5837 			} else {
5838 				current_speed = SPEED_10;
5839 				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5840 			}
5841 
5842 			if (sgsr & SERDES_TG3_FULL_DUPLEX)
5843 				current_duplex = DUPLEX_FULL;
5844 			else
5845 				current_duplex = DUPLEX_HALF;
5846 		}
5847 
5848 		tw32_f(MAC_MODE, tp->mac_mode);
5849 		udelay(40);
5850 
5851 		tg3_clear_mac_status(tp);
5852 
5853 		goto fiber_setup_done;
5854 	}
5855 
5856 	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5857 	tw32_f(MAC_MODE, tp->mac_mode);
5858 	udelay(40);
5859 
5860 	tg3_clear_mac_status(tp);
5861 
5862 	if (force_reset)
5863 		tg3_phy_reset(tp);
5864 
5865 	tp->link_config.rmt_adv = 0;
5866 
5867 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5868 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5869 	if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5870 		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5871 			bmsr |= BMSR_LSTATUS;
5872 		else
5873 			bmsr &= ~BMSR_LSTATUS;
5874 	}
5875 
5876 	err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5877 
5878 	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5879 	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5880 		/* do nothing, just check for link up at the end */
5881 	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5882 		u32 adv, newadv;
5883 
5884 		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5885 		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5886 				 ADVERTISE_1000XPAUSE |
5887 				 ADVERTISE_1000XPSE_ASYM |
5888 				 ADVERTISE_SLCT);
5889 
5890 		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5891 		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5892 
5893 		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5894 			tg3_writephy(tp, MII_ADVERTISE, newadv);
5895 			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5896 			tg3_writephy(tp, MII_BMCR, bmcr);
5897 
5898 			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5899 			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5900 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5901 
5902 			return err;
5903 		}
5904 	} else {
5905 		u32 new_bmcr;
5906 
5907 		bmcr &= ~BMCR_SPEED1000;
5908 		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5909 
5910 		if (tp->link_config.duplex == DUPLEX_FULL)
5911 			new_bmcr |= BMCR_FULLDPLX;
5912 
5913 		if (new_bmcr != bmcr) {
5914 			/* BMCR_SPEED1000 is a reserved bit that needs
5915 			 * to be set on write.
5916 			 */
5917 			new_bmcr |= BMCR_SPEED1000;
5918 
5919 			/* Force a linkdown */
5920 			if (tp->link_up) {
5921 				u32 adv;
5922 
5923 				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5924 				adv &= ~(ADVERTISE_1000XFULL |
5925 					 ADVERTISE_1000XHALF |
5926 					 ADVERTISE_SLCT);
5927 				tg3_writephy(tp, MII_ADVERTISE, adv);
5928 				tg3_writephy(tp, MII_BMCR, bmcr |
5929 							   BMCR_ANRESTART |
5930 							   BMCR_ANENABLE);
5931 				udelay(10);
5932 				tg3_carrier_off(tp);
5933 			}
5934 			tg3_writephy(tp, MII_BMCR, new_bmcr);
5935 			bmcr = new_bmcr;
5936 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5937 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5938 			if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5939 				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5940 					bmsr |= BMSR_LSTATUS;
5941 				else
5942 					bmsr &= ~BMSR_LSTATUS;
5943 			}
5944 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5945 		}
5946 	}
5947 
5948 	if (bmsr & BMSR_LSTATUS) {
5949 		current_speed = SPEED_1000;
5950 		current_link_up = true;
5951 		if (bmcr & BMCR_FULLDPLX)
5952 			current_duplex = DUPLEX_FULL;
5953 		else
5954 			current_duplex = DUPLEX_HALF;
5955 
5956 		local_adv = 0;
5957 		remote_adv = 0;
5958 
5959 		if (bmcr & BMCR_ANENABLE) {
5960 			u32 common;
5961 
5962 			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5963 			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5964 			common = local_adv & remote_adv;
5965 			if (common & (ADVERTISE_1000XHALF |
5966 				      ADVERTISE_1000XFULL)) {
5967 				if (common & ADVERTISE_1000XFULL)
5968 					current_duplex = DUPLEX_FULL;
5969 				else
5970 					current_duplex = DUPLEX_HALF;
5971 
5972 				tp->link_config.rmt_adv =
5973 					   mii_adv_to_ethtool_adv_x(remote_adv);
5974 			} else if (!tg3_flag(tp, 5780_CLASS)) {
5975 				/* Link is up via parallel detect */
5976 			} else {
5977 				current_link_up = false;
5978 			}
5979 		}
5980 	}
5981 
5982 fiber_setup_done:
5983 	if (current_link_up && current_duplex == DUPLEX_FULL)
5984 		tg3_setup_flow_control(tp, local_adv, remote_adv);
5985 
5986 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5987 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5988 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5989 
5990 	tw32_f(MAC_MODE, tp->mac_mode);
5991 	udelay(40);
5992 
5993 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5994 
5995 	tp->link_config.active_speed = current_speed;
5996 	tp->link_config.active_duplex = current_duplex;
5997 
5998 	tg3_test_and_report_link_chg(tp, current_link_up);
5999 	return err;
6000 }
6001 
6002 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6003 {
6004 	if (tp->serdes_counter) {
6005 		/* Give autoneg time to complete. */
6006 		tp->serdes_counter--;
6007 		return;
6008 	}
6009 
6010 	if (!tp->link_up &&
6011 	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6012 		u32 bmcr;
6013 
6014 		tg3_readphy(tp, MII_BMCR, &bmcr);
6015 		if (bmcr & BMCR_ANENABLE) {
6016 			u32 phy1, phy2;
6017 
6018 			/* Select shadow register 0x1f */
6019 			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6020 			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6021 
6022 			/* Select expansion interrupt status register */
6023 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6024 					 MII_TG3_DSP_EXP1_INT_STAT);
6025 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6026 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6027 
6028 			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and are not
				 * receiving config code words; the link
				 * is up via parallel detection.
				 */
6033 
6034 				bmcr &= ~BMCR_ANENABLE;
6035 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6036 				tg3_writephy(tp, MII_BMCR, bmcr);
6037 				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6038 			}
6039 		}
6040 	} else if (tp->link_up &&
6041 		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6042 		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6043 		u32 phy2;
6044 
6045 		/* Select expansion interrupt status register */
6046 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6047 				 MII_TG3_DSP_EXP1_INT_STAT);
6048 		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6049 		if (phy2 & 0x20) {
6050 			u32 bmcr;
6051 
6052 			/* Config code words received, turn on autoneg. */
6053 			tg3_readphy(tp, MII_BMCR, &bmcr);
6054 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6055 
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
6059 	}
6060 }
6061 
6062 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6063 {
6064 	u32 val;
6065 	int err;
6066 
6067 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6068 		err = tg3_setup_fiber_phy(tp, force_reset);
6069 	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6070 		err = tg3_setup_fiber_mii_phy(tp, force_reset);
6071 	else
6072 		err = tg3_setup_copper_phy(tp, force_reset);
6073 
6074 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6075 		u32 scale;
6076 
6077 		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6078 		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6079 			scale = 65;
6080 		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6081 			scale = 6;
6082 		else
6083 			scale = 12;
6084 
6085 		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6086 		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6087 		tw32(GRC_MISC_CFG, val);
6088 	}
6089 
6090 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6091 	      (6 << TX_LENGTHS_IPG_SHIFT);
6092 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6093 	    tg3_asic_rev(tp) == ASIC_REV_5762)
6094 		val |= tr32(MAC_TX_LENGTHS) &
6095 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
6096 			TX_LENGTHS_CNT_DWN_VAL_MSK);
6097 
6098 	if (tp->link_config.active_speed == SPEED_1000 &&
6099 	    tp->link_config.active_duplex == DUPLEX_HALF)
6100 		tw32(MAC_TX_LENGTHS, val |
6101 		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6102 	else
6103 		tw32(MAC_TX_LENGTHS, val |
6104 		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6105 
6106 	if (!tg3_flag(tp, 5705_PLUS)) {
6107 		if (tp->link_up) {
6108 			tw32(HOSTCC_STAT_COAL_TICKS,
6109 			     tp->coal.stats_block_coalesce_usecs);
6110 		} else {
6111 			tw32(HOSTCC_STAT_COAL_TICKS, 0);
6112 		}
6113 	}
6114 
6115 	if (tg3_flag(tp, ASPM_WORKAROUND)) {
6116 		val = tr32(PCIE_PWR_MGMT_THRESH);
6117 		if (!tp->link_up)
6118 			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6119 			      tp->pwrmgmt_thresh;
6120 		else
6121 			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6122 		tw32(PCIE_PWR_MGMT_THRESH, val);
6123 	}
6124 
6125 	return err;
6126 }
6127 
6128 /* tp->lock must be held */
6129 static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
6130 {
6131 	u64 stamp;
6132 
6133 	ptp_read_system_prets(sts);
6134 	stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6135 	ptp_read_system_postts(sts);
6136 	stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6137 
6138 	return stamp;
6139 }
6140 
6141 /* tp->lock must be held */
6142 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6143 {
6144 	u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6145 
6146 	tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6147 	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6148 	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6149 	tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6150 }
6151 
6152 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6153 static inline void tg3_full_unlock(struct tg3 *tp);
6154 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6155 {
6156 	struct tg3 *tp = netdev_priv(dev);
6157 
6158 	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6159 				SOF_TIMESTAMPING_RX_SOFTWARE |
6160 				SOF_TIMESTAMPING_SOFTWARE;
6161 
6162 	if (tg3_flag(tp, PTP_CAPABLE)) {
6163 		info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6164 					SOF_TIMESTAMPING_RX_HARDWARE |
6165 					SOF_TIMESTAMPING_RAW_HARDWARE;
6166 	}
6167 
6168 	if (tp->ptp_clock)
6169 		info->phc_index = ptp_clock_index(tp->ptp_clock);
6170 	else
6171 		info->phc_index = -1;
6172 
6173 	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6174 
6175 	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6176 			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6177 			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6178 			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6179 	return 0;
6180 }
6181 
6182 static int tg3_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
6183 {
6184 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6185 	u64 correction;
6186 	bool neg_adj;
6187 
	/* Frequency adjustment is performed using hardware with a 24-bit
	 * accumulator and a programmable correction value.  On each clock
	 * cycle, the correction value is added to the accumulator and,
	 * when it overflows, the time counter is incremented or
	 * decremented.
	 */
6193 	neg_adj = diff_by_scaled_ppm(1 << 24, scaled_ppm, &correction);
6194 
6195 	tg3_full_lock(tp, 0);
6196 
6197 	if (correction)
6198 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6199 		     TG3_EAV_REF_CLK_CORRECT_EN |
6200 		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) |
6201 		     ((u32)correction & TG3_EAV_REF_CLK_CORRECT_MASK));
6202 	else
6203 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6204 
6205 	tg3_full_unlock(tp);
6206 
6207 	return 0;
6208 }
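
/* Worked example (illustrative sketch, not driver code): how a requested
 * offset in scaled-ppm units maps to the correction value programmed
 * above.  This assumes diff_by_scaled_ppm() computes
 * base * |scaled_ppm| / (1000000 << 16) and returns whether the
 * adjustment is negative, per its kernel-doc.
 */
#if 0	/* example only, kept out of the build */
static u64 tg3_example_ptp_correction(long scaled_ppm)
{
	u64 abs_ppm = scaled_ppm < 0 ? -(u64)scaled_ppm : (u64)scaled_ppm;

	/* For scaled_ppm = 65536 (exactly 1 ppm) this yields 16; the
	 * 24-bit accumulator then advances by an extra 16/2^24 per
	 * clock, i.e. roughly the requested part per million.
	 */
	return div64_u64((u64)(1 << 24) * abs_ppm, 1000000ULL << 16);
}
#endif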
6209 
6210 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6211 {
6212 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6213 
6214 	tg3_full_lock(tp, 0);
6215 	tp->ptp_adjust += delta;
6216 	tg3_full_unlock(tp);
6217 
6218 	return 0;
6219 }
6220 
6221 static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
6222 			    struct ptp_system_timestamp *sts)
6223 {
6224 	u64 ns;
6225 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6226 
6227 	tg3_full_lock(tp, 0);
6228 	ns = tg3_refclk_read(tp, sts);
6229 	ns += tp->ptp_adjust;
6230 	tg3_full_unlock(tp);
6231 
6232 	*ts = ns_to_timespec64(ns);
6233 
6234 	return 0;
6235 }
6236 
6237 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6238 			   const struct timespec64 *ts)
6239 {
6240 	u64 ns;
6241 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6242 
6243 	ns = timespec64_to_ns(ts);
6244 
6245 	tg3_full_lock(tp, 0);
6246 	tg3_refclk_write(tp, ns);
6247 	tp->ptp_adjust = 0;
6248 	tg3_full_unlock(tp);
6249 
6250 	return 0;
6251 }
6252 
6253 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6254 			  struct ptp_clock_request *rq, int on)
6255 {
6256 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6257 	u32 clock_ctl;
6258 	int rval = 0;
6259 
6260 	switch (rq->type) {
6261 	case PTP_CLK_REQ_PEROUT:
6262 		/* Reject requests with unsupported flags */
6263 		if (rq->perout.flags)
6264 			return -EOPNOTSUPP;
6265 
6266 		if (rq->perout.index != 0)
6267 			return -EINVAL;
6268 
6269 		tg3_full_lock(tp, 0);
6270 		clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6271 		clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6272 
6273 		if (on) {
6274 			u64 nsec;
6275 
6276 			nsec = rq->perout.start.sec * 1000000000ULL +
6277 			       rq->perout.start.nsec;
6278 
6279 			if (rq->perout.period.sec || rq->perout.period.nsec) {
6280 				netdev_warn(tp->dev,
6281 					    "Device supports only a one-shot timesync output, period must be 0\n");
6282 				rval = -EINVAL;
6283 				goto err_out;
6284 			}
6285 
6286 			if (nsec & (1ULL << 63)) {
6287 				netdev_warn(tp->dev,
6288 					    "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6289 				rval = -EINVAL;
6290 				goto err_out;
6291 			}
6292 
6293 			tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6294 			tw32(TG3_EAV_WATCHDOG0_MSB,
6295 			     TG3_EAV_WATCHDOG0_EN |
6296 			     ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6297 
6298 			tw32(TG3_EAV_REF_CLCK_CTL,
6299 			     clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6300 		} else {
6301 			tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6302 			tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6303 		}
6304 
6305 err_out:
6306 		tg3_full_unlock(tp);
6307 		return rval;
6308 
6309 	default:
6310 		break;
6311 	}
6312 
6313 	return -EOPNOTSUPP;
6314 }
6315 
6316 static const struct ptp_clock_info tg3_ptp_caps = {
6317 	.owner		= THIS_MODULE,
6318 	.name		= "tg3 clock",
6319 	.max_adj	= 250000000,
6320 	.n_alarm	= 0,
6321 	.n_ext_ts	= 0,
6322 	.n_per_out	= 1,
6323 	.n_pins		= 0,
6324 	.pps		= 0,
6325 	.adjfine	= tg3_ptp_adjfine,
6326 	.adjtime	= tg3_ptp_adjtime,
6327 	.gettimex64	= tg3_ptp_gettimex,
6328 	.settime64	= tg3_ptp_settime,
6329 	.enable		= tg3_ptp_enable,
6330 };
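
/* Registration sketch (illustrative): tp->ptp_info is seeded from
 * tg3_ptp_caps in tg3_ptp_init() below; the ptp_clock_register() call
 * itself happens at probe time, outside this excerpt, roughly like so:
 */
#if 0	/* example only, kept out of the build */
static void tg3_example_ptp_register(struct tg3 *tp)
{
	tp->ptp_clock = ptp_clock_register(&tp->ptp_info, &tp->pdev->dev);
	if (IS_ERR(tp->ptp_clock))
		tp->ptp_clock = NULL;	/* run without a PHC */
}
#endif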
6331 
6332 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6333 				     struct skb_shared_hwtstamps *timestamp)
6334 {
6335 	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6336 	timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6337 					   tp->ptp_adjust);
6338 }
6339 
6340 /* tp->lock must be held */
6341 static void tg3_ptp_init(struct tg3 *tp)
6342 {
6343 	if (!tg3_flag(tp, PTP_CAPABLE))
6344 		return;
6345 
6346 	/* Initialize the hardware clock to the system time. */
6347 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6348 	tp->ptp_adjust = 0;
6349 	tp->ptp_info = tg3_ptp_caps;
6350 }
6351 
6352 /* tp->lock must be held */
6353 static void tg3_ptp_resume(struct tg3 *tp)
6354 {
6355 	if (!tg3_flag(tp, PTP_CAPABLE))
6356 		return;
6357 
6358 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6359 	tp->ptp_adjust = 0;
6360 }
6361 
6362 static void tg3_ptp_fini(struct tg3 *tp)
6363 {
6364 	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6365 		return;
6366 
6367 	ptp_clock_unregister(tp->ptp_clock);
6368 	tp->ptp_clock = NULL;
6369 	tp->ptp_adjust = 0;
6370 }
6371 
6372 static inline int tg3_irq_sync(struct tg3 *tp)
6373 {
6374 	return tp->irq_sync;
6375 }
6376 
6377 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6378 {
6379 	int i;
6380 
6381 	dst = (u32 *)((u8 *)dst + off);
6382 	for (i = 0; i < len; i += sizeof(u32))
6383 		*dst++ = tr32(off + i);
6384 }
6385 
6386 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6387 {
6388 	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6389 	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6390 	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6391 	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6392 	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6393 	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6394 	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6395 	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6396 	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6397 	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6398 	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6399 	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6400 	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6401 	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6402 	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6403 	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6404 	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6405 	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6406 	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6407 
6408 	if (tg3_flag(tp, SUPPORT_MSIX))
6409 		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6410 
6411 	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6412 	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6413 	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6414 	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6415 	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6416 	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6417 	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6418 	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6419 
6420 	if (!tg3_flag(tp, 5705_PLUS)) {
6421 		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6422 		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6423 		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6424 	}
6425 
6426 	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6427 	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6428 	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6429 	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6430 	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6431 
6432 	if (tg3_flag(tp, NVRAM))
6433 		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6434 }
6435 
6436 static void tg3_dump_state(struct tg3 *tp)
6437 {
6438 	int i;
6439 	u32 *regs;
6440 
6441 	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6442 	if (!regs)
6443 		return;
6444 
6445 	if (tg3_flag(tp, PCI_EXPRESS)) {
6446 		/* Read up to but not including private PCI registers */
6447 		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6448 			regs[i / sizeof(u32)] = tr32(i);
6449 	} else
6450 		tg3_dump_legacy_regs(tp, regs);
6451 
6452 	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6453 		if (!regs[i + 0] && !regs[i + 1] &&
6454 		    !regs[i + 2] && !regs[i + 3])
6455 			continue;
6456 
6457 		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6458 			   i * 4,
6459 			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6460 	}
6461 
6462 	kfree(regs);
6463 
6464 	for (i = 0; i < tp->irq_cnt; i++) {
6465 		struct tg3_napi *tnapi = &tp->napi[i];
6466 
6467 		/* SW status block */
6468 		netdev_err(tp->dev,
6469 			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6470 			   i,
6471 			   tnapi->hw_status->status,
6472 			   tnapi->hw_status->status_tag,
6473 			   tnapi->hw_status->rx_jumbo_consumer,
6474 			   tnapi->hw_status->rx_consumer,
6475 			   tnapi->hw_status->rx_mini_consumer,
6476 			   tnapi->hw_status->idx[0].rx_producer,
6477 			   tnapi->hw_status->idx[0].tx_consumer);
6478 
6479 		netdev_err(tp->dev,
6480 		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6481 			   i,
6482 			   tnapi->last_tag, tnapi->last_irq_tag,
6483 			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6484 			   tnapi->rx_rcb_ptr,
6485 			   tnapi->prodring.rx_std_prod_idx,
6486 			   tnapi->prodring.rx_std_cons_idx,
6487 			   tnapi->prodring.rx_jmb_prod_idx,
6488 			   tnapi->prodring.rx_jmb_cons_idx);
6489 	}
6490 }
6491 
6492 /* This is called whenever we suspect that the system chipset is re-
6493  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6494  * is bogus tx completions. We try to recover by setting the
6495  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6496  * in the workqueue.
6497  */
6498 static void tg3_tx_recover(struct tg3 *tp)
6499 {
6500 	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6501 	       tp->write32_tx_mbox == tg3_write_indirect_mbox);
6502 
6503 	netdev_warn(tp->dev,
6504 		    "The system may be re-ordering memory-mapped I/O "
6505 		    "cycles to the network device, attempting to recover. "
6506 		    "Please report the problem to the driver maintainer "
6507 		    "and include system chipset information.\n");
6508 
6509 	tg3_flag_set(tp, TX_RECOVERY_PENDING);
6510 }
6511 
6512 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6513 {
6514 	/* Tell compiler to fetch tx indices from memory. */
6515 	barrier();
6516 	return tnapi->tx_pending -
6517 	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6518 }
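
/* A quick sketch of the ring arithmetic above (illustrative only; the
 * index values are made up).  Because tx_prod and tx_cons are
 * free-running, their difference masked by TG3_TX_RING_SIZE - 1 is the
 * ring occupancy even across wrap-around.
 */
#if 0	/* example only, kept out of the build */
static u32 tg3_example_tx_avail(void)
{
	u32 tx_pending = 512;			/* hypothetical ring depth */
	u32 tx_prod = 5, tx_cons = 508;		/* producer wrapped past end */

	/* (5 - 508) & 511 == 9 descriptors in flight, so 503 are free. */
	return tx_pending - ((tx_prod - tx_cons) & (TG3_TX_RING_SIZE - 1));
}
#endif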
6519 
/* Tigon3 never reports partial packet sends, so we do not need
 * the special logic (which a driver like SunGEM needs) to handle
 * SKBs that have not had all of their frags sent yet.
 */
6524 static void tg3_tx(struct tg3_napi *tnapi)
6525 {
6526 	struct tg3 *tp = tnapi->tp;
6527 	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6528 	u32 sw_idx = tnapi->tx_cons;
6529 	struct netdev_queue *txq;
6530 	int index = tnapi - tp->napi;
6531 	unsigned int pkts_compl = 0, bytes_compl = 0;
6532 
6533 	if (tg3_flag(tp, ENABLE_TSS))
6534 		index--;
6535 
6536 	txq = netdev_get_tx_queue(tp->dev, index);
6537 
6538 	while (sw_idx != hw_idx) {
6539 		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6540 		struct sk_buff *skb = ri->skb;
6541 		int i, tx_bug = 0;
6542 
6543 		if (unlikely(skb == NULL)) {
6544 			tg3_tx_recover(tp);
6545 			return;
6546 		}
6547 
6548 		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6549 			struct skb_shared_hwtstamps timestamp;
6550 			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6551 			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6552 
6553 			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6554 
6555 			skb_tstamp_tx(skb, &timestamp);
6556 		}
6557 
6558 		dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping),
6559 				 skb_headlen(skb), DMA_TO_DEVICE);
6560 
6561 		ri->skb = NULL;
6562 
6563 		while (ri->fragmented) {
6564 			ri->fragmented = false;
6565 			sw_idx = NEXT_TX(sw_idx);
6566 			ri = &tnapi->tx_buffers[sw_idx];
6567 		}
6568 
6569 		sw_idx = NEXT_TX(sw_idx);
6570 
6571 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6572 			ri = &tnapi->tx_buffers[sw_idx];
6573 			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6574 				tx_bug = 1;
6575 
6576 			dma_unmap_page(&tp->pdev->dev,
6577 				       dma_unmap_addr(ri, mapping),
6578 				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
6579 				       DMA_TO_DEVICE);
6580 
6581 			while (ri->fragmented) {
6582 				ri->fragmented = false;
6583 				sw_idx = NEXT_TX(sw_idx);
6584 				ri = &tnapi->tx_buffers[sw_idx];
6585 			}
6586 
6587 			sw_idx = NEXT_TX(sw_idx);
6588 		}
6589 
6590 		pkts_compl++;
6591 		bytes_compl += skb->len;
6592 
6593 		dev_consume_skb_any(skb);
6594 
6595 		if (unlikely(tx_bug)) {
6596 			tg3_tx_recover(tp);
6597 			return;
6598 		}
6599 	}
6600 
6601 	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6602 
6603 	tnapi->tx_cons = sw_idx;
6604 
6605 	/* Need to make the tx_cons update visible to tg3_start_xmit()
6606 	 * before checking for netif_queue_stopped().  Without the
6607 	 * memory barrier, there is a small possibility that tg3_start_xmit()
6608 	 * will miss it and cause the queue to be stopped forever.
6609 	 */
6610 	smp_mb();
6611 
6612 	if (unlikely(netif_tx_queue_stopped(txq) &&
6613 		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6614 		__netif_tx_lock(txq, smp_processor_id());
6615 		if (netif_tx_queue_stopped(txq) &&
6616 		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6617 			netif_tx_wake_queue(txq);
6618 		__netif_tx_unlock(txq);
6619 	}
6620 }
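
/* Producer-side sketch of the smp_mb() pairing relied on above
 * (illustrative only; the real logic lives in tg3_start_xmit() later
 * in this file, and stop_thresh stands in for its MAX_SKB_FRAGS-based
 * threshold):
 */
#if 0	/* example only, kept out of the build */
static void tg3_example_xmit_side(struct tg3_napi *tnapi,
				  struct netdev_queue *txq, u32 stop_thresh)
{
	/* Stop the queue first, then re-check availability: tg3_tx()
	 * updates tx_cons before its smp_mb(), so at least one side
	 * observes the other's update and the queue cannot stay
	 * stopped forever.
	 */
	if (tg3_tx_avail(tnapi) <= stop_thresh) {
		netif_tx_stop_queue(txq);
		smp_mb();	/* pairs with the smp_mb() in tg3_tx() */
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}
}
#endif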
6621 
6622 static void tg3_frag_free(bool is_frag, void *data)
6623 {
6624 	if (is_frag)
6625 		skb_free_frag(data);
6626 	else
6627 		kfree(data);
6628 }
6629 
6630 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6631 {
6632 	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6633 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6634 
6635 	if (!ri->data)
6636 		return;
6637 
6638 	dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), map_sz,
6639 			 DMA_FROM_DEVICE);
6640 	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6641 	ri->data = NULL;
6642 }
6643 
/* Returns the size of the skb allocated, or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant; see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address), whereas for the RX status
 * buffers the cpu only reads the last cache line of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
6656 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6657 			     u32 opaque_key, u32 dest_idx_unmasked,
6658 			     unsigned int *frag_size)
6659 {
6660 	struct tg3_rx_buffer_desc *desc;
6661 	struct ring_info *map;
6662 	u8 *data;
6663 	dma_addr_t mapping;
6664 	int skb_size, data_size, dest_idx;
6665 
6666 	switch (opaque_key) {
6667 	case RXD_OPAQUE_RING_STD:
6668 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6669 		desc = &tpr->rx_std[dest_idx];
6670 		map = &tpr->rx_std_buffers[dest_idx];
6671 		data_size = tp->rx_pkt_map_sz;
6672 		break;
6673 
6674 	case RXD_OPAQUE_RING_JUMBO:
6675 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6676 		desc = &tpr->rx_jmb[dest_idx].std;
6677 		map = &tpr->rx_jmb_buffers[dest_idx];
6678 		data_size = TG3_RX_JMB_MAP_SZ;
6679 		break;
6680 
6681 	default:
6682 		return -EINVAL;
6683 	}
6684 
6685 	/* Do not overwrite any of the map or rp information
6686 	 * until we are sure we can commit to a new buffer.
6687 	 *
6688 	 * Callers depend upon this behavior and assume that
6689 	 * we leave everything unchanged if we fail.
6690 	 */
6691 	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6692 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6693 	if (skb_size <= PAGE_SIZE) {
6694 		data = napi_alloc_frag(skb_size);
6695 		*frag_size = skb_size;
6696 	} else {
6697 		data = kmalloc(skb_size, GFP_ATOMIC);
6698 		*frag_size = 0;
6699 	}
6700 	if (!data)
6701 		return -ENOMEM;
6702 
6703 	mapping = dma_map_single(&tp->pdev->dev, data + TG3_RX_OFFSET(tp),
6704 				 data_size, DMA_FROM_DEVICE);
6705 	if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
6706 		tg3_frag_free(skb_size <= PAGE_SIZE, data);
6707 		return -EIO;
6708 	}
6709 
6710 	map->data = data;
6711 	dma_unmap_addr_set(map, mapping, mapping);
6712 
6713 	desc->addr_hi = ((u64)mapping >> 32);
6714 	desc->addr_lo = ((u64)mapping & 0xffffffff);
6715 
6716 	return data_size;
6717 }
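
/* Sizing sketch for the frag-vs-kmalloc split above (illustrative
 * only): it mirrors the skb_size computation, and whether the cheap
 * page-frag path is taken depends on PAGE_SIZE.  Standard MTU buffers
 * typically fit in a 4 KiB page; jumbo buffers (TG3_RX_JMB_MAP_SZ,
 * roughly 9 KiB) typically do not and fall back to kmalloc().
 */
#if 0	/* example only, kept out of the build */
static bool tg3_example_uses_frag(struct tg3 *tp, int data_size)
{
	int skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	return skb_size <= PAGE_SIZE;	/* frag cache vs. kmalloc() */
}
#endif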
6718 
/* We only need to copy the address over because the other
 * members of the RX descriptor are invariant.  See the notes
 * above tg3_alloc_rx_data for full details.
 */
6723 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6724 			   struct tg3_rx_prodring_set *dpr,
6725 			   u32 opaque_key, int src_idx,
6726 			   u32 dest_idx_unmasked)
6727 {
6728 	struct tg3 *tp = tnapi->tp;
6729 	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6730 	struct ring_info *src_map, *dest_map;
6731 	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6732 	int dest_idx;
6733 
6734 	switch (opaque_key) {
6735 	case RXD_OPAQUE_RING_STD:
6736 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6737 		dest_desc = &dpr->rx_std[dest_idx];
6738 		dest_map = &dpr->rx_std_buffers[dest_idx];
6739 		src_desc = &spr->rx_std[src_idx];
6740 		src_map = &spr->rx_std_buffers[src_idx];
6741 		break;
6742 
6743 	case RXD_OPAQUE_RING_JUMBO:
6744 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6745 		dest_desc = &dpr->rx_jmb[dest_idx].std;
6746 		dest_map = &dpr->rx_jmb_buffers[dest_idx];
6747 		src_desc = &spr->rx_jmb[src_idx].std;
6748 		src_map = &spr->rx_jmb_buffers[src_idx];
6749 		break;
6750 
6751 	default:
6752 		return;
6753 	}
6754 
6755 	dest_map->data = src_map->data;
6756 	dma_unmap_addr_set(dest_map, mapping,
6757 			   dma_unmap_addr(src_map, mapping));
6758 	dest_desc->addr_hi = src_desc->addr_hi;
6759 	dest_desc->addr_lo = src_desc->addr_lo;
6760 
6761 	/* Ensure that the update to the skb happens after the physical
6762 	 * addresses have been transferred to the new BD location.
6763 	 */
6764 	smp_wmb();
6765 
6766 	src_map->data = NULL;
6767 }
6768 
6769 /* The RX ring scheme is composed of multiple rings which post fresh
6770  * buffers to the chip, and one special ring the chip uses to report
6771  * status back to the host.
6772  *
6773  * The special ring reports the status of received packets to the
6774  * host.  The chip does not write into the original descriptor the
6775  * RX buffer was obtained from.  The chip simply takes the original
6776  * descriptor as provided by the host, updates the status and length
6777  * field, then writes this into the next status ring entry.
6778  *
6779  * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
 * it is first placed into the on-chip RAM.  Once the packet's length
 * is known, the chip walks down the TG3_BDINFO entries to select the
 * ring: each TG3_BDINFO specifies a MAXLEN field, and the first
 * TG3_BDINFO whose MAXLEN covers the new packet's length is chosen.
6785  *
 * The "separate ring for rx status" scheme may sound odd, but it makes
6787  * sense from a cache coherency perspective.  If only the host writes
6788  * to the buffer post rings, and only the chip writes to the rx status
6789  * rings, then cache lines never move beyond shared-modified state.
6790  * If both the host and chip were to write into the same ring, cache line
6791  * eviction could occur since both entities want it in an exclusive state.
6792  */
6793 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6794 {
6795 	struct tg3 *tp = tnapi->tp;
6796 	u32 work_mask, rx_std_posted = 0;
6797 	u32 std_prod_idx, jmb_prod_idx;
6798 	u32 sw_idx = tnapi->rx_rcb_ptr;
6799 	u16 hw_idx;
6800 	int received;
6801 	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6802 
6803 	hw_idx = *(tnapi->rx_rcb_prod_idx);
6804 	/*
6805 	 * We need to order the read of hw_idx and the read of
6806 	 * the opaque cookie.
6807 	 */
6808 	rmb();
6809 	work_mask = 0;
6810 	received = 0;
6811 	std_prod_idx = tpr->rx_std_prod_idx;
6812 	jmb_prod_idx = tpr->rx_jmb_prod_idx;
6813 	while (sw_idx != hw_idx && budget > 0) {
6814 		struct ring_info *ri;
6815 		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6816 		unsigned int len;
6817 		struct sk_buff *skb;
6818 		dma_addr_t dma_addr;
6819 		u32 opaque_key, desc_idx, *post_ptr;
6820 		u8 *data;
6821 		u64 tstamp = 0;
6822 
6823 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6824 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6825 		if (opaque_key == RXD_OPAQUE_RING_STD) {
6826 			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6827 			dma_addr = dma_unmap_addr(ri, mapping);
6828 			data = ri->data;
6829 			post_ptr = &std_prod_idx;
6830 			rx_std_posted++;
6831 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6832 			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6833 			dma_addr = dma_unmap_addr(ri, mapping);
6834 			data = ri->data;
6835 			post_ptr = &jmb_prod_idx;
6836 		} else
6837 			goto next_pkt_nopost;
6838 
6839 		work_mask |= opaque_key;
6840 
6841 		if (desc->err_vlan & RXD_ERR_MASK) {
6842 		drop_it:
6843 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6844 				       desc_idx, *post_ptr);
6845 		drop_it_no_recycle:
			/* The card keeps track of the other statistics. */
6847 			tp->rx_dropped++;
6848 			goto next_pkt;
6849 		}
6850 
6851 		prefetch(data + TG3_RX_OFFSET(tp));
6852 		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6853 		      ETH_FCS_LEN;
6854 
6855 		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6856 		     RXD_FLAG_PTPSTAT_PTPV1 ||
6857 		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6858 		     RXD_FLAG_PTPSTAT_PTPV2) {
6859 			tstamp = tr32(TG3_RX_TSTAMP_LSB);
6860 			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6861 		}
6862 
6863 		if (len > TG3_RX_COPY_THRESH(tp)) {
6864 			int skb_size;
6865 			unsigned int frag_size;
6866 
6867 			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6868 						    *post_ptr, &frag_size);
6869 			if (skb_size < 0)
6870 				goto drop_it;
6871 
6872 			dma_unmap_single(&tp->pdev->dev, dma_addr, skb_size,
6873 					 DMA_FROM_DEVICE);
6874 
6875 			/* Ensure that the update to the data happens
6876 			 * after the usage of the old DMA mapping.
6877 			 */
6878 			smp_wmb();
6879 
6880 			ri->data = NULL;
6881 
6882 			skb = build_skb(data, frag_size);
6883 			if (!skb) {
6884 				tg3_frag_free(frag_size != 0, data);
6885 				goto drop_it_no_recycle;
6886 			}
6887 			skb_reserve(skb, TG3_RX_OFFSET(tp));
6888 		} else {
6889 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6890 				       desc_idx, *post_ptr);
6891 
6892 			skb = netdev_alloc_skb(tp->dev,
6893 					       len + TG3_RAW_IP_ALIGN);
6894 			if (skb == NULL)
6895 				goto drop_it_no_recycle;
6896 
6897 			skb_reserve(skb, TG3_RAW_IP_ALIGN);
6898 			dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr, len,
6899 						DMA_FROM_DEVICE);
6900 			memcpy(skb->data,
6901 			       data + TG3_RX_OFFSET(tp),
6902 			       len);
6903 			dma_sync_single_for_device(&tp->pdev->dev, dma_addr,
6904 						   len, DMA_FROM_DEVICE);
6905 		}
6906 
6907 		skb_put(skb, len);
6908 		if (tstamp)
6909 			tg3_hwclock_to_timestamp(tp, tstamp,
6910 						 skb_hwtstamps(skb));
6911 
6912 		if ((tp->dev->features & NETIF_F_RXCSUM) &&
6913 		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6914 		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6915 		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
6916 			skb->ip_summed = CHECKSUM_UNNECESSARY;
6917 		else
6918 			skb_checksum_none_assert(skb);
6919 
6920 		skb->protocol = eth_type_trans(skb, tp->dev);
6921 
6922 		if (len > (tp->dev->mtu + ETH_HLEN) &&
6923 		    skb->protocol != htons(ETH_P_8021Q) &&
6924 		    skb->protocol != htons(ETH_P_8021AD)) {
6925 			dev_kfree_skb_any(skb);
6926 			goto drop_it_no_recycle;
6927 		}
6928 
6929 		if (desc->type_flags & RXD_FLAG_VLAN &&
6930 		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6931 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6932 					       desc->err_vlan & RXD_VLAN_MASK);
6933 
6934 		napi_gro_receive(&tnapi->napi, skb);
6935 
6936 		received++;
6937 		budget--;
6938 
6939 next_pkt:
6940 		(*post_ptr)++;
6941 
6942 		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6943 			tpr->rx_std_prod_idx = std_prod_idx &
6944 					       tp->rx_std_ring_mask;
6945 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6946 				     tpr->rx_std_prod_idx);
6947 			work_mask &= ~RXD_OPAQUE_RING_STD;
6948 			rx_std_posted = 0;
6949 		}
6950 next_pkt_nopost:
6951 		sw_idx++;
6952 		sw_idx &= tp->rx_ret_ring_mask;
6953 
6954 		/* Refresh hw_idx to see if there is new work */
6955 		if (sw_idx == hw_idx) {
6956 			hw_idx = *(tnapi->rx_rcb_prod_idx);
6957 			rmb();
6958 		}
6959 	}
6960 
6961 	/* ACK the status ring. */
6962 	tnapi->rx_rcb_ptr = sw_idx;
6963 	tw32_rx_mbox(tnapi->consmbox, sw_idx);
6964 
6965 	/* Refill RX ring(s). */
6966 	if (!tg3_flag(tp, ENABLE_RSS)) {
6967 		/* Sync BD data before updating mailbox */
6968 		wmb();
6969 
6970 		if (work_mask & RXD_OPAQUE_RING_STD) {
6971 			tpr->rx_std_prod_idx = std_prod_idx &
6972 					       tp->rx_std_ring_mask;
6973 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6974 				     tpr->rx_std_prod_idx);
6975 		}
6976 		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6977 			tpr->rx_jmb_prod_idx = jmb_prod_idx &
6978 					       tp->rx_jmb_ring_mask;
6979 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6980 				     tpr->rx_jmb_prod_idx);
6981 		}
6982 	} else if (work_mask) {
6983 		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6984 		 * updated before the producer indices can be updated.
6985 		 */
6986 		smp_wmb();
6987 
6988 		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6989 		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6990 
6991 		if (tnapi != &tp->napi[1]) {
6992 			tp->rx_refill = true;
6993 			napi_schedule(&tp->napi[1].napi);
6994 		}
6995 	}
6996 
6997 	return received;
6998 }
6999 
7000 static void tg3_poll_link(struct tg3 *tp)
7001 {
7002 	/* handle link change and other phy events */
7003 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7004 		struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7005 
7006 		if (sblk->status & SD_STATUS_LINK_CHG) {
7007 			sblk->status = SD_STATUS_UPDATED |
7008 				       (sblk->status & ~SD_STATUS_LINK_CHG);
7009 			spin_lock(&tp->lock);
7010 			if (tg3_flag(tp, USE_PHYLIB)) {
7011 				tw32_f(MAC_STATUS,
7012 				     (MAC_STATUS_SYNC_CHANGED |
7013 				      MAC_STATUS_CFG_CHANGED |
7014 				      MAC_STATUS_MI_COMPLETION |
7015 				      MAC_STATUS_LNKSTATE_CHANGED));
7016 				udelay(40);
7017 			} else
7018 				tg3_setup_phy(tp, false);
7019 			spin_unlock(&tp->lock);
7020 		}
7021 	}
7022 }
7023 
7024 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7025 				struct tg3_rx_prodring_set *dpr,
7026 				struct tg3_rx_prodring_set *spr)
7027 {
7028 	u32 si, di, cpycnt, src_prod_idx;
7029 	int i, err = 0;
7030 
7031 	while (1) {
7032 		src_prod_idx = spr->rx_std_prod_idx;
7033 
7034 		/* Make sure updates to the rx_std_buffers[] entries and the
7035 		 * standard producer index are seen in the correct order.
7036 		 */
7037 		smp_rmb();
7038 
7039 		if (spr->rx_std_cons_idx == src_prod_idx)
7040 			break;
7041 
7042 		if (spr->rx_std_cons_idx < src_prod_idx)
7043 			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7044 		else
7045 			cpycnt = tp->rx_std_ring_mask + 1 -
7046 				 spr->rx_std_cons_idx;
7047 
7048 		cpycnt = min(cpycnt,
7049 			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7050 
7051 		si = spr->rx_std_cons_idx;
7052 		di = dpr->rx_std_prod_idx;
7053 
7054 		for (i = di; i < di + cpycnt; i++) {
7055 			if (dpr->rx_std_buffers[i].data) {
7056 				cpycnt = i - di;
7057 				err = -ENOSPC;
7058 				break;
7059 			}
7060 		}
7061 
7062 		if (!cpycnt)
7063 			break;
7064 
7065 		/* Ensure that updates to the rx_std_buffers ring and the
7066 		 * shadowed hardware producer ring from tg3_recycle_skb() are
7067 		 * ordered correctly WRT the skb check above.
7068 		 */
7069 		smp_rmb();
7070 
7071 		memcpy(&dpr->rx_std_buffers[di],
7072 		       &spr->rx_std_buffers[si],
7073 		       cpycnt * sizeof(struct ring_info));
7074 
7075 		for (i = 0; i < cpycnt; i++, di++, si++) {
7076 			struct tg3_rx_buffer_desc *sbd, *dbd;
7077 			sbd = &spr->rx_std[si];
7078 			dbd = &dpr->rx_std[di];
7079 			dbd->addr_hi = sbd->addr_hi;
7080 			dbd->addr_lo = sbd->addr_lo;
7081 		}
7082 
7083 		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7084 				       tp->rx_std_ring_mask;
7085 		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7086 				       tp->rx_std_ring_mask;
7087 	}
7088 
7089 	while (1) {
7090 		src_prod_idx = spr->rx_jmb_prod_idx;
7091 
7092 		/* Make sure updates to the rx_jmb_buffers[] entries and
7093 		 * the jumbo producer index are seen in the correct order.
7094 		 */
7095 		smp_rmb();
7096 
7097 		if (spr->rx_jmb_cons_idx == src_prod_idx)
7098 			break;
7099 
7100 		if (spr->rx_jmb_cons_idx < src_prod_idx)
7101 			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7102 		else
7103 			cpycnt = tp->rx_jmb_ring_mask + 1 -
7104 				 spr->rx_jmb_cons_idx;
7105 
7106 		cpycnt = min(cpycnt,
7107 			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7108 
7109 		si = spr->rx_jmb_cons_idx;
7110 		di = dpr->rx_jmb_prod_idx;
7111 
7112 		for (i = di; i < di + cpycnt; i++) {
7113 			if (dpr->rx_jmb_buffers[i].data) {
7114 				cpycnt = i - di;
7115 				err = -ENOSPC;
7116 				break;
7117 			}
7118 		}
7119 
7120 		if (!cpycnt)
7121 			break;
7122 
7123 		/* Ensure that updates to the rx_jmb_buffers ring and the
7124 		 * shadowed hardware producer ring from tg3_recycle_skb() are
7125 		 * ordered correctly WRT the skb check above.
7126 		 */
7127 		smp_rmb();
7128 
7129 		memcpy(&dpr->rx_jmb_buffers[di],
7130 		       &spr->rx_jmb_buffers[si],
7131 		       cpycnt * sizeof(struct ring_info));
7132 
7133 		for (i = 0; i < cpycnt; i++, di++, si++) {
7134 			struct tg3_rx_buffer_desc *sbd, *dbd;
7135 			sbd = &spr->rx_jmb[si].std;
7136 			dbd = &dpr->rx_jmb[di].std;
7137 			dbd->addr_hi = sbd->addr_hi;
7138 			dbd->addr_lo = sbd->addr_lo;
7139 		}
7140 
7141 		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7142 				       tp->rx_jmb_ring_mask;
7143 		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7144 				       tp->rx_jmb_ring_mask;
7145 	}
7146 
7147 	return err;
7148 }
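
/* Worked example of the copy-count math above (illustrative only; the
 * index values in the comment are made up):
 */
#if 0	/* example only, kept out of the build */
static u32 tg3_example_cpycnt(u32 cons, u32 prod, u32 mask)
{
	/* e.g. cons = 510, prod = 5, mask = 511: cons is not below
	 * prod, so the first pass copies the 512 - 510 = 2 tail
	 * entries; once the indices advance and wrap, the next pass
	 * copies the remaining 5.
	 */
	if (cons < prod)
		return prod - cons;
	return mask + 1 - cons;
}
#endif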
7149 
7150 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7151 {
7152 	struct tg3 *tp = tnapi->tp;
7153 
7154 	/* run TX completion thread */
7155 	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7156 		tg3_tx(tnapi);
7157 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7158 			return work_done;
7159 	}
7160 
7161 	if (!tnapi->rx_rcb_prod_idx)
7162 		return work_done;
7163 
7164 	/* run RX thread, within the bounds set by NAPI.
7165 	 * All RX "locking" is done by ensuring outside
7166 	 * code synchronizes with tg3->napi.poll()
7167 	 */
7168 	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7169 		work_done += tg3_rx(tnapi, budget - work_done);
7170 
7171 	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7172 		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7173 		int i, err = 0;
7174 		u32 std_prod_idx = dpr->rx_std_prod_idx;
7175 		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7176 
7177 		tp->rx_refill = false;
7178 		for (i = 1; i <= tp->rxq_cnt; i++)
7179 			err |= tg3_rx_prodring_xfer(tp, dpr,
7180 						    &tp->napi[i].prodring);
7181 
7182 		wmb();
7183 
7184 		if (std_prod_idx != dpr->rx_std_prod_idx)
7185 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7186 				     dpr->rx_std_prod_idx);
7187 
7188 		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7189 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7190 				     dpr->rx_jmb_prod_idx);
7191 
7192 		if (err)
7193 			tw32_f(HOSTCC_MODE, tp->coal_now);
7194 	}
7195 
7196 	return work_done;
7197 }
7198 
7199 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7200 {
7201 	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7202 		schedule_work(&tp->reset_task);
7203 }
7204 
7205 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7206 {
7207 	if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7208 		cancel_work_sync(&tp->reset_task);
7209 	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7210 }
7211 
7212 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7213 {
7214 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7215 	struct tg3 *tp = tnapi->tp;
7216 	int work_done = 0;
7217 	struct tg3_hw_status *sblk = tnapi->hw_status;
7218 
7219 	while (1) {
7220 		work_done = tg3_poll_work(tnapi, work_done, budget);
7221 
7222 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7223 			goto tx_recovery;
7224 
7225 		if (unlikely(work_done >= budget))
7226 			break;
7227 
7228 		/* tp->last_tag is used in tg3_int_reenable() below
7229 		 * to tell the hw how much work has been processed,
7230 		 * so we must read it before checking for more work.
7231 		 */
7232 		tnapi->last_tag = sblk->status_tag;
7233 		tnapi->last_irq_tag = tnapi->last_tag;
7234 		rmb();
7235 
7236 		/* check for RX/TX work to do */
7237 		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7238 			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7239 
			/* This test is not race-free, but looping again
			 * here reduces the number of interrupts taken.
			 */
7243 			if (tnapi == &tp->napi[1] && tp->rx_refill)
7244 				continue;
7245 
7246 			napi_complete_done(napi, work_done);
7247 			/* Reenable interrupts. */
7248 			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7249 
			/* This test is synchronized by napi_schedule()
			 * and napi_complete() to close the race window.
			 */
7253 			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7254 				tw32(HOSTCC_MODE, tp->coalesce_mode |
7255 						  HOSTCC_MODE_ENABLE |
7256 						  tnapi->coal_now);
7257 			}
7258 			break;
7259 		}
7260 	}
7261 
7262 	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7263 	return work_done;
7264 
7265 tx_recovery:
7266 	/* work_done is guaranteed to be less than budget. */
7267 	napi_complete(napi);
7268 	tg3_reset_task_schedule(tp);
7269 	return work_done;
7270 }
7271 
7272 static void tg3_process_error(struct tg3 *tp)
7273 {
7274 	u32 val;
7275 	bool real_error = false;
7276 
7277 	if (tg3_flag(tp, ERROR_PROCESSED))
7278 		return;
7279 
7280 	/* Check Flow Attention register */
7281 	val = tr32(HOSTCC_FLOW_ATTN);
7282 	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7283 		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7284 		real_error = true;
7285 	}
7286 
7287 	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7288 		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7289 		real_error = true;
7290 	}
7291 
7292 	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7293 		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7294 		real_error = true;
7295 	}
7296 
7297 	if (!real_error)
7298 		return;
7299 
7300 	tg3_dump_state(tp);
7301 
7302 	tg3_flag_set(tp, ERROR_PROCESSED);
7303 	tg3_reset_task_schedule(tp);
7304 }
7305 
7306 static int tg3_poll(struct napi_struct *napi, int budget)
7307 {
7308 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7309 	struct tg3 *tp = tnapi->tp;
7310 	int work_done = 0;
7311 	struct tg3_hw_status *sblk = tnapi->hw_status;
7312 
7313 	while (1) {
7314 		if (sblk->status & SD_STATUS_ERROR)
7315 			tg3_process_error(tp);
7316 
7317 		tg3_poll_link(tp);
7318 
7319 		work_done = tg3_poll_work(tnapi, work_done, budget);
7320 
7321 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7322 			goto tx_recovery;
7323 
7324 		if (unlikely(work_done >= budget))
7325 			break;
7326 
7327 		if (tg3_flag(tp, TAGGED_STATUS)) {
7328 			/* tp->last_tag is used in tg3_int_reenable() below
7329 			 * to tell the hw how much work has been processed,
7330 			 * so we must read it before checking for more work.
7331 			 */
7332 			tnapi->last_tag = sblk->status_tag;
7333 			tnapi->last_irq_tag = tnapi->last_tag;
7334 			rmb();
7335 		} else
7336 			sblk->status &= ~SD_STATUS_UPDATED;
7337 
7338 		if (likely(!tg3_has_work(tnapi))) {
7339 			napi_complete_done(napi, work_done);
7340 			tg3_int_reenable(tnapi);
7341 			break;
7342 		}
7343 	}
7344 
7345 	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7346 	return work_done;
7347 
7348 tx_recovery:
7349 	/* work_done is guaranteed to be less than budget. */
7350 	napi_complete(napi);
7351 	tg3_reset_task_schedule(tp);
7352 	return work_done;
7353 }
7354 
7355 static void tg3_napi_disable(struct tg3 *tp)
7356 {
7357 	int i;
7358 
7359 	for (i = tp->irq_cnt - 1; i >= 0; i--)
7360 		napi_disable(&tp->napi[i].napi);
7361 }
7362 
7363 static void tg3_napi_enable(struct tg3 *tp)
7364 {
7365 	int i;
7366 
7367 	for (i = 0; i < tp->irq_cnt; i++)
7368 		napi_enable(&tp->napi[i].napi);
7369 }
7370 
7371 static void tg3_napi_init(struct tg3 *tp)
7372 {
7373 	int i;
7374 
7375 	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll);
7376 	for (i = 1; i < tp->irq_cnt; i++)
7377 		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix);
7378 }
7379 
7380 static void tg3_napi_fini(struct tg3 *tp)
7381 {
7382 	int i;
7383 
7384 	for (i = 0; i < tp->irq_cnt; i++)
7385 		netif_napi_del(&tp->napi[i].napi);
7386 }
7387 
7388 static inline void tg3_netif_stop(struct tg3 *tp)
7389 {
7390 	netif_trans_update(tp->dev);	/* prevent tx timeout */
7391 	tg3_napi_disable(tp);
7392 	netif_carrier_off(tp->dev);
7393 	netif_tx_disable(tp->dev);
7394 }
7395 
7396 /* tp->lock must be held */
7397 static inline void tg3_netif_start(struct tg3 *tp)
7398 {
7399 	tg3_ptp_resume(tp);
7400 
7401 	/* NOTE: unconditional netif_tx_wake_all_queues is only
7402 	 * appropriate so long as all callers are assured to
7403 	 * have free tx slots (such as after tg3_init_hw)
7404 	 */
7405 	netif_tx_wake_all_queues(tp->dev);
7406 
7407 	if (tp->link_up)
7408 		netif_carrier_on(tp->dev);
7409 
7410 	tg3_napi_enable(tp);
7411 	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7412 	tg3_enable_ints(tp);
7413 }
7414 
7415 static void tg3_irq_quiesce(struct tg3 *tp)
7416 	__releases(tp->lock)
7417 	__acquires(tp->lock)
7418 {
7419 	int i;
7420 
7421 	BUG_ON(tp->irq_sync);
7422 
7423 	tp->irq_sync = 1;
7424 	smp_mb();
7425 
7426 	spin_unlock_bh(&tp->lock);
7427 
7428 	for (i = 0; i < tp->irq_cnt; i++)
7429 		synchronize_irq(tp->napi[i].irq_vec);
7430 
7431 	spin_lock_bh(&tp->lock);
7432 }
7433 
/* Fully shut down all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, the IRQ handlers are quiesced as well.
 * Most of the time this is not necessary, except when shutting down
 * the device.
7438  */
7439 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7440 {
7441 	spin_lock_bh(&tp->lock);
7442 	if (irq_sync)
7443 		tg3_irq_quiesce(tp);
7444 }
7445 
7446 static inline void tg3_full_unlock(struct tg3 *tp)
7447 {
7448 	spin_unlock_bh(&tp->lock);
7449 }
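
/* Minimal usage sketch of the pair above (illustrative only; the close
 * and reset paths later in this file follow this shape):
 *
 *	tg3_full_lock(tp, 1);		(irq_sync != 0 also quiesces IRQs)
 *	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 *	tg3_full_unlock(tp);
 *
 * With irq_sync == 0 only tp->lock is taken, which suffices when the
 * caller cannot race the interrupt handlers.
 */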
7450 
7451 /* One-shot MSI handler - the chip automatically disables the
7452  * interrupt after sending the MSI, so the driver doesn't have to.
7453  */
7454 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7455 {
7456 	struct tg3_napi *tnapi = dev_id;
7457 	struct tg3 *tp = tnapi->tp;
7458 
7459 	prefetch(tnapi->hw_status);
7460 	if (tnapi->rx_rcb)
7461 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7462 
7463 	if (likely(!tg3_irq_sync(tp)))
7464 		napi_schedule(&tnapi->napi);
7465 
7466 	return IRQ_HANDLED;
7467 }
7468 
7469 /* MSI ISR - No need to check for interrupt sharing and no need to
7470  * flush status block and interrupt mailbox. PCI ordering rules
7471  * guarantee that MSI will arrive after the status block.
7472  */
7473 static irqreturn_t tg3_msi(int irq, void *dev_id)
7474 {
7475 	struct tg3_napi *tnapi = dev_id;
7476 	struct tg3 *tp = tnapi->tp;
7477 
7478 	prefetch(tnapi->hw_status);
7479 	if (tnapi->rx_rcb)
7480 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7481 	/*
7482 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7483 	 * chip-internal interrupt pending events.
7484 	 * Writing non-zero to intr-mbox-0 additionally tells the
7485 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7486 	 * event coalescing.
7487 	 */
7488 	tw32_mailbox(tnapi->int_mbox, 0x00000001);
7489 	if (likely(!tg3_irq_sync(tp)))
7490 		napi_schedule(&tnapi->napi);
7491 
7492 	return IRQ_RETVAL(1);
7493 }
7494 
7495 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7496 {
7497 	struct tg3_napi *tnapi = dev_id;
7498 	struct tg3 *tp = tnapi->tp;
7499 	struct tg3_hw_status *sblk = tnapi->hw_status;
7500 	unsigned int handled = 1;
7501 
7502 	/* In INTx mode, it is possible for the interrupt to reach the
7503 	 * CPU before the status block update posted ahead of it does.
7504 	 * Reading the PCI State register will confirm whether the
7505 	 * interrupt is ours and will flush the status block.
7506 	 */
7507 	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7508 		if (tg3_flag(tp, CHIP_RESETTING) ||
7509 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7510 			handled = 0;
7511 			goto out;
7512 		}
7513 	}
7514 
7515 	/*
7516 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7517 	 * chip-internal interrupt pending events.
7518 	 * Writing non-zero to intr-mbox-0 additionally tells the
7519 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7520 	 * event coalescing.
7521 	 *
7522 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7523 	 * spurious interrupts.  The flush impacts performance but
7524 	 * excessive spurious interrupts can be worse in some cases.
7525 	 */
7526 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7527 	if (tg3_irq_sync(tp))
7528 		goto out;
7529 	sblk->status &= ~SD_STATUS_UPDATED;
7530 	if (likely(tg3_has_work(tnapi))) {
7531 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7532 		napi_schedule(&tnapi->napi);
7533 	} else {
7534 		/* No work, shared interrupt perhaps?  Re-enable
7535 		 * interrupts and flush that PCI write.
7536 		 */
7537 		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7538 			       0x00000000);
7539 	}
7540 out:
7541 	return IRQ_RETVAL(handled);
7542 }
7543 
7544 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7545 {
7546 	struct tg3_napi *tnapi = dev_id;
7547 	struct tg3 *tp = tnapi->tp;
7548 	struct tg3_hw_status *sblk = tnapi->hw_status;
7549 	unsigned int handled = 1;
7550 
7551 	/* In INTx mode, it is possible for the interrupt to reach the
7552 	 * CPU before the status block update posted ahead of it does.
7553 	 * Reading the PCI State register will confirm whether the
7554 	 * interrupt is ours and will flush the status block.
7555 	 */
7556 	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7557 		if (tg3_flag(tp, CHIP_RESETTING) ||
7558 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7559 			handled = 0;
7560 			goto out;
7561 		}
7562 	}
7563 
7564 	/*
7565 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7566 	 * chip-internal interrupt pending events.
7567 	 * Writing non-zero to intr-mbox-0 additionally tells the
7568 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7569 	 * event coalescing.
7570 	 *
7571 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7572 	 * spurious interrupts.  The flush impacts performance but
7573 	 * excessive spurious interrupts can be worse in some cases.
7574 	 */
7575 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7576 
7577 	/*
7578 	 * In a shared interrupt configuration, sometimes other devices'
7579 	 * interrupts will scream.  We record the current status tag here
7580 	 * so that the above check can report that the screaming interrupts
7581 	 * are unhandled.  Eventually they will be silenced.
7582 	 */
7583 	tnapi->last_irq_tag = sblk->status_tag;
7584 
7585 	if (tg3_irq_sync(tp))
7586 		goto out;
7587 
7588 	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7589 
7590 	napi_schedule(&tnapi->napi);
7591 
7592 out:
7593 	return IRQ_RETVAL(handled);
7594 }
7595 
7596 /* ISR for interrupt test */
7597 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7598 {
7599 	struct tg3_napi *tnapi = dev_id;
7600 	struct tg3 *tp = tnapi->tp;
7601 	struct tg3_hw_status *sblk = tnapi->hw_status;
7602 
7603 	if ((sblk->status & SD_STATUS_UPDATED) ||
7604 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7605 		tg3_disable_ints(tp);
7606 		return IRQ_RETVAL(1);
7607 	}
7608 	return IRQ_RETVAL(0);
7609 }
7610 
7611 #ifdef CONFIG_NET_POLL_CONTROLLER
7612 static void tg3_poll_controller(struct net_device *dev)
7613 {
7614 	int i;
7615 	struct tg3 *tp = netdev_priv(dev);
7616 
7617 	if (tg3_irq_sync(tp))
7618 		return;
7619 
7620 	for (i = 0; i < tp->irq_cnt; i++)
7621 		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7622 }
7623 #endif
7624 
7625 static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue)
7626 {
7627 	struct tg3 *tp = netdev_priv(dev);
7628 
7629 	if (netif_msg_tx_err(tp)) {
7630 		netdev_err(dev, "transmit timed out, resetting\n");
7631 		tg3_dump_state(tp);
7632 	}
7633 
7634 	tg3_reset_task_schedule(tp);
7635 }
7636 
7637 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7638 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7639 {
7640 	u32 base = (u32) mapping & 0xffffffff;
7641 
7642 	return base + len + 8 < base;
7643 }
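
/* Worked example: the test relies on unsigned 32-bit wraparound.  For
 * mapping = 0xfffff000 and len = 0x2000, base + len + 8 is
 * 0x100001008, which truncates to 0x00001008 -- less than base, so the
 * buffer is reported as straddling a 4GB boundary.  The same length at
 * base 0x00001000 gives 0x00003008 > base and passes.  The "+ 8" adds
 * a small safety margin around the boundary.
 */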
7644 
7645 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7646  * of any 4GB boundaries: 4G, 8G, etc
7647  */
7648 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7649 					   u32 len, u32 mss)
7650 {
7651 	if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7652 		u32 base = (u32) mapping & 0xffffffff;
7653 
7654 		return ((base + len + (mss & 0x3fff)) < base);
7655 	}
7656 	return 0;
7657 }
7658 
7659 /* Test for DMA addresses > 40-bit */
7660 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7661 					  int len)
7662 {
7663 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7664 	if (tg3_flag(tp, 40BIT_DMA_BUG))
7665 		return ((u64) mapping + len) > DMA_BIT_MASK(40);
7666 	return 0;
7667 #else
7668 	return 0;
7669 #endif
7670 }
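
/* Note: the preprocessor guard above turns this into a build-time
 * decision.  The runtime comparison is compiled in only for
 * configurations whose DMA addresses can plausibly exceed the 40-bit
 * limit; everywhere else the helper is a constant 0 and the compiler
 * can drop the test at each call site.
 */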
7671 
7672 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7673 				 dma_addr_t mapping, u32 len, u32 flags,
7674 				 u32 mss, u32 vlan)
7675 {
7676 	txbd->addr_hi = ((u64) mapping >> 32);
7677 	txbd->addr_lo = ((u64) mapping & 0xffffffff);
7678 	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7679 	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7680 }
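
/* Worked example of the field packing above, assuming the tg3.h shift
 * constants put the length in the upper 16 bits of len_flags (the
 * flags mask 0x0000ffff implies TXD_LEN_SHIFT == 16).  For
 * mapping = 0x123456000, len = 1500 (0x5dc) and flags = TXD_FLAG_END,
 * the BD gets addr_hi = 0x00000001, addr_lo = 0x23456000 and
 * len_flags = 0x05dc0000 | TXD_FLAG_END; mss and the VLAN tag are
 * packed into vlan_tag the same way.
 */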
7681 
7682 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7683 			    dma_addr_t map, u32 len, u32 flags,
7684 			    u32 mss, u32 vlan)
7685 {
7686 	struct tg3 *tp = tnapi->tp;
7687 	bool hwbug = false;
7688 
7689 	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7690 		hwbug = true;
7691 
7692 	if (tg3_4g_overflow_test(map, len))
7693 		hwbug = true;
7694 
7695 	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7696 		hwbug = true;
7697 
7698 	if (tg3_40bit_overflow_test(tp, map, len))
7699 		hwbug = true;
7700 
7701 	if (tp->dma_limit) {
7702 		u32 prvidx = *entry;
7703 		u32 tmp_flag = flags & ~TXD_FLAG_END;
7704 		while (len > tp->dma_limit && *budget) {
7705 			u32 frag_len = tp->dma_limit;
7706 			len -= tp->dma_limit;
7707 
7708 			/* Avoid the 8-byte DMA problem */
7709 			if (len <= 8) {
7710 				len += tp->dma_limit / 2;
7711 				frag_len = tp->dma_limit / 2;
7712 			}
7713 
7714 			tnapi->tx_buffers[*entry].fragmented = true;
7715 
7716 			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7717 				      frag_len, tmp_flag, mss, vlan);
7718 			*budget -= 1;
7719 			prvidx = *entry;
7720 			*entry = NEXT_TX(*entry);
7721 
7722 			map += frag_len;
7723 		}
7724 
7725 		if (len) {
7726 			if (*budget) {
7727 				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7728 					      len, flags, mss, vlan);
7729 				*budget -= 1;
7730 				*entry = NEXT_TX(*entry);
7731 			} else {
7732 				hwbug = true;
7733 				tnapi->tx_buffers[prvidx].fragmented = false;
7734 			}
7735 		}
7736 	} else {
7737 		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7738 			      len, flags, mss, vlan);
7739 		*entry = NEXT_TX(*entry);
7740 	}
7741 
7742 	return hwbug;
7743 }
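
/* Worked example of the dma_limit splitting above, assuming a
 * hypothetical dma_limit of 4096: a 9000-byte mapping is posted as BDs
 * of 4096, 4096 and 808 bytes, each consuming one budget slot.  If the
 * tail would shrink to 8 bytes or less (e.g. len = 8200:
 * 4096 + 4096 + 8), the second chunk is halved to 2048 so the tail
 * becomes 2056 bytes -- never a BD short enough to trip the
 * SHORT_DMA_BUG erratum.
 */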
7744 
7745 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7746 {
7747 	int i;
7748 	struct sk_buff *skb;
7749 	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7750 
7751 	skb = txb->skb;
7752 	txb->skb = NULL;
7753 
7754 	dma_unmap_single(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping),
7755 			 skb_headlen(skb), DMA_TO_DEVICE);
7756 
7757 	while (txb->fragmented) {
7758 		txb->fragmented = false;
7759 		entry = NEXT_TX(entry);
7760 		txb = &tnapi->tx_buffers[entry];
7761 	}
7762 
7763 	for (i = 0; i <= last; i++) {
7764 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7765 
7766 		entry = NEXT_TX(entry);
7767 		txb = &tnapi->tx_buffers[entry];
7768 
7769 		dma_unmap_page(&tnapi->tp->pdev->dev,
7770 			       dma_unmap_addr(txb, mapping),
7771 			       skb_frag_size(frag), DMA_TO_DEVICE);
7772 
7773 		while (txb->fragmented) {
7774 			txb->fragmented = false;
7775 			entry = NEXT_TX(entry);
7776 			txb = &tnapi->tx_buffers[entry];
7777 		}
7778 	}
7779 }
7780 
7781 /* Work around 4GB and 40-bit hardware DMA bugs. */
7782 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7783 				       struct sk_buff **pskb,
7784 				       u32 *entry, u32 *budget,
7785 				       u32 base_flags, u32 mss, u32 vlan)
7786 {
7787 	struct tg3 *tp = tnapi->tp;
7788 	struct sk_buff *new_skb, *skb = *pskb;
7789 	dma_addr_t new_addr = 0;
7790 	int ret = 0;
7791 
7792 	if (tg3_asic_rev(tp) != ASIC_REV_5701)
7793 		new_skb = skb_copy(skb, GFP_ATOMIC);
7794 	else {
7795 		int more_headroom = 4 - ((unsigned long)skb->data & 3);
7796 
7797 		new_skb = skb_copy_expand(skb,
7798 					  skb_headroom(skb) + more_headroom,
7799 					  skb_tailroom(skb), GFP_ATOMIC);
7800 	}
7801 
7802 	if (!new_skb) {
7803 		ret = -1;
7804 	} else {
7805 		/* New SKB is guaranteed to be linear. */
7806 		new_addr = dma_map_single(&tp->pdev->dev, new_skb->data,
7807 					  new_skb->len, DMA_TO_DEVICE);
7808 		/* Make sure the mapping succeeded */
7809 		if (dma_mapping_error(&tp->pdev->dev, new_addr)) {
7810 			dev_kfree_skb_any(new_skb);
7811 			ret = -1;
7812 		} else {
7813 			u32 save_entry = *entry;
7814 
7815 			base_flags |= TXD_FLAG_END;
7816 
7817 			tnapi->tx_buffers[*entry].skb = new_skb;
7818 			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7819 					   mapping, new_addr);
7820 
7821 			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7822 					    new_skb->len, base_flags,
7823 					    mss, vlan)) {
7824 				tg3_tx_skb_unmap(tnapi, save_entry, -1);
7825 				dev_kfree_skb_any(new_skb);
7826 				ret = -1;
7827 			}
7828 		}
7829 	}
7830 
7831 	dev_consume_skb_any(skb);
7832 	*pskb = new_skb;
7833 	return ret;
7834 }
7835 
7836 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7837 {
7838 	/* Check if we will never have enough descriptors,
7839 	 * as gso_segs can exceed the current ring size.
7840 	 */
7841 	return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7842 }
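
/* Note: the divisor 3 matches the frag_cnt_est heuristic in
 * tg3_tso_bug() below, which budgets roughly three descriptors per GSO
 * segment.  For example, with tx_pending = 511 any packet of 170 or
 * more segments (at least 510 estimated BDs) is treated as never
 * fitting, so the caller drops it rather than stalling the queue.
 */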
7843 
7844 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7845 
7846 /* Use GSO to work around all TSO packets that meet the HW bug
7847  * conditions indicated in tg3_tx_frag_set().
7848  */
7849 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7850 		       struct netdev_queue *txq, struct sk_buff *skb)
7851 {
7852 	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7853 	struct sk_buff *segs, *seg, *next;
7854 
7855 	/* Estimate the number of fragments in the worst case */
7856 	if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7857 		netif_tx_stop_queue(txq);
7858 
7859 		/* netif_tx_stop_queue() must be done before checking
7860 		 * the tx index in tg3_tx_avail() below, because in
7861 		 * tg3_tx(), we update the tx index before checking for
7862 		 * netif_tx_queue_stopped().
7863 		 */
7864 		smp_mb();
7865 		if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7866 			return NETDEV_TX_BUSY;
7867 
7868 		netif_tx_wake_queue(txq);
7869 	}
7870 
7871 	segs = skb_gso_segment(skb, tp->dev->features &
7872 				    ~(NETIF_F_TSO | NETIF_F_TSO6));
7873 	if (IS_ERR(segs) || !segs)
7874 		goto tg3_tso_bug_end;
7875 
7876 	skb_list_walk_safe(segs, seg, next) {
7877 		skb_mark_not_on_list(seg);
7878 		tg3_start_xmit(seg, tp->dev);
7879 	}
7880 
7881 tg3_tso_bug_end:
7882 	dev_consume_skb_any(skb);
7883 
7884 	return NETDEV_TX_OK;
7885 }
7886 
7887 /* hard_start_xmit for all devices */
7888 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7889 {
7890 	struct tg3 *tp = netdev_priv(dev);
7891 	u32 len, entry, base_flags, mss, vlan = 0;
7892 	u32 budget;
7893 	int i = -1, would_hit_hwbug;
7894 	dma_addr_t mapping;
7895 	struct tg3_napi *tnapi;
7896 	struct netdev_queue *txq;
7897 	unsigned int last;
7898 	struct iphdr *iph = NULL;
7899 	struct tcphdr *tcph = NULL;
7900 	__sum16 tcp_csum = 0, ip_csum = 0;
7901 	__be16 ip_tot_len = 0;
7902 
7903 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7904 	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7905 	if (tg3_flag(tp, ENABLE_TSS))
7906 		tnapi++;
7907 
7908 	budget = tg3_tx_avail(tnapi);
7909 
7910 	/* We are running in BH disabled context with netif_tx_lock
7911 	 * and TX reclaim runs via tp->napi.poll inside of a software
7912 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
7913 	 * no IRQ context deadlocks to worry about either.  Rejoice!
7914 	 */
7915 	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7916 		if (!netif_tx_queue_stopped(txq)) {
7917 			netif_tx_stop_queue(txq);
7918 
7919 			/* This is a hard error, log it. */
7920 			netdev_err(dev,
7921 				   "BUG! Tx Ring full when queue awake!\n");
7922 		}
7923 		return NETDEV_TX_BUSY;
7924 	}
7925 
7926 	entry = tnapi->tx_prod;
7927 	base_flags = 0;
7928 
7929 	mss = skb_shinfo(skb)->gso_size;
7930 	if (mss) {
7931 		u32 tcp_opt_len, hdr_len;
7932 
7933 		if (skb_cow_head(skb, 0))
7934 			goto drop;
7935 
7936 		iph = ip_hdr(skb);
7937 		tcp_opt_len = tcp_optlen(skb);
7938 
7939 		hdr_len = skb_tcp_all_headers(skb) - ETH_HLEN;
7940 
7941 		/* HW/FW cannot correctly segment packets that have been
7942 		 * VLAN encapsulated.
7943 		 */
7944 		if (skb->protocol == htons(ETH_P_8021Q) ||
7945 		    skb->protocol == htons(ETH_P_8021AD)) {
7946 			if (tg3_tso_bug_gso_check(tnapi, skb))
7947 				return tg3_tso_bug(tp, tnapi, txq, skb);
7948 			goto drop;
7949 		}
7950 
7951 		if (!skb_is_gso_v6(skb)) {
7952 			if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7953 			    tg3_flag(tp, TSO_BUG)) {
7954 				if (tg3_tso_bug_gso_check(tnapi, skb))
7955 					return tg3_tso_bug(tp, tnapi, txq, skb);
7956 				goto drop;
7957 			}
7958 			ip_csum = iph->check;
7959 			ip_tot_len = iph->tot_len;
7960 			iph->check = 0;
7961 			iph->tot_len = htons(mss + hdr_len);
7962 		}
7963 
7964 		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7965 			       TXD_FLAG_CPU_POST_DMA);
7966 
7967 		tcph = tcp_hdr(skb);
7968 		tcp_csum = tcph->check;
7969 
7970 		if (tg3_flag(tp, HW_TSO_1) ||
7971 		    tg3_flag(tp, HW_TSO_2) ||
7972 		    tg3_flag(tp, HW_TSO_3)) {
7973 			tcph->check = 0;
7974 			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7975 		} else {
7976 			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7977 							 0, IPPROTO_TCP, 0);
7978 		}
7979 
7980 		if (tg3_flag(tp, HW_TSO_3)) {
7981 			mss |= (hdr_len & 0xc) << 12;
7982 			if (hdr_len & 0x10)
7983 				base_flags |= 0x00000010;
7984 			base_flags |= (hdr_len & 0x3e0) << 5;
7985 		} else if (tg3_flag(tp, HW_TSO_2))
7986 			mss |= hdr_len << 9;
7987 		else if (tg3_flag(tp, HW_TSO_1) ||
7988 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
7989 			if (tcp_opt_len || iph->ihl > 5) {
7990 				int tsflags;
7991 
7992 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7993 				mss |= (tsflags << 11);
7994 			}
7995 		} else {
7996 			if (tcp_opt_len || iph->ihl > 5) {
7997 				int tsflags;
7998 
7999 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8000 				base_flags |= tsflags << 12;
8001 			}
8002 		}
8003 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8004 		/* HW/FW cannot correctly checksum packets that have been
8005 		 * VLAN encapsulated.
8006 		 */
8007 		if (skb->protocol == htons(ETH_P_8021Q) ||
8008 		    skb->protocol == htons(ETH_P_8021AD)) {
8009 			if (skb_checksum_help(skb))
8010 				goto drop;
8011 		} else  {
8012 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
8013 		}
8014 	}
8015 
8016 	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8017 	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
8018 		base_flags |= TXD_FLAG_JMB_PKT;
8019 
8020 	if (skb_vlan_tag_present(skb)) {
8021 		base_flags |= TXD_FLAG_VLAN;
8022 		vlan = skb_vlan_tag_get(skb);
8023 	}
8024 
8025 	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8026 	    tg3_flag(tp, TX_TSTAMP_EN)) {
8027 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8028 		base_flags |= TXD_FLAG_HWTSTAMP;
8029 	}
8030 
8031 	len = skb_headlen(skb);
8032 
8033 	mapping = dma_map_single(&tp->pdev->dev, skb->data, len,
8034 				 DMA_TO_DEVICE);
8035 	if (dma_mapping_error(&tp->pdev->dev, mapping))
8036 		goto drop;
8037
8039 	tnapi->tx_buffers[entry].skb = skb;
8040 	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8041 
8042 	would_hit_hwbug = 0;
8043 
8044 	if (tg3_flag(tp, 5701_DMA_BUG))
8045 		would_hit_hwbug = 1;
8046 
8047 	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8048 			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8049 			    mss, vlan)) {
8050 		would_hit_hwbug = 1;
8051 	} else if (skb_shinfo(skb)->nr_frags > 0) {
8052 		u32 tmp_mss = mss;
8053 
8054 		if (!tg3_flag(tp, HW_TSO_1) &&
8055 		    !tg3_flag(tp, HW_TSO_2) &&
8056 		    !tg3_flag(tp, HW_TSO_3))
8057 			tmp_mss = 0;
8058 
8059 		/* Now loop through additional data
8060 		 * fragments, and queue them.
8061 		 */
8062 		last = skb_shinfo(skb)->nr_frags - 1;
8063 		for (i = 0; i <= last; i++) {
8064 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8065 
8066 			len = skb_frag_size(frag);
8067 			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8068 						   len, DMA_TO_DEVICE);
8069 
8070 			tnapi->tx_buffers[entry].skb = NULL;
8071 			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8072 					   mapping);
8073 			if (dma_mapping_error(&tp->pdev->dev, mapping))
8074 				goto dma_error;
8075 
8076 			if (!budget ||
8077 			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8078 					    len, base_flags |
8079 					    ((i == last) ? TXD_FLAG_END : 0),
8080 					    tmp_mss, vlan)) {
8081 				would_hit_hwbug = 1;
8082 				break;
8083 			}
8084 		}
8085 	}
8086 
8087 	if (would_hit_hwbug) {
8088 		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8089 
8090 		if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8091 			/* If it's a TSO packet, do GSO instead of
8092 			 * allocating and copying to a large linear SKB
8093 			 */
8094 			if (ip_tot_len) {
8095 				iph->check = ip_csum;
8096 				iph->tot_len = ip_tot_len;
8097 			}
8098 			tcph->check = tcp_csum;
8099 			return tg3_tso_bug(tp, tnapi, txq, skb);
8100 		}
8101 
8102 		/* If the workaround fails due to memory/mapping
8103 		 * failure, silently drop this packet.
8104 		 */
8105 		entry = tnapi->tx_prod;
8106 		budget = tg3_tx_avail(tnapi);
8107 		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8108 						base_flags, mss, vlan))
8109 			goto drop_nofree;
8110 	}
8111 
8112 	skb_tx_timestamp(skb);
8113 	netdev_tx_sent_queue(txq, skb->len);
8114 
8115 	/* Sync BD data before updating mailbox */
8116 	wmb();
8117 
8118 	tnapi->tx_prod = entry;
8119 	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8120 		netif_tx_stop_queue(txq);
8121 
8122 		/* netif_tx_stop_queue() must be done before checking
8123 		 * the tx index in tg3_tx_avail() below, because in
8124 		 * tg3_tx(), we update the tx index before checking for
8125 		 * netif_tx_queue_stopped().
8126 		 */
8127 		smp_mb();
8128 		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8129 			netif_tx_wake_queue(txq);
8130 	}
8131 
8132 	if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
8133 		/* Packets are ready, update Tx producer idx on card. */
8134 		tw32_tx_mbox(tnapi->prodmbox, entry);
8135 	}
8136 
8137 	return NETDEV_TX_OK;
8138 
8139 dma_error:
8140 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8141 	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8142 drop:
8143 	dev_kfree_skb_any(skb);
8144 drop_nofree:
8145 	tp->tx_dropped++;
8146 	return NETDEV_TX_OK;
8147 }
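
/* Note on the stop/wake protocol used twice above: the producer side
 * stops the queue, issues smp_mb(), then re-reads tg3_tx_avail(),
 * while the consumer side (tg3_tx()) updates the ring index before
 * testing netif_tx_queue_stopped().  This pairing guarantees at least
 * one side observes the other's update, so the queue cannot remain
 * stopped after the ring has actually drained.
 */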
8148 
8149 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8150 {
8151 	if (enable) {
8152 		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8153 				  MAC_MODE_PORT_MODE_MASK);
8154 
8155 		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8156 
8157 		if (!tg3_flag(tp, 5705_PLUS))
8158 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8159 
8160 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8161 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8162 		else
8163 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8164 	} else {
8165 		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8166 
8167 		if (tg3_flag(tp, 5705_PLUS) ||
8168 		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8169 		    tg3_asic_rev(tp) == ASIC_REV_5700)
8170 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8171 	}
8172 
8173 	tw32(MAC_MODE, tp->mac_mode);
8174 	udelay(40);
8175 }
8176 
8177 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8178 {
8179 	u32 val, bmcr, mac_mode, ptest = 0;
8180 
8181 	tg3_phy_toggle_apd(tp, false);
8182 	tg3_phy_toggle_automdix(tp, false);
8183 
8184 	if (extlpbk && tg3_phy_set_extloopbk(tp))
8185 		return -EIO;
8186 
8187 	bmcr = BMCR_FULLDPLX;
8188 	switch (speed) {
8189 	case SPEED_10:
8190 		break;
8191 	case SPEED_100:
8192 		bmcr |= BMCR_SPEED100;
8193 		break;
8194 	case SPEED_1000:
8195 	default:
8196 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8197 			speed = SPEED_100;
8198 			bmcr |= BMCR_SPEED100;
8199 		} else {
8200 			speed = SPEED_1000;
8201 			bmcr |= BMCR_SPEED1000;
8202 		}
8203 	}
8204 
8205 	if (extlpbk) {
8206 		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8207 			tg3_readphy(tp, MII_CTRL1000, &val);
8208 			val |= CTL1000_AS_MASTER |
8209 			       CTL1000_ENABLE_MASTER;
8210 			tg3_writephy(tp, MII_CTRL1000, val);
8211 		} else {
8212 			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8213 				MII_TG3_FET_PTEST_TRIM_2;
8214 			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8215 		}
8216 	} else
8217 		bmcr |= BMCR_LOOPBACK;
8218 
8219 	tg3_writephy(tp, MII_BMCR, bmcr);
8220 
8221 	/* The write needs to be flushed for the FETs */
8222 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8223 		tg3_readphy(tp, MII_BMCR, &bmcr);
8224 
8225 	udelay(40);
8226 
8227 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8228 	    tg3_asic_rev(tp) == ASIC_REV_5785) {
8229 		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8230 			     MII_TG3_FET_PTEST_FRC_TX_LINK |
8231 			     MII_TG3_FET_PTEST_FRC_TX_LOCK);
8232 
8233 		/* The write needs to be flushed for the AC131 */
8234 		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8235 	}
8236 
8237 	/* Reset to prevent losing 1st rx packet intermittently */
8238 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8239 	    tg3_flag(tp, 5780_CLASS)) {
8240 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8241 		udelay(10);
8242 		tw32_f(MAC_RX_MODE, tp->rx_mode);
8243 	}
8244 
8245 	mac_mode = tp->mac_mode &
8246 		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8247 	if (speed == SPEED_1000)
8248 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
8249 	else
8250 		mac_mode |= MAC_MODE_PORT_MODE_MII;
8251 
8252 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8253 		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8254 
8255 		if (masked_phy_id == TG3_PHY_ID_BCM5401)
8256 			mac_mode &= ~MAC_MODE_LINK_POLARITY;
8257 		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8258 			mac_mode |= MAC_MODE_LINK_POLARITY;
8259 
8260 		tg3_writephy(tp, MII_TG3_EXT_CTRL,
8261 			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8262 	}
8263 
8264 	tw32(MAC_MODE, mac_mode);
8265 	udelay(40);
8266 
8267 	return 0;
8268 }
8269 
8270 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8271 {
8272 	struct tg3 *tp = netdev_priv(dev);
8273 
8274 	if (features & NETIF_F_LOOPBACK) {
8275 		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8276 			return;
8277 
8278 		spin_lock_bh(&tp->lock);
8279 		tg3_mac_loopback(tp, true);
8280 		netif_carrier_on(tp->dev);
8281 		spin_unlock_bh(&tp->lock);
8282 		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8283 	} else {
8284 		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8285 			return;
8286 
8287 		spin_lock_bh(&tp->lock);
8288 		tg3_mac_loopback(tp, false);
8289 		/* Force link status check */
8290 		tg3_setup_phy(tp, true);
8291 		spin_unlock_bh(&tp->lock);
8292 		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8293 	}
8294 }
8295 
8296 static netdev_features_t tg3_fix_features(struct net_device *dev,
8297 	netdev_features_t features)
8298 {
8299 	struct tg3 *tp = netdev_priv(dev);
8300 
8301 	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8302 		features &= ~NETIF_F_ALL_TSO;
8303 
8304 	return features;
8305 }
8306 
8307 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8308 {
8309 	netdev_features_t changed = dev->features ^ features;
8310 
8311 	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8312 		tg3_set_loopback(dev, features);
8313 
8314 	return 0;
8315 }
8316 
8317 static void tg3_rx_prodring_free(struct tg3 *tp,
8318 				 struct tg3_rx_prodring_set *tpr)
8319 {
8320 	int i;
8321 
8322 	if (tpr != &tp->napi[0].prodring) {
8323 		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8324 		     i = (i + 1) & tp->rx_std_ring_mask)
8325 			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8326 					tp->rx_pkt_map_sz);
8327 
8328 		if (tg3_flag(tp, JUMBO_CAPABLE)) {
8329 			for (i = tpr->rx_jmb_cons_idx;
8330 			     i != tpr->rx_jmb_prod_idx;
8331 			     i = (i + 1) & tp->rx_jmb_ring_mask) {
8332 				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8333 						TG3_RX_JMB_MAP_SZ);
8334 			}
8335 		}
8336 
8337 		return;
8338 	}
8339 
8340 	for (i = 0; i <= tp->rx_std_ring_mask; i++)
8341 		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8342 				tp->rx_pkt_map_sz);
8343 
8344 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8345 		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8346 			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8347 					TG3_RX_JMB_MAP_SZ);
8348 	}
8349 }
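
/* Note: the "(i + 1) & ring_mask" stepping above depends on the ring
 * sizes being powers of two.  With rx_std_ring_mask = 511, for
 * instance, index 511 advances to (512 & 511) = 0, wrapping from the
 * consumer index to the producer index without a modulo operation.
 */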
8350 
8351 /* Initialize rx rings for packet processing.
8352  *
8353  * The chip has been shut down and the driver detached from
8354  * the network stack, so no interrupts or new tx packets will
8355  * end up in the driver.  tp->{tx,}lock are held and thus
8356  * we may not sleep.
8357  */
8358 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8359 				 struct tg3_rx_prodring_set *tpr)
8360 {
8361 	u32 i, rx_pkt_dma_sz;
8362 
8363 	tpr->rx_std_cons_idx = 0;
8364 	tpr->rx_std_prod_idx = 0;
8365 	tpr->rx_jmb_cons_idx = 0;
8366 	tpr->rx_jmb_prod_idx = 0;
8367 
8368 	if (tpr != &tp->napi[0].prodring) {
8369 		memset(&tpr->rx_std_buffers[0], 0,
8370 		       TG3_RX_STD_BUFF_RING_SIZE(tp));
8371 		if (tpr->rx_jmb_buffers)
8372 			memset(&tpr->rx_jmb_buffers[0], 0,
8373 			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
8374 		goto done;
8375 	}
8376 
8377 	/* Zero out all descriptors. */
8378 	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8379 
8380 	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8381 	if (tg3_flag(tp, 5780_CLASS) &&
8382 	    tp->dev->mtu > ETH_DATA_LEN)
8383 		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8384 	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8385 
8386 	/* Initialize invariants of the rings; we only set this
8387 	 * stuff once.  This works because the card does not
8388 	 * write into the rx buffer posting rings.
8389 	 */
8390 	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8391 		struct tg3_rx_buffer_desc *rxd;
8392 
8393 		rxd = &tpr->rx_std[i];
8394 		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8395 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8396 		rxd->opaque = (RXD_OPAQUE_RING_STD |
8397 			       (i << RXD_OPAQUE_INDEX_SHIFT));
8398 	}
8399 
8400 	/* Now allocate fresh SKBs for each rx ring. */
8401 	for (i = 0; i < tp->rx_pending; i++) {
8402 		unsigned int frag_size;
8403 
8404 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8405 				      &frag_size) < 0) {
8406 			netdev_warn(tp->dev,
8407 				    "Using a smaller RX standard ring. Only "
8408 				    "%d out of %d buffers were allocated "
8409 				    "successfully\n", i, tp->rx_pending);
8410 			if (i == 0)
8411 				goto initfail;
8412 			tp->rx_pending = i;
8413 			break;
8414 		}
8415 	}
8416 
8417 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8418 		goto done;
8419 
8420 	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8421 
8422 	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8423 		goto done;
8424 
8425 	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8426 		struct tg3_rx_buffer_desc *rxd;
8427 
8428 		rxd = &tpr->rx_jmb[i].std;
8429 		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8430 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8431 				  RXD_FLAG_JUMBO;
8432 		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8433 		       (i << RXD_OPAQUE_INDEX_SHIFT));
8434 	}
8435 
8436 	for (i = 0; i < tp->rx_jumbo_pending; i++) {
8437 		unsigned int frag_size;
8438 
8439 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8440 				      &frag_size) < 0) {
8441 			netdev_warn(tp->dev,
8442 				    "Using a smaller RX jumbo ring. Only %d "
8443 				    "out of %d buffers were allocated "
8444 				    "successfully\n", i, tp->rx_jumbo_pending);
8445 			if (i == 0)
8446 				goto initfail;
8447 			tp->rx_jumbo_pending = i;
8448 			break;
8449 		}
8450 	}
8451 
8452 done:
8453 	return 0;
8454 
8455 initfail:
8456 	tg3_rx_prodring_free(tp, tpr);
8457 	return -ENOMEM;
8458 }
8459 
8460 static void tg3_rx_prodring_fini(struct tg3 *tp,
8461 				 struct tg3_rx_prodring_set *tpr)
8462 {
8463 	kfree(tpr->rx_std_buffers);
8464 	tpr->rx_std_buffers = NULL;
8465 	kfree(tpr->rx_jmb_buffers);
8466 	tpr->rx_jmb_buffers = NULL;
8467 	if (tpr->rx_std) {
8468 		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8469 				  tpr->rx_std, tpr->rx_std_mapping);
8470 		tpr->rx_std = NULL;
8471 	}
8472 	if (tpr->rx_jmb) {
8473 		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8474 				  tpr->rx_jmb, tpr->rx_jmb_mapping);
8475 		tpr->rx_jmb = NULL;
8476 	}
8477 }
8478 
8479 static int tg3_rx_prodring_init(struct tg3 *tp,
8480 				struct tg3_rx_prodring_set *tpr)
8481 {
8482 	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8483 				      GFP_KERNEL);
8484 	if (!tpr->rx_std_buffers)
8485 		return -ENOMEM;
8486 
8487 	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8488 					 TG3_RX_STD_RING_BYTES(tp),
8489 					 &tpr->rx_std_mapping,
8490 					 GFP_KERNEL);
8491 	if (!tpr->rx_std)
8492 		goto err_out;
8493 
8494 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8495 		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8496 					      GFP_KERNEL);
8497 		if (!tpr->rx_jmb_buffers)
8498 			goto err_out;
8499 
8500 		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8501 						 TG3_RX_JMB_RING_BYTES(tp),
8502 						 &tpr->rx_jmb_mapping,
8503 						 GFP_KERNEL);
8504 		if (!tpr->rx_jmb)
8505 			goto err_out;
8506 	}
8507 
8508 	return 0;
8509 
8510 err_out:
8511 	tg3_rx_prodring_fini(tp, tpr);
8512 	return -ENOMEM;
8513 }
8514 
8515 /* Free up pending packets in all rx/tx rings.
8516  *
8517  * The chip has been shut down and the driver detached from
8518  * the network stack, so no interrupts or new tx packets will
8519  * end up in the driver.  tp->{tx,}lock is not held and we are not
8520  * in an interrupt context and thus may sleep.
8521  */
8522 static void tg3_free_rings(struct tg3 *tp)
8523 {
8524 	int i, j;
8525 
8526 	for (j = 0; j < tp->irq_cnt; j++) {
8527 		struct tg3_napi *tnapi = &tp->napi[j];
8528 
8529 		tg3_rx_prodring_free(tp, &tnapi->prodring);
8530 
8531 		if (!tnapi->tx_buffers)
8532 			continue;
8533 
8534 		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8535 			struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8536 
8537 			if (!skb)
8538 				continue;
8539 
8540 			tg3_tx_skb_unmap(tnapi, i,
8541 					 skb_shinfo(skb)->nr_frags - 1);
8542 
8543 			dev_consume_skb_any(skb);
8544 		}
8545 		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8546 	}
8547 }
8548 
8549 /* Initialize tx/rx rings for packet processing.
8550  *
8551  * The chip has been shut down and the driver detached from
8552  * the network stack, so no interrupts or new tx packets will
8553  * end up in the driver.  tp->{tx,}lock are held and thus
8554  * we may not sleep.
8555  */
8556 static int tg3_init_rings(struct tg3 *tp)
8557 {
8558 	int i;
8559 
8560 	/* Free up all the SKBs. */
8561 	tg3_free_rings(tp);
8562 
8563 	for (i = 0; i < tp->irq_cnt; i++) {
8564 		struct tg3_napi *tnapi = &tp->napi[i];
8565 
8566 		tnapi->last_tag = 0;
8567 		tnapi->last_irq_tag = 0;
8568 		tnapi->hw_status->status = 0;
8569 		tnapi->hw_status->status_tag = 0;
8570 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8571 
8572 		tnapi->tx_prod = 0;
8573 		tnapi->tx_cons = 0;
8574 		if (tnapi->tx_ring)
8575 			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8576 
8577 		tnapi->rx_rcb_ptr = 0;
8578 		if (tnapi->rx_rcb)
8579 			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8580 
8581 		if (tnapi->prodring.rx_std &&
8582 		    tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8583 			tg3_free_rings(tp);
8584 			return -ENOMEM;
8585 		}
8586 	}
8587 
8588 	return 0;
8589 }
8590 
8591 static void tg3_mem_tx_release(struct tg3 *tp)
8592 {
8593 	int i;
8594 
8595 	for (i = 0; i < tp->irq_max; i++) {
8596 		struct tg3_napi *tnapi = &tp->napi[i];
8597 
8598 		if (tnapi->tx_ring) {
8599 			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8600 				tnapi->tx_ring, tnapi->tx_desc_mapping);
8601 			tnapi->tx_ring = NULL;
8602 		}
8603 
8604 		kfree(tnapi->tx_buffers);
8605 		tnapi->tx_buffers = NULL;
8606 	}
8607 }
8608 
8609 static int tg3_mem_tx_acquire(struct tg3 *tp)
8610 {
8611 	int i;
8612 	struct tg3_napi *tnapi = &tp->napi[0];
8613 
8614 	/* If multivector TSS is enabled, vector 0 does not handle
8615 	 * tx interrupts.  Don't allocate any resources for it.
8616 	 */
8617 	if (tg3_flag(tp, ENABLE_TSS))
8618 		tnapi++;
8619 
8620 	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8621 		tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8622 					    sizeof(struct tg3_tx_ring_info),
8623 					    GFP_KERNEL);
8624 		if (!tnapi->tx_buffers)
8625 			goto err_out;
8626 
8627 		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8628 						    TG3_TX_RING_BYTES,
8629 						    &tnapi->tx_desc_mapping,
8630 						    GFP_KERNEL);
8631 		if (!tnapi->tx_ring)
8632 			goto err_out;
8633 	}
8634 
8635 	return 0;
8636 
8637 err_out:
8638 	tg3_mem_tx_release(tp);
8639 	return -ENOMEM;
8640 }
8641 
8642 static void tg3_mem_rx_release(struct tg3 *tp)
8643 {
8644 	int i;
8645 
8646 	for (i = 0; i < tp->irq_max; i++) {
8647 		struct tg3_napi *tnapi = &tp->napi[i];
8648 
8649 		tg3_rx_prodring_fini(tp, &tnapi->prodring);
8650 
8651 		if (!tnapi->rx_rcb)
8652 			continue;
8653 
8654 		dma_free_coherent(&tp->pdev->dev,
8655 				  TG3_RX_RCB_RING_BYTES(tp),
8656 				  tnapi->rx_rcb,
8657 				  tnapi->rx_rcb_mapping);
8658 		tnapi->rx_rcb = NULL;
8659 	}
8660 }
8661 
8662 static int tg3_mem_rx_acquire(struct tg3 *tp)
8663 {
8664 	unsigned int i, limit;
8665 
8666 	limit = tp->rxq_cnt;
8667 
8668 	/* If RSS is enabled, we need a (dummy) producer ring
8669 	 * set on vector zero.  This is the true hw prodring.
8670 	 */
8671 	if (tg3_flag(tp, ENABLE_RSS))
8672 		limit++;
8673 
8674 	for (i = 0; i < limit; i++) {
8675 		struct tg3_napi *tnapi = &tp->napi[i];
8676 
8677 		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8678 			goto err_out;
8679 
8680 		/* If multivector RSS is enabled, vector 0
8681 		 * does not handle rx or tx interrupts.
8682 		 * Don't allocate any resources for it.
8683 		 */
8684 		if (!i && tg3_flag(tp, ENABLE_RSS))
8685 			continue;
8686 
8687 		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8688 						   TG3_RX_RCB_RING_BYTES(tp),
8689 						   &tnapi->rx_rcb_mapping,
8690 						   GFP_KERNEL);
8691 		if (!tnapi->rx_rcb)
8692 			goto err_out;
8693 	}
8694 
8695 	return 0;
8696 
8697 err_out:
8698 	tg3_mem_rx_release(tp);
8699 	return -ENOMEM;
8700 }
8701 
8702 /*
8703  * Must not be invoked with interrupt sources disabled and
8704  * the hardware shut down.
8705  */
8706 static void tg3_free_consistent(struct tg3 *tp)
8707 {
8708 	int i;
8709 
8710 	for (i = 0; i < tp->irq_cnt; i++) {
8711 		struct tg3_napi *tnapi = &tp->napi[i];
8712 
8713 		if (tnapi->hw_status) {
8714 			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8715 					  tnapi->hw_status,
8716 					  tnapi->status_mapping);
8717 			tnapi->hw_status = NULL;
8718 		}
8719 	}
8720 
8721 	tg3_mem_rx_release(tp);
8722 	tg3_mem_tx_release(tp);
8723 
8724 	/* tp->hw_stats can be referenced safely:
8725 	 *     1. under rtnl_lock
8726 	 *     2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8727 	 */
8728 	if (tp->hw_stats) {
8729 		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8730 				  tp->hw_stats, tp->stats_mapping);
8731 		tp->hw_stats = NULL;
8732 	}
8733 }
8734 
8735 /*
8736  * Must not be invoked with interrupt sources disabled and
8737  * the hardware shut down.  Can sleep.
8738  */
8739 static int tg3_alloc_consistent(struct tg3 *tp)
8740 {
8741 	int i;
8742 
8743 	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8744 					  sizeof(struct tg3_hw_stats),
8745 					  &tp->stats_mapping, GFP_KERNEL);
8746 	if (!tp->hw_stats)
8747 		goto err_out;
8748 
8749 	for (i = 0; i < tp->irq_cnt; i++) {
8750 		struct tg3_napi *tnapi = &tp->napi[i];
8751 		struct tg3_hw_status *sblk;
8752 
8753 		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8754 						      TG3_HW_STATUS_SIZE,
8755 						      &tnapi->status_mapping,
8756 						      GFP_KERNEL);
8757 		if (!tnapi->hw_status)
8758 			goto err_out;
8759 
8760 		sblk = tnapi->hw_status;
8761 
8762 		if (tg3_flag(tp, ENABLE_RSS)) {
8763 			u16 *prodptr = NULL;
8764 
8765 			/*
8766 			 * When RSS is enabled, the status block format changes
8767 			 * slightly.  The "rx_jumbo_consumer", "reserved",
8768 			 * and "rx_mini_consumer" members get mapped to the
8769 			 * other three rx return ring producer indexes.
8770 			 */
8771 			switch (i) {
8772 			case 1:
8773 				prodptr = &sblk->idx[0].rx_producer;
8774 				break;
8775 			case 2:
8776 				prodptr = &sblk->rx_jumbo_consumer;
8777 				break;
8778 			case 3:
8779 				prodptr = &sblk->reserved;
8780 				break;
8781 			case 4:
8782 				prodptr = &sblk->rx_mini_consumer;
8783 				break;
8784 			}
8785 			tnapi->rx_rcb_prod_idx = prodptr;
8786 		} else {
8787 			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8788 		}
8789 	}
8790 
8791 	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8792 		goto err_out;
8793 
8794 	return 0;
8795 
8796 err_out:
8797 	tg3_free_consistent(tp);
8798 	return -ENOMEM;
8799 }
8800 
8801 #define MAX_WAIT_CNT 1000
8802 
8803 /* To stop a block, clear the enable bit and poll till it
8804  * clears.  tp->lock is held.
8805  */
8806 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8807 {
8808 	unsigned int i;
8809 	u32 val;
8810 
8811 	if (tg3_flag(tp, 5705_PLUS)) {
8812 		switch (ofs) {
8813 		case RCVLSC_MODE:
8814 		case DMAC_MODE:
8815 		case MBFREE_MODE:
8816 		case BUFMGR_MODE:
8817 		case MEMARB_MODE:
8818 			/* We can't enable/disable these bits of the
8819 			 * 5705/5750, so just report success.
8820 			 */
8821 			return 0;
8822 
8823 		default:
8824 			break;
8825 		}
8826 	}
8827 
8828 	val = tr32(ofs);
8829 	val &= ~enable_bit;
8830 	tw32_f(ofs, val);
8831 
8832 	for (i = 0; i < MAX_WAIT_CNT; i++) {
8833 		if (pci_channel_offline(tp->pdev)) {
8834 			dev_err(&tp->pdev->dev,
8835 				"tg3_stop_block device offline, "
8836 				"ofs=%lx enable_bit=%x\n",
8837 				ofs, enable_bit);
8838 			return -ENODEV;
8839 		}
8840 
8841 		udelay(100);
8842 		val = tr32(ofs);
8843 		if ((val & enable_bit) == 0)
8844 			break;
8845 	}
8846 
8847 	if (i == MAX_WAIT_CNT && !silent) {
8848 		dev_err(&tp->pdev->dev,
8849 			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8850 			ofs, enable_bit);
8851 		return -ENODEV;
8852 	}
8853 
8854 	return 0;
8855 }
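
/* Note: the poll loop above bounds the wait at MAX_WAIT_CNT iterations
 * of udelay(100), i.e. 1000 * 100us = 100ms per block, before logging
 * and returning -ENODEV; with silent set, a plain timeout is swallowed
 * and 0 is returned instead.
 */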
8856 
8857 /* tp->lock is held. */
8858 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8859 {
8860 	int i, err;
8861 
8862 	tg3_disable_ints(tp);
8863 
8864 	if (pci_channel_offline(tp->pdev)) {
8865 		tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8866 		tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8867 		err = -ENODEV;
8868 		goto err_no_dev;
8869 	}
8870 
8871 	tp->rx_mode &= ~RX_MODE_ENABLE;
8872 	tw32_f(MAC_RX_MODE, tp->rx_mode);
8873 	udelay(10);
8874 
8875 	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8876 	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8877 	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8878 	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8879 	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8880 	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8881 
8882 	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8883 	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8884 	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8885 	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8886 	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8887 	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8888 	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8889 
8890 	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8891 	tw32_f(MAC_MODE, tp->mac_mode);
8892 	udelay(40);
8893 
8894 	tp->tx_mode &= ~TX_MODE_ENABLE;
8895 	tw32_f(MAC_TX_MODE, tp->tx_mode);
8896 
8897 	for (i = 0; i < MAX_WAIT_CNT; i++) {
8898 		udelay(100);
8899 		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8900 			break;
8901 	}
8902 	if (i >= MAX_WAIT_CNT) {
8903 		dev_err(&tp->pdev->dev,
8904 			"%s timed out, TX_MODE_ENABLE will not clear "
8905 			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8906 		err |= -ENODEV;
8907 	}
8908 
8909 	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8910 	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8911 	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8912 
8913 	tw32(FTQ_RESET, 0xffffffff);
8914 	tw32(FTQ_RESET, 0x00000000);
8915 
8916 	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8917 	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8918 
8919 err_no_dev:
8920 	for (i = 0; i < tp->irq_cnt; i++) {
8921 		struct tg3_napi *tnapi = &tp->napi[i];
8922 		if (tnapi->hw_status)
8923 			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8924 	}
8925 
8926 	return err;
8927 }
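
/* Note on the err |= ... accumulation above: OR-ing the -ENODEV return
 * values does not preserve a meaningful errno, but it does guarantee
 * that err is non-zero if any block failed to stop, and non-zero-ness
 * appears to be the only property callers rely on.
 */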
8928 
8929 /* Save PCI command register before chip reset */
8930 static void tg3_save_pci_state(struct tg3 *tp)
8931 {
8932 	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8933 }
8934 
8935 /* Restore PCI state after chip reset */
8936 static void tg3_restore_pci_state(struct tg3 *tp)
8937 {
8938 	u32 val;
8939 
8940 	/* Re-enable indirect register accesses. */
8941 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8942 			       tp->misc_host_ctrl);
8943 
8944 	/* Set MAX PCI retry to zero. */
8945 	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8946 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8947 	    tg3_flag(tp, PCIX_MODE))
8948 		val |= PCISTATE_RETRY_SAME_DMA;
8949 	/* Allow reads and writes to the APE register and memory space. */
8950 	if (tg3_flag(tp, ENABLE_APE))
8951 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8952 		       PCISTATE_ALLOW_APE_SHMEM_WR |
8953 		       PCISTATE_ALLOW_APE_PSPACE_WR;
8954 	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8955 
8956 	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8957 
8958 	if (!tg3_flag(tp, PCI_EXPRESS)) {
8959 		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8960 				      tp->pci_cacheline_sz);
8961 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8962 				      tp->pci_lat_timer);
8963 	}
8964 
8965 	/* Make sure PCI-X relaxed ordering bit is clear. */
8966 	if (tg3_flag(tp, PCIX_MODE)) {
8967 		u16 pcix_cmd;
8968 
8969 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8970 				     &pcix_cmd);
8971 		pcix_cmd &= ~PCI_X_CMD_ERO;
8972 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8973 				      pcix_cmd);
8974 	}
8975 
8976 	if (tg3_flag(tp, 5780_CLASS)) {
8977 
8978 		/* Chip reset on 5780 will reset the MSI enable bit,
8979 		 * so we need to restore it.
8980 		 */
8981 		if (tg3_flag(tp, USING_MSI)) {
8982 			u16 ctrl;
8983 
8984 			pci_read_config_word(tp->pdev,
8985 					     tp->msi_cap + PCI_MSI_FLAGS,
8986 					     &ctrl);
8987 			pci_write_config_word(tp->pdev,
8988 					      tp->msi_cap + PCI_MSI_FLAGS,
8989 					      ctrl | PCI_MSI_FLAGS_ENABLE);
8990 			val = tr32(MSGINT_MODE);
8991 			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8992 		}
8993 	}
8994 }
8995 
8996 static void tg3_override_clk(struct tg3 *tp)
8997 {
8998 	u32 val;
8999 
9000 	switch (tg3_asic_rev(tp)) {
9001 	case ASIC_REV_5717:
9002 		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9003 		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9004 		     TG3_CPMU_MAC_ORIDE_ENABLE);
9005 		break;
9006 
9007 	case ASIC_REV_5719:
9008 	case ASIC_REV_5720:
9009 		tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9010 		break;
9011 
9012 	default:
9013 		return;
9014 	}
9015 }
9016 
9017 static void tg3_restore_clk(struct tg3 *tp)
9018 {
9019 	u32 val;
9020 
9021 	switch (tg3_asic_rev(tp)) {
9022 	case ASIC_REV_5717:
9023 		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9024 		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9025 		     val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9026 		break;
9027 
9028 	case ASIC_REV_5719:
9029 	case ASIC_REV_5720:
9030 		val = tr32(TG3_CPMU_CLCK_ORIDE);
9031 		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9032 		break;
9033 
9034 	default:
9035 		return;
9036 	}
9037 }
9038 
9039 /* tp->lock is held. */
9040 static int tg3_chip_reset(struct tg3 *tp)
9041 	__releases(tp->lock)
9042 	__acquires(tp->lock)
9043 {
9044 	u32 val;
9045 	void (*write_op)(struct tg3 *, u32, u32);
9046 	int i, err;
9047 
9048 	if (!pci_device_is_present(tp->pdev))
9049 		return -ENODEV;
9050 
9051 	tg3_nvram_lock(tp);
9052 
9053 	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9054 
9055 	/* No matching tg3_nvram_unlock() after this because
9056 	 * chip reset below will undo the nvram lock.
9057 	 */
9058 	tp->nvram_lock_cnt = 0;
9059 
9060 	/* GRC_MISC_CFG core clock reset will clear the memory
9061 	 * enable bit in PCI register 4 and the MSI enable bit
9062 	 * on some chips, so we save relevant registers here.
9063 	 */
9064 	tg3_save_pci_state(tp);
9065 
9066 	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9067 	    tg3_flag(tp, 5755_PLUS))
9068 		tw32(GRC_FASTBOOT_PC, 0);
9069 
9070 	/*
9071 	 * We must avoid the readl() that normally takes place.
9072 	 * It locks machines, causes machine checks, and does other
9073 	 * fun things.  So, temporarily disable the 5701
9074 	 * hardware workaround while we do the reset.
9075 	 */
9076 	write_op = tp->write32;
9077 	if (write_op == tg3_write_flush_reg32)
9078 		tp->write32 = tg3_write32;
9079 
9080 	/* Prevent the irq handler from reading or writing PCI registers
9081 	 * during chip reset when the memory enable bit in the PCI command
9082 	 * register may be cleared.  The chip does not generate interrupts
9083 	 * at this time, but the irq handler may still be called due to irq
9084 	 * sharing or irqpoll.
9085 	 */
9086 	tg3_flag_set(tp, CHIP_RESETTING);
9087 	for (i = 0; i < tp->irq_cnt; i++) {
9088 		struct tg3_napi *tnapi = &tp->napi[i];
9089 		if (tnapi->hw_status) {
9090 			tnapi->hw_status->status = 0;
9091 			tnapi->hw_status->status_tag = 0;
9092 		}
9093 		tnapi->last_tag = 0;
9094 		tnapi->last_irq_tag = 0;
9095 	}
9096 	smp_mb();
9097 
9098 	tg3_full_unlock(tp);
9099 
9100 	for (i = 0; i < tp->irq_cnt; i++)
9101 		synchronize_irq(tp->napi[i].irq_vec);
9102 
9103 	tg3_full_lock(tp, 0);
9104 
9105 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9106 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9107 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9108 	}
9109 
9110 	/* do the reset */
9111 	val = GRC_MISC_CFG_CORECLK_RESET;
9112 
9113 	if (tg3_flag(tp, PCI_EXPRESS)) {
9114 		/* Force PCIe 1.0a mode */
9115 		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9116 		    !tg3_flag(tp, 57765_PLUS) &&
9117 		    tr32(TG3_PCIE_PHY_TSTCTL) ==
9118 		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9119 			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9120 
9121 		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9122 			tw32(GRC_MISC_CFG, (1 << 29));
9123 			val |= (1 << 29);
9124 		}
9125 	}
9126 
9127 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9128 		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9129 		tw32(GRC_VCPU_EXT_CTRL,
9130 		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9131 	}
9132 
9133 	/* Set the clock to the highest frequency to avoid timeouts. With link
9134 	 * aware mode, the clock speed could be slow and bootcode does not
9135 	 * complete within the expected time. Override the clock to allow the
9136 	 * bootcode to finish sooner and then restore it.
9137 	 */
9138 	tg3_override_clk(tp);
9139 
9140 	/* Manage gphy power for all CPMU-absent PCIe devices. */
9141 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9142 		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9143 
9144 	tw32(GRC_MISC_CFG, val);
9145 
9146 	/* restore 5701 hardware bug workaround write method */
9147 	tp->write32 = write_op;
9148 
9149 	/* Unfortunately, we have to delay before the PCI read back.
9150 	 * Some 575X chips will not even respond to a PCI cfg access
9151 	 * when the reset command is given to the chip.
9152 	 *
9153 	 * How do these hardware designers expect things to work
9154 	 * properly if the PCI write is posted for a long period
9155 	 * of time?  It is always necessary to have some method by
9156 	 * which a register read back can occur to push the write
9157 	 * out which does the reset.
9158 	 *
9159 	 * For most tg3 variants the trick below has worked.
9160 	 * Ho hum...
9161 	 */
9162 	udelay(120);
9163 
9164 	/* Flush PCI posted writes.  The normal MMIO registers
9165 	 * are inaccessible at this time, so this is the only
9166 	 * way to do this reliably (actually, this is no longer
9167 	 * the case, see above).  I tried to use indirect
9168 	 * register read/write but this upset some 5701 variants.
9169 	 */
9170 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9171 
9172 	udelay(120);
9173 
9174 	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9175 		u16 val16;
9176 
9177 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9178 			int j;
9179 			u32 cfg_val;
9180 
9181 			/* Wait for link training to complete.  */
9182 			for (j = 0; j < 5000; j++)
9183 				udelay(100);
9184 
9185 			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9186 			pci_write_config_dword(tp->pdev, 0xc4,
9187 					       cfg_val | (1 << 15));
9188 		}
9189 
9190 		/* Clear the "no snoop" and "relaxed ordering" bits. */
9191 		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9192 		/*
9193 		 * Older PCIe devices only support the 128-byte
9194 		 * MPS setting.  Enforce the restriction.
9195 		 */
9196 		if (!tg3_flag(tp, CPMU_PRESENT))
9197 			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9198 		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9199 
9200 		/* Clear error status */
9201 		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9202 				      PCI_EXP_DEVSTA_CED |
9203 				      PCI_EXP_DEVSTA_NFED |
9204 				      PCI_EXP_DEVSTA_FED |
9205 				      PCI_EXP_DEVSTA_URD);
9206 	}
9207 
9208 	tg3_restore_pci_state(tp);
9209 
9210 	tg3_flag_clear(tp, CHIP_RESETTING);
9211 	tg3_flag_clear(tp, ERROR_PROCESSED);
9212 
9213 	val = 0;
9214 	if (tg3_flag(tp, 5780_CLASS))
9215 		val = tr32(MEMARB_MODE);
9216 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9217 
9218 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9219 		tg3_stop_fw(tp);
9220 		tw32(0x5000, 0x400);
9221 	}
9222 
9223 	if (tg3_flag(tp, IS_SSB_CORE)) {
9224 		/*
9225 		 * BCM4785: In order to avoid repercussions from using
9226 		 * potentially defective internal ROM, stop the Rx RISC CPU,
9227 		 * which is not required for normal operation.
9228 		 */
9229 		tg3_stop_fw(tp);
9230 		tg3_halt_cpu(tp, RX_CPU_BASE);
9231 	}
9232 
9233 	err = tg3_poll_fw(tp);
9234 	if (err)
9235 		return err;
9236 
9237 	tw32(GRC_MODE, tp->grc_mode);
9238 
9239 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9240 		val = tr32(0xc4);
9241 
9242 		tw32(0xc4, val | (1 << 15));
9243 	}
9244 
9245 	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9246 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
9247 		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9248 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9249 			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9250 		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9251 	}
9252 
9253 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9254 		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9255 		val = tp->mac_mode;
9256 	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9257 		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9258 		val = tp->mac_mode;
9259 	} else
9260 		val = 0;
9261 
9262 	tw32_f(MAC_MODE, val);
9263 	udelay(40);
9264 
9265 	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9266 
9267 	tg3_mdio_start(tp);
9268 
9269 	if (tg3_flag(tp, PCI_EXPRESS) &&
9270 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9271 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
9272 	    !tg3_flag(tp, 57765_PLUS)) {
9273 		val = tr32(0x7c00);
9274 
9275 		tw32(0x7c00, val | (1 << 25));
9276 	}
9277 
9278 	tg3_restore_clk(tp);
9279 
9280 	/* Increase the core clock speed to fix tx timeout issue for 5762
9281 	 * with 100Mbps link speed.
9282 	 */
9283 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9284 		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9285 		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9286 		     TG3_CPMU_MAC_ORIDE_ENABLE);
9287 	}
9288 
9289 	/* Reprobe ASF enable state.  */
9290 	tg3_flag_clear(tp, ENABLE_ASF);
9291 	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9292 			   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9293 
9294 	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9295 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9296 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9297 		u32 nic_cfg;
9298 
9299 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9300 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9301 			tg3_flag_set(tp, ENABLE_ASF);
9302 			tp->last_event_jiffies = jiffies;
9303 			if (tg3_flag(tp, 5750_PLUS))
9304 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9305 
9306 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9307 			if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9308 				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9309 			if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9310 				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9311 		}
9312 	}
9313 
9314 	return 0;
9315 }
9316 
9317 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9318 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9319 static void __tg3_set_rx_mode(struct net_device *);
9320 
9321 /* tp->lock is held. */
9322 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9323 {
9324 	int err;
9325 
9326 	tg3_stop_fw(tp);
9327 
9328 	tg3_write_sig_pre_reset(tp, kind);
9329 
9330 	tg3_abort_hw(tp, silent);
9331 	err = tg3_chip_reset(tp);
9332 
9333 	__tg3_set_mac_addr(tp, false);
9334 
9335 	tg3_write_sig_legacy(tp, kind);
9336 	tg3_write_sig_post_reset(tp, kind);
9337 
9338 	if (tp->hw_stats) {
9339 		/* Save the stats across chip resets... */
9340 		tg3_get_nstats(tp, &tp->net_stats_prev);
9341 		tg3_get_estats(tp, &tp->estats_prev);
9342 
9343 		/* And make sure the next sample is new data */
9344 		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9345 	}
9346 
9347 	return err;
9348 }
9349 
9350 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9351 {
9352 	struct tg3 *tp = netdev_priv(dev);
9353 	struct sockaddr *addr = p;
9354 	int err = 0;
9355 	bool skip_mac_1 = false;
9356 
9357 	if (!is_valid_ether_addr(addr->sa_data))
9358 		return -EADDRNOTAVAIL;
9359 
9360 	eth_hw_addr_set(dev, addr->sa_data);
9361 
9362 	if (!netif_running(dev))
9363 		return 0;
9364 
9365 	if (tg3_flag(tp, ENABLE_ASF)) {
9366 		u32 addr0_high, addr0_low, addr1_high, addr1_low;
9367 
9368 		addr0_high = tr32(MAC_ADDR_0_HIGH);
9369 		addr0_low = tr32(MAC_ADDR_0_LOW);
9370 		addr1_high = tr32(MAC_ADDR_1_HIGH);
9371 		addr1_low = tr32(MAC_ADDR_1_LOW);
9372 
9373 		/* Skip MAC addr 1 if ASF is using it. */
9374 		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9375 		    !(addr1_high == 0 && addr1_low == 0))
9376 			skip_mac_1 = true;
9377 	}
9378 	spin_lock_bh(&tp->lock);
9379 	__tg3_set_mac_addr(tp, skip_mac_1);
9380 	__tg3_set_rx_mode(dev);
9381 	spin_unlock_bh(&tp->lock);
9382 
9383 	return err;
9384 }
9385 
9386 /* tp->lock is held. */
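/* Program one TG3_BDINFO ring control block in NIC SRAM: the 64-bit
 * host DMA address of the ring, its (length << BDINFO_FLAGS_MAXLEN_SHIFT)
 * | flags word, and, on pre-5705 chips, the NIC-local descriptor address.
 */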
9387 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9388 			   dma_addr_t mapping, u32 maxlen_flags,
9389 			   u32 nic_addr)
9390 {
9391 	tg3_write_mem(tp,
9392 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9393 		      ((u64) mapping >> 32));
9394 	tg3_write_mem(tp,
9395 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9396 		      ((u64) mapping & 0xffffffff));
9397 	tg3_write_mem(tp,
9398 		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9399 		       maxlen_flags);
9400 
9401 	if (!tg3_flag(tp, 5705_PLUS))
9402 		tg3_write_mem(tp,
9403 			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9404 			      nic_addr);
9405 }
9406 
9407 
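/* Program the TX host-coalescing parameters.  With TSS, vector 0
 * carries no TX traffic, so its registers are zeroed and each TX
 * queue's vector is programmed instead; the per-vector register blocks
 * start at the *_VEC1 offsets and are spaced 0x18 bytes apart.  Any
 * remaining vectors are zeroed.
 */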
9408 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9409 {
9410 	int i = 0;
9411 
9412 	if (!tg3_flag(tp, ENABLE_TSS)) {
9413 		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9414 		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9415 		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9416 	} else {
9417 		tw32(HOSTCC_TXCOL_TICKS, 0);
9418 		tw32(HOSTCC_TXMAX_FRAMES, 0);
9419 		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9420 
9421 		for (; i < tp->txq_cnt; i++) {
9422 			u32 reg;
9423 
9424 			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9425 			tw32(reg, ec->tx_coalesce_usecs);
9426 			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9427 			tw32(reg, ec->tx_max_coalesced_frames);
9428 			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9429 			tw32(reg, ec->tx_max_coalesced_frames_irq);
9430 		}
9431 	}
9432 
9433 	for (; i < tp->irq_max - 1; i++) {
9434 		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9435 		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9436 		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9437 	}
9438 }
9439 
9440 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9441 {
9442 	int i = 0;
9443 	u32 limit = tp->rxq_cnt;
9444 
9445 	if (!tg3_flag(tp, ENABLE_RSS)) {
9446 		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9447 		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9448 		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9449 		limit--;
9450 	} else {
9451 		tw32(HOSTCC_RXCOL_TICKS, 0);
9452 		tw32(HOSTCC_RXMAX_FRAMES, 0);
9453 		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9454 	}
9455 
9456 	for (; i < limit; i++) {
9457 		u32 reg;
9458 
9459 		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9460 		tw32(reg, ec->rx_coalesce_usecs);
9461 		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9462 		tw32(reg, ec->rx_max_coalesced_frames);
9463 		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9464 		tw32(reg, ec->rx_max_coalesced_frames_irq);
9465 	}
9466 
9467 	for (; i < tp->irq_max - 1; i++) {
9468 		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9469 		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9470 		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9471 	}
9472 }
9473 
9474 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9475 {
9476 	tg3_coal_tx_init(tp, ec);
9477 	tg3_coal_rx_init(tp, ec);
9478 
9479 	if (!tg3_flag(tp, 5705_PLUS)) {
9480 		u32 val = ec->stats_block_coalesce_usecs;
9481 
9482 		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9483 		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9484 
9485 		if (!tp->link_up)
9486 			val = 0;
9487 
9488 		tw32(HOSTCC_STAT_COAL_TICKS, val);
9489 	}
9490 }
9491 
9492 /* tp->lock is held. */
9493 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9494 {
9495 	u32 txrcb, limit;
9496 
9497 	/* Disable all transmit rings but the first. */
9498 	if (!tg3_flag(tp, 5705_PLUS))
9499 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9500 	else if (tg3_flag(tp, 5717_PLUS))
9501 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9502 	else if (tg3_flag(tp, 57765_CLASS) ||
9503 		 tg3_asic_rev(tp) == ASIC_REV_5762)
9504 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9505 	else
9506 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9507 
9508 	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9509 	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9510 		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9511 			      BDINFO_FLAGS_DISABLED);
9512 }
9513 
9514 /* tp->lock is held. */
9515 static void tg3_tx_rcbs_init(struct tg3 *tp)
9516 {
9517 	int i = 0;
9518 	u32 txrcb = NIC_SRAM_SEND_RCB;
9519 
9520 	if (tg3_flag(tp, ENABLE_TSS))
9521 		i++;
9522 
9523 	for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9524 		struct tg3_napi *tnapi = &tp->napi[i];
9525 
9526 		if (!tnapi->tx_ring)
9527 			continue;
9528 
9529 		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9530 			       (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9531 			       NIC_SRAM_TX_BUFFER_DESC);
9532 	}
9533 }
9534 
9535 /* tp->lock is held. */
9536 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9537 {
9538 	u32 rxrcb, limit;
9539 
9540 	/* Disable all receive return rings but the first. */
9541 	if (tg3_flag(tp, 5717_PLUS))
9542 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9543 	else if (!tg3_flag(tp, 5705_PLUS))
9544 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9545 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9546 		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9547 		 tg3_flag(tp, 57765_CLASS))
9548 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9549 	else
9550 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9551 
9552 	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9553 	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9554 		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9555 			      BDINFO_FLAGS_DISABLED);
9556 }
9557 
9558 /* tp->lock is held. */
9559 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9560 {
9561 	int i = 0;
9562 	u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9563 
9564 	if (tg3_flag(tp, ENABLE_RSS))
9565 		i++;
9566 
9567 	for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9568 		struct tg3_napi *tnapi = &tp->napi[i];
9569 
9570 		if (!tnapi->rx_rcb)
9571 			continue;
9572 
9573 		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9574 			       (tp->rx_ret_ring_mask + 1) <<
9575 				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9576 	}
9577 }
9578 
9579 /* tp->lock is held. */
9580 static void tg3_rings_reset(struct tg3 *tp)
9581 {
9582 	int i;
9583 	u32 stblk;
9584 	struct tg3_napi *tnapi = &tp->napi[0];
9585 
9586 	tg3_tx_rcbs_disable(tp);
9587 
9588 	tg3_rx_ret_rcbs_disable(tp);
9589 
9590 	/* Disable interrupts */
9591 	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9592 	tp->napi[0].chk_msi_cnt = 0;
9593 	tp->napi[0].last_rx_cons = 0;
9594 	tp->napi[0].last_tx_cons = 0;
9595 
9596 	/* Zero mailbox registers. */
9597 	if (tg3_flag(tp, SUPPORT_MSIX)) {
9598 		for (i = 1; i < tp->irq_max; i++) {
9599 			tp->napi[i].tx_prod = 0;
9600 			tp->napi[i].tx_cons = 0;
9601 			if (tg3_flag(tp, ENABLE_TSS))
9602 				tw32_mailbox(tp->napi[i].prodmbox, 0);
9603 			tw32_rx_mbox(tp->napi[i].consmbox, 0);
9604 			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9605 			tp->napi[i].chk_msi_cnt = 0;
9606 			tp->napi[i].last_rx_cons = 0;
9607 			tp->napi[i].last_tx_cons = 0;
9608 		}
9609 		if (!tg3_flag(tp, ENABLE_TSS))
9610 			tw32_mailbox(tp->napi[0].prodmbox, 0);
9611 	} else {
9612 		tp->napi[0].tx_prod = 0;
9613 		tp->napi[0].tx_cons = 0;
9614 		tw32_mailbox(tp->napi[0].prodmbox, 0);
9615 		tw32_rx_mbox(tp->napi[0].consmbox, 0);
9616 	}
9617 
9618 	/* Make sure the NIC-based send BD rings are disabled. */
9619 	if (!tg3_flag(tp, 5705_PLUS)) {
9620 		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9621 		for (i = 0; i < 16; i++)
9622 			tw32_tx_mbox(mbox + i * 8, 0);
9623 	}
9624 
9625 	/* Clear status block in ram. */
9626 	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9627 
9628 	/* Set status block DMA address */
9629 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9630 	     ((u64) tnapi->status_mapping >> 32));
9631 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9632 	     ((u64) tnapi->status_mapping & 0xffffffff));
9633 
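	/* Status blocks for the remaining vectors are programmed into
	 * consecutive register pairs starting at HOSTCC_STATBLCK_RING1,
	 * spaced 8 bytes apart.
	 */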
9634 	stblk = HOSTCC_STATBLCK_RING1;
9635 
9636 	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9637 		u64 mapping = (u64)tnapi->status_mapping;
9638 		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9639 		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9640 		stblk += 8;
9641 
9642 		/* Clear status block in ram. */
9643 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9644 	}
9645 
9646 	tg3_tx_rcbs_init(tp);
9647 	tg3_rx_ret_rcbs_init(tp);
9648 }
9649 
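/* Program the RX buffer-descriptor replenish thresholds.  The NIC asks
 * the host for more descriptors when its on-chip BD cache drains below
 * these watermarks, so the values are bounded by the per-chip cache
 * size and the configured ring sizes.
 */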
9650 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9651 {
9652 	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9653 
9654 	if (!tg3_flag(tp, 5750_PLUS) ||
9655 	    tg3_flag(tp, 5780_CLASS) ||
9656 	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
9657 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
9658 	    tg3_flag(tp, 57765_PLUS))
9659 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9660 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9661 		 tg3_asic_rev(tp) == ASIC_REV_5787)
9662 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9663 	else
9664 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9665 
9666 	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9667 	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9668 
9669 	val = min(nic_rep_thresh, host_rep_thresh);
9670 	tw32(RCVBDI_STD_THRESH, val);
9671 
9672 	if (tg3_flag(tp, 57765_PLUS))
9673 		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9674 
9675 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9676 		return;
9677 
9678 	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9679 
9680 	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9681 
9682 	val = min(bdcache_maxcnt / 2, host_rep_thresh);
9683 	tw32(RCVBDI_JUMBO_THRESH, val);
9684 
9685 	if (tg3_flag(tp, 57765_PLUS))
9686 		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9687 }
9688 
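/* Bit-serial (LSB-first) CRC-32 using the little-endian polynomial,
 * with the usual 0xffffffff preload and final inversion.  Used to hash
 * multicast addresses for the MAC hash filter.
 */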
9689 static inline u32 calc_crc(unsigned char *buf, int len)
9690 {
9691 	u32 reg;
9692 	u32 tmp;
9693 	int j, k;
9694 
9695 	reg = 0xffffffff;
9696 
9697 	for (j = 0; j < len; j++) {
9698 		reg ^= buf[j];
9699 
9700 		for (k = 0; k < 8; k++) {
9701 			tmp = reg & 0x01;
9702 
9703 			reg >>= 1;
9704 
9705 			if (tmp)
9706 				reg ^= CRC32_POLY_LE;
9707 		}
9708 	}
9709 
9710 	return ~reg;
9711 }
9712 
9713 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9714 {
9715 	/* accept or reject all multicast frames */
9716 	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9717 	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9718 	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9719 	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9720 }
9721 
9722 static void __tg3_set_rx_mode(struct net_device *dev)
9723 {
9724 	struct tg3 *tp = netdev_priv(dev);
9725 	u32 rx_mode;
9726 
9727 	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9728 				  RX_MODE_KEEP_VLAN_TAG);
9729 
9730 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9731 	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9732 	 * flag clear.
9733 	 */
9734 	if (!tg3_flag(tp, ENABLE_ASF))
9735 		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9736 #endif
9737 
9738 	if (dev->flags & IFF_PROMISC) {
9739 		/* Promiscuous mode. */
9740 		rx_mode |= RX_MODE_PROMISC;
9741 	} else if (dev->flags & IFF_ALLMULTI) {
9742 		/* Accept all multicast. */
9743 		tg3_set_multi(tp, 1);
9744 	} else if (netdev_mc_empty(dev)) {
9745 		/* Reject all multicast. */
9746 		tg3_set_multi(tp, 0);
9747 	} else {
9748 		/* Accept one or more multicast(s). */
9749 		struct netdev_hw_addr *ha;
9750 		u32 mc_filter[4] = { 0, };
9751 		u32 regidx;
9752 		u32 bit;
9753 		u32 crc;
9754 
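		/* Hash each address into the 128-bit multicast filter:
		 * the low 7 bits of the inverted CRC select one of 128
		 * bits spread across the four 32-bit MAC_HASH registers.
		 */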
9755 		netdev_for_each_mc_addr(ha, dev) {
9756 			crc = calc_crc(ha->addr, ETH_ALEN);
9757 			bit = ~crc & 0x7f;
9758 			regidx = (bit & 0x60) >> 5;
9759 			bit &= 0x1f;
9760 			mc_filter[regidx] |= (1 << bit);
9761 		}
9762 
9763 		tw32(MAC_HASH_REG_0, mc_filter[0]);
9764 		tw32(MAC_HASH_REG_1, mc_filter[1]);
9765 		tw32(MAC_HASH_REG_2, mc_filter[2]);
9766 		tw32(MAC_HASH_REG_3, mc_filter[3]);
9767 	}
9768 
9769 	if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9770 		rx_mode |= RX_MODE_PROMISC;
9771 	} else if (!(dev->flags & IFF_PROMISC)) {
9772 		/* Add all entries to the MAC addr filter list */
9773 		int i = 0;
9774 		struct netdev_hw_addr *ha;
9775 
9776 		netdev_for_each_uc_addr(ha, dev) {
9777 			__tg3_set_one_mac_addr(tp, ha->addr,
9778 					       i + TG3_UCAST_ADDR_IDX(tp));
9779 			i++;
9780 		}
9781 	}
9782 
9783 	if (rx_mode != tp->rx_mode) {
9784 		tp->rx_mode = rx_mode;
9785 		tw32_f(MAC_RX_MODE, rx_mode);
9786 		udelay(10);
9787 	}
9788 }
9789 
9790 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9791 {
9792 	int i;
9793 
9794 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9795 		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9796 }
9797 
9798 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9799 {
9800 	int i;
9801 
9802 	if (!tg3_flag(tp, SUPPORT_MSIX))
9803 		return;
9804 
9805 	if (tp->rxq_cnt == 1) {
9806 		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9807 		return;
9808 	}
9809 
9810 	/* Validate table against current IRQ count */
9811 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9812 		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9813 			break;
9814 	}
9815 
9816 	if (i != TG3_RSS_INDIR_TBL_SIZE)
9817 		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9818 }
9819 
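/* Write the RSS indirection table to the MAC.  Each 32-bit register
 * packs eight 4-bit queue indices, most significant nibble first.
 */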
9820 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9821 {
9822 	int i = 0;
9823 	u32 reg = MAC_RSS_INDIR_TBL_0;
9824 
9825 	while (i < TG3_RSS_INDIR_TBL_SIZE) {
9826 		u32 val = tp->rss_ind_tbl[i];
9827 		i++;
9828 		for (; i % 8; i++) {
9829 			val <<= 4;
9830 			val |= tp->rss_ind_tbl[i];
9831 		}
9832 		tw32(reg, val);
9833 		reg += 4;
9834 	}
9835 }
9836 
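/* Chip-specific enable bit for the LSO read-DMA TX-length workaround
 * on the 5719 and 5720; set in tg3_reset_hw and cleared again in
 * tg3_periodic_fetch_stats once enough traffic has flowed.
 */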
9837 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9838 {
9839 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
9840 		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9841 	else
9842 		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9843 }
9844 
9845 /* tp->lock is held. */
9846 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9847 {
9848 	u32 val, rdmac_mode;
9849 	int i, err, limit;
9850 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9851 
9852 	tg3_disable_ints(tp);
9853 
9854 	tg3_stop_fw(tp);
9855 
9856 	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9857 
9858 	if (tg3_flag(tp, INIT_COMPLETE))
9859 		tg3_abort_hw(tp, 1);
9860 
9861 	if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9862 	    !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9863 		tg3_phy_pull_config(tp);
9864 		tg3_eee_pull_config(tp, NULL);
9865 		tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9866 	}
9867 
9868 	/* Enable MAC control of LPI */
9869 	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9870 		tg3_setup_eee(tp);
9871 
9872 	if (reset_phy)
9873 		tg3_phy_reset(tp);
9874 
9875 	err = tg3_chip_reset(tp);
9876 	if (err)
9877 		return err;
9878 
9879 	tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9880 
9881 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9882 		val = tr32(TG3_CPMU_CTRL);
9883 		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9884 		tw32(TG3_CPMU_CTRL, val);
9885 
9886 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9887 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9888 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
9889 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9890 
9891 		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9892 		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9893 		val |= CPMU_LNK_AWARE_MACCLK_6_25;
9894 		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9895 
9896 		val = tr32(TG3_CPMU_HST_ACC);
9897 		val &= ~CPMU_HST_ACC_MACCLK_MASK;
9898 		val |= CPMU_HST_ACC_MACCLK_6_25;
9899 		tw32(TG3_CPMU_HST_ACC, val);
9900 	}
9901 
9902 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9903 		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9904 		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9905 		       PCIE_PWR_MGMT_L1_THRESH_4MS;
9906 		tw32(PCIE_PWR_MGMT_THRESH, val);
9907 
9908 		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9909 		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9910 
9911 		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9912 
9913 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9914 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9915 	}
9916 
9917 	if (tg3_flag(tp, L1PLLPD_EN)) {
9918 		u32 grc_mode = tr32(GRC_MODE);
9919 
9920 		/* Access the lower 1K of PL PCIE block registers. */
9921 		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9922 		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9923 
9924 		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9925 		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9926 		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9927 
9928 		tw32(GRC_MODE, grc_mode);
9929 	}
9930 
9931 	if (tg3_flag(tp, 57765_CLASS)) {
9932 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9933 			u32 grc_mode = tr32(GRC_MODE);
9934 
9935 			/* Access the lower 1K of PL PCIE block registers. */
9936 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9937 			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9938 
9939 			val = tr32(TG3_PCIE_TLDLPL_PORT +
9940 				   TG3_PCIE_PL_LO_PHYCTL5);
9941 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9942 			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9943 
9944 			tw32(GRC_MODE, grc_mode);
9945 		}
9946 
9947 		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9948 			u32 grc_mode;
9949 
9950 			/* Fix transmit hangs */
9951 			val = tr32(TG3_CPMU_PADRNG_CTL);
9952 			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9953 			tw32(TG3_CPMU_PADRNG_CTL, val);
9954 
9955 			grc_mode = tr32(GRC_MODE);
9956 
9957 			/* Access the lower 1K of DL PCIE block registers. */
9958 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9959 			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9960 
9961 			val = tr32(TG3_PCIE_TLDLPL_PORT +
9962 				   TG3_PCIE_DL_LO_FTSMAX);
9963 			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9964 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9965 			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9966 
9967 			tw32(GRC_MODE, grc_mode);
9968 		}
9969 
9970 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9971 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9972 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
9973 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9974 	}
9975 
9976 	/* This works around an issue with Athlon chipsets on
9977 	 * B3 tigon3 silicon.  This bit has no effect on any
9978 	 * other revision.  But do not set this on PCI Express
9979 	 * chips and don't even touch the clocks if the CPMU is present.
9980 	 */
9981 	if (!tg3_flag(tp, CPMU_PRESENT)) {
9982 		if (!tg3_flag(tp, PCI_EXPRESS))
9983 			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9984 		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9985 	}
9986 
9987 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9988 	    tg3_flag(tp, PCIX_MODE)) {
9989 		val = tr32(TG3PCI_PCISTATE);
9990 		val |= PCISTATE_RETRY_SAME_DMA;
9991 		tw32(TG3PCI_PCISTATE, val);
9992 	}
9993 
9994 	if (tg3_flag(tp, ENABLE_APE)) {
9995 		/* Allow reads and writes to the
9996 		 * APE register and memory space.
9997 		 */
9998 		val = tr32(TG3PCI_PCISTATE);
9999 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10000 		       PCISTATE_ALLOW_APE_SHMEM_WR |
10001 		       PCISTATE_ALLOW_APE_PSPACE_WR;
10002 		tw32(TG3PCI_PCISTATE, val);
10003 	}
10004 
10005 	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10006 		/* Enable some hw fixes.  */
10007 		val = tr32(TG3PCI_MSI_DATA);
10008 		val |= (1 << 26) | (1 << 28) | (1 << 29);
10009 		tw32(TG3PCI_MSI_DATA, val);
10010 	}
10011 
10012 	/* Descriptor ring init may make accesses to the
10013 	 * NIC SRAM area to setup the TX descriptors, so we
10014 	 * can only do this after the hardware has been
10015 	 * successfully reset.
10016 	 */
10017 	err = tg3_init_rings(tp);
10018 	if (err)
10019 		return err;
10020 
10021 	if (tg3_flag(tp, 57765_PLUS)) {
10022 		val = tr32(TG3PCI_DMA_RW_CTRL) &
10023 		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10024 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10025 			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10026 		if (!tg3_flag(tp, 57765_CLASS) &&
10027 		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
10028 		    tg3_asic_rev(tp) != ASIC_REV_5762)
10029 			val |= DMA_RWCTRL_TAGGED_STAT_WA;
10030 		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10031 	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10032 		   tg3_asic_rev(tp) != ASIC_REV_5761) {
10033 		/* This value is determined during the probe-time DMA
10034 		 * engine test, tg3_test_dma.
10035 		 */
10036 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10037 	}
10038 
10039 	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10040 			  GRC_MODE_4X_NIC_SEND_RINGS |
10041 			  GRC_MODE_NO_TX_PHDR_CSUM |
10042 			  GRC_MODE_NO_RX_PHDR_CSUM);
10043 	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10044 
10045 	/* Pseudo-header checksum is done by hardware logic and not
10046 	 * the offload processors, so make the chip do the pseudo-
10047 	 * header checksums on receive.  For transmit it is more
10048 	 * convenient to do the pseudo-header checksum in software
10049 	 * as Linux does that on transmit for us in all cases.
10050 	 */
10051 	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10052 
10053 	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10054 	if (tp->rxptpctl)
10055 		tw32(TG3_RX_PTP_CTL,
10056 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10057 
10058 	if (tg3_flag(tp, PTP_CAPABLE))
10059 		val |= GRC_MODE_TIME_SYNC_ENABLE;
10060 
10061 	tw32(GRC_MODE, tp->grc_mode | val);
10062 
10063 	/* On one AMD platform, the MRRS is restricted to 4000 because of a
10064 	 * south bridge limitation.  As a workaround, the driver sets the
10065 	 * MRRS to 2048 instead of the default 4096.
10066 	 */
10067 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10068 	    tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10069 		val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10070 		tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10071 	}
10072 
10073 	/* Setup the timer prescaler register.  The clock is always 66 MHz. */
10074 	val = tr32(GRC_MISC_CFG);
10075 	val &= ~0xff;
10076 	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10077 	tw32(GRC_MISC_CFG, val);
10078 
10079 	/* Initialize MBUF/DESC pool. */
10080 	if (tg3_flag(tp, 5750_PLUS)) {
10081 		/* Do nothing.  */
10082 	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10083 		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10084 		if (tg3_asic_rev(tp) == ASIC_REV_5704)
10085 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10086 		else
10087 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10088 		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10089 		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10090 	} else if (tg3_flag(tp, TSO_CAPABLE)) {
10091 		int fw_len;
10092 
10093 		fw_len = tp->fw_len;
10094 		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10095 		tw32(BUFMGR_MB_POOL_ADDR,
10096 		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10097 		tw32(BUFMGR_MB_POOL_SIZE,
10098 		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10099 	}
10100 
10101 	if (tp->dev->mtu <= ETH_DATA_LEN) {
10102 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
10103 		     tp->bufmgr_config.mbuf_read_dma_low_water);
10104 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
10105 		     tp->bufmgr_config.mbuf_mac_rx_low_water);
10106 		tw32(BUFMGR_MB_HIGH_WATER,
10107 		     tp->bufmgr_config.mbuf_high_water);
10108 	} else {
10109 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
10110 		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10111 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
10112 		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10113 		tw32(BUFMGR_MB_HIGH_WATER,
10114 		     tp->bufmgr_config.mbuf_high_water_jumbo);
10115 	}
10116 	tw32(BUFMGR_DMA_LOW_WATER,
10117 	     tp->bufmgr_config.dma_low_water);
10118 	tw32(BUFMGR_DMA_HIGH_WATER,
10119 	     tp->bufmgr_config.dma_high_water);
10120 
10121 	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10122 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
10123 		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10124 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10125 	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
10126 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10127 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10128 		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10129 	tw32(BUFMGR_MODE, val);
10130 	for (i = 0; i < 2000; i++) {
10131 		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10132 			break;
10133 		udelay(10);
10134 	}
10135 	if (i >= 2000) {
10136 		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10137 		return -ENODEV;
10138 	}
10139 
10140 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10141 		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10142 
10143 	tg3_setup_rxbd_thresholds(tp);
10144 
10145 	/* Initialize TG3_BDINFO's at:
10146 	 *  RCVDBDI_STD_BD:	standard eth size rx ring
10147 	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
10148 	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
10149 	 *
10150 	 * like so:
10151 	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
10152 	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
10153 	 *                              ring attribute flags
10154 	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
10155 	 *
10156 	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10157 	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10158 	 *
10159 	 * The size of each ring is fixed in the firmware, but the location is
10160 	 * configurable.
10161 	 */
10162 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10163 	     ((u64) tpr->rx_std_mapping >> 32));
10164 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10165 	     ((u64) tpr->rx_std_mapping & 0xffffffff));
10166 	if (!tg3_flag(tp, 5717_PLUS))
10167 		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10168 		     NIC_SRAM_RX_BUFFER_DESC);
10169 
10170 	/* Disable the mini ring */
10171 	if (!tg3_flag(tp, 5705_PLUS))
10172 		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10173 		     BDINFO_FLAGS_DISABLED);
10174 
10175 	/* Program the jumbo buffer descriptor ring control
10176 	 * blocks on those devices that have them.
10177 	 */
10178 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10179 	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10180 
10181 		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10182 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10183 			     ((u64) tpr->rx_jmb_mapping >> 32));
10184 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10185 			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10186 			val = TG3_RX_JMB_RING_SIZE(tp) <<
10187 			      BDINFO_FLAGS_MAXLEN_SHIFT;
10188 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10189 			     val | BDINFO_FLAGS_USE_EXT_RECV);
10190 			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10191 			    tg3_flag(tp, 57765_CLASS) ||
10192 			    tg3_asic_rev(tp) == ASIC_REV_5762)
10193 				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10194 				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10195 		} else {
10196 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10197 			     BDINFO_FLAGS_DISABLED);
10198 		}
10199 
10200 		if (tg3_flag(tp, 57765_PLUS)) {
10201 			val = TG3_RX_STD_RING_SIZE(tp);
10202 			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10203 			val |= (TG3_RX_STD_DMA_SZ << 2);
10204 		} else
10205 			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10206 	} else
10207 		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10208 
10209 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10210 
10211 	tpr->rx_std_prod_idx = tp->rx_pending;
10212 	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10213 
10214 	tpr->rx_jmb_prod_idx =
10215 		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10216 	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10217 
10218 	tg3_rings_reset(tp);
10219 
10220 	/* Initialize MAC address and backoff seed. */
10221 	__tg3_set_mac_addr(tp, false);
10222 
10223 	/* MTU + ethernet header + FCS + optional VLAN tag */
10224 	tw32(MAC_RX_MTU_SIZE,
10225 	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10226 
10227 	/* The slot time is changed by tg3_setup_phy if we
10228 	 * run at gigabit with half duplex.
10229 	 */
10230 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10231 	      (6 << TX_LENGTHS_IPG_SHIFT) |
10232 	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10233 
10234 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10235 	    tg3_asic_rev(tp) == ASIC_REV_5762)
10236 		val |= tr32(MAC_TX_LENGTHS) &
10237 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
10238 			TX_LENGTHS_CNT_DWN_VAL_MSK);
10239 
10240 	tw32(MAC_TX_LENGTHS, val);
10241 
10242 	/* Receive rules. */
10243 	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10244 	tw32(RCVLPC_CONFIG, 0x0181);
10245 
10246 	/* Calculate RDMAC_MODE setting early, we need it to determine
10247 	 * the RCVLPC_STATE_ENABLE mask.
10248 	 */
10249 	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10250 		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10251 		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10252 		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10253 		      RDMAC_MODE_LNGREAD_ENAB);
10254 
10255 	if (tg3_asic_rev(tp) == ASIC_REV_5717)
10256 		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10257 
10258 	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10259 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10260 	    tg3_asic_rev(tp) == ASIC_REV_57780)
10261 		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10262 			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10263 			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10264 
10265 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10266 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10267 		if (tg3_flag(tp, TSO_CAPABLE)) {
10268 			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10269 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10270 			   !tg3_flag(tp, IS_5788)) {
10271 			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10272 		}
10273 	}
10274 
10275 	if (tg3_flag(tp, PCI_EXPRESS))
10276 		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10277 
10278 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10279 		tp->dma_limit = 0;
10280 		if (tp->dev->mtu <= ETH_DATA_LEN) {
10281 			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10282 			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10283 		}
10284 	}
10285 
10286 	if (tg3_flag(tp, HW_TSO_1) ||
10287 	    tg3_flag(tp, HW_TSO_2) ||
10288 	    tg3_flag(tp, HW_TSO_3))
10289 		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10290 
10291 	if (tg3_flag(tp, 57765_PLUS) ||
10292 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10293 	    tg3_asic_rev(tp) == ASIC_REV_57780)
10294 		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10295 
10296 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10297 	    tg3_asic_rev(tp) == ASIC_REV_5762)
10298 		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10299 
10300 	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10301 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
10302 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10303 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
10304 	    tg3_flag(tp, 57765_PLUS)) {
10305 		u32 tgtreg;
10306 
10307 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
10308 			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10309 		else
10310 			tgtreg = TG3_RDMA_RSRVCTRL_REG;
10311 
10312 		val = tr32(tgtreg);
10313 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10314 		    tg3_asic_rev(tp) == ASIC_REV_5762) {
10315 			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10316 				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10317 				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10318 			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10319 			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10320 			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10321 		}
10322 		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10323 	}
10324 
10325 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10326 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
10327 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
10328 		u32 tgtreg;
10329 
10330 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
10331 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10332 		else
10333 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10334 
10335 		val = tr32(tgtreg);
10336 		tw32(tgtreg, val |
10337 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10338 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10339 	}
10340 
10341 	/* Receive/send statistics. */
10342 	if (tg3_flag(tp, 5750_PLUS)) {
10343 		val = tr32(RCVLPC_STATS_ENABLE);
10344 		val &= ~RCVLPC_STATSENAB_DACK_FIX;
10345 		tw32(RCVLPC_STATS_ENABLE, val);
10346 	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10347 		   tg3_flag(tp, TSO_CAPABLE)) {
10348 		val = tr32(RCVLPC_STATS_ENABLE);
10349 		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10350 		tw32(RCVLPC_STATS_ENABLE, val);
10351 	} else {
10352 		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10353 	}
10354 	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10355 	tw32(SNDDATAI_STATSENAB, 0xffffff);
10356 	tw32(SNDDATAI_STATSCTRL,
10357 	     (SNDDATAI_SCTRL_ENABLE |
10358 	      SNDDATAI_SCTRL_FASTUPD));
10359 
10360 	/* Setup host coalescing engine. */
10361 	tw32(HOSTCC_MODE, 0);
10362 	for (i = 0; i < 2000; i++) {
10363 		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10364 			break;
10365 		udelay(10);
10366 	}
10367 
10368 	__tg3_set_coalesce(tp, &tp->coal);
10369 
10370 	if (!tg3_flag(tp, 5705_PLUS)) {
10371 		/* Status/statistics block address.  See tg3_timer,
10372 		 * the tg3_periodic_fetch_stats call there, and
10373 		 * tg3_get_stats to see how this works for 5705/5750 chips.
10374 		 */
10375 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10376 		     ((u64) tp->stats_mapping >> 32));
10377 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10378 		     ((u64) tp->stats_mapping & 0xffffffff));
10379 		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10380 
10381 		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10382 
10383 		/* Clear statistics and status block memory areas */
10384 		for (i = NIC_SRAM_STATS_BLK;
10385 		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10386 		     i += sizeof(u32)) {
10387 			tg3_write_mem(tp, i, 0);
10388 			udelay(40);
10389 		}
10390 	}
10391 
10392 	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10393 
10394 	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10395 	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10396 	if (!tg3_flag(tp, 5705_PLUS))
10397 		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10398 
10399 	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10400 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10401 		/* Reset to prevent losing the 1st rx packet intermittently */
10402 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10403 		udelay(10);
10404 	}
10405 
10406 	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10407 			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10408 			MAC_MODE_FHDE_ENABLE;
10409 	if (tg3_flag(tp, ENABLE_APE))
10410 		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10411 	if (!tg3_flag(tp, 5705_PLUS) &&
10412 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10413 	    tg3_asic_rev(tp) != ASIC_REV_5700)
10414 		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10415 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10416 	udelay(40);
10417 
10418 	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10419 	 * If TG3_FLAG_IS_NIC is zero, we should read the
10420 	 * register to preserve the GPIO settings for LOMs. The GPIOs,
10421 	 * whether used as inputs or outputs, are set by boot code after
10422 	 * reset.
10423 	 */
10424 	if (!tg3_flag(tp, IS_NIC)) {
10425 		u32 gpio_mask;
10426 
10427 		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10428 			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10429 			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10430 
10431 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
10432 			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10433 				     GRC_LCLCTRL_GPIO_OUTPUT3;
10434 
10435 		if (tg3_asic_rev(tp) == ASIC_REV_5755)
10436 			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10437 
10438 		tp->grc_local_ctrl &= ~gpio_mask;
10439 		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10440 
10441 		/* GPIO1 must be driven high for eeprom write protect */
10442 		if (tg3_flag(tp, EEPROM_WRITE_PROT))
10443 			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10444 					       GRC_LCLCTRL_GPIO_OUTPUT1);
10445 	}
10446 	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10447 	udelay(100);
10448 
10449 	if (tg3_flag(tp, USING_MSIX)) {
10450 		val = tr32(MSGINT_MODE);
10451 		val |= MSGINT_MODE_ENABLE;
10452 		if (tp->irq_cnt > 1)
10453 			val |= MSGINT_MODE_MULTIVEC_EN;
10454 		if (!tg3_flag(tp, 1SHOT_MSI))
10455 			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10456 		tw32(MSGINT_MODE, val);
10457 	}
10458 
10459 	if (!tg3_flag(tp, 5705_PLUS)) {
10460 		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10461 		udelay(40);
10462 	}
10463 
10464 	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10465 	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10466 	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10467 	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10468 	       WDMAC_MODE_LNGREAD_ENAB);
10469 
10470 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10471 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10472 		if (tg3_flag(tp, TSO_CAPABLE) &&
10473 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10474 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10475 			/* nothing */
10476 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10477 			   !tg3_flag(tp, IS_5788)) {
10478 			val |= WDMAC_MODE_RX_ACCEL;
10479 		}
10480 	}
10481 
10482 	/* Enable host coalescing bug fix */
10483 	if (tg3_flag(tp, 5755_PLUS))
10484 		val |= WDMAC_MODE_STATUS_TAG_FIX;
10485 
10486 	if (tg3_asic_rev(tp) == ASIC_REV_5785)
10487 		val |= WDMAC_MODE_BURST_ALL_DATA;
10488 
10489 	tw32_f(WDMAC_MODE, val);
10490 	udelay(40);
10491 
10492 	if (tg3_flag(tp, PCIX_MODE)) {
10493 		u16 pcix_cmd;
10494 
10495 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10496 				     &pcix_cmd);
10497 		if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10498 			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10499 			pcix_cmd |= PCI_X_CMD_READ_2K;
10500 		} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10501 			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10502 			pcix_cmd |= PCI_X_CMD_READ_2K;
10503 		}
10504 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10505 				      pcix_cmd);
10506 	}
10507 
10508 	tw32_f(RDMAC_MODE, rdmac_mode);
10509 	udelay(40);
10510 
10511 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10512 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
10513 		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10514 			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10515 				break;
10516 		}
10517 		if (i < TG3_NUM_RDMA_CHANNELS) {
10518 			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10519 			val |= tg3_lso_rd_dma_workaround_bit(tp);
10520 			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10521 			tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10522 		}
10523 	}
10524 
10525 	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10526 	if (!tg3_flag(tp, 5705_PLUS))
10527 		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10528 
10529 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
10530 		tw32(SNDDATAC_MODE,
10531 		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10532 	else
10533 		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10534 
10535 	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10536 	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10537 	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10538 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
10539 		val |= RCVDBDI_MODE_LRG_RING_SZ;
10540 	tw32(RCVDBDI_MODE, val);
10541 	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10542 	if (tg3_flag(tp, HW_TSO_1) ||
10543 	    tg3_flag(tp, HW_TSO_2) ||
10544 	    tg3_flag(tp, HW_TSO_3))
10545 		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10546 	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10547 	if (tg3_flag(tp, ENABLE_TSS))
10548 		val |= SNDBDI_MODE_MULTI_TXQ_EN;
10549 	tw32(SNDBDI_MODE, val);
10550 	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10551 
10552 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10553 		err = tg3_load_5701_a0_firmware_fix(tp);
10554 		if (err)
10555 			return err;
10556 	}
10557 
10558 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10559 		/* Ignore any errors for the firmware download. If download
10560 		 * fails, the device will operate with EEE disabled.
10561 		 */
10562 		tg3_load_57766_firmware(tp);
10563 	}
10564 
10565 	if (tg3_flag(tp, TSO_CAPABLE)) {
10566 		err = tg3_load_tso_firmware(tp);
10567 		if (err)
10568 			return err;
10569 	}
10570 
10571 	tp->tx_mode = TX_MODE_ENABLE;
10572 
10573 	if (tg3_flag(tp, 5755_PLUS) ||
10574 	    tg3_asic_rev(tp) == ASIC_REV_5906)
10575 		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10576 
10577 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10578 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
10579 		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10580 		tp->tx_mode &= ~val;
10581 		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10582 	}
10583 
10584 	tw32_f(MAC_TX_MODE, tp->tx_mode);
10585 	udelay(100);
10586 
10587 	if (tg3_flag(tp, ENABLE_RSS)) {
10588 		u32 rss_key[10];
10589 
10590 		tg3_rss_write_indir_tbl(tp);
10591 
10592 		netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10593 
10594 		for (i = 0; i < 10; i++)
10595 			tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10596 	}
10597 
10598 	tp->rx_mode = RX_MODE_ENABLE;
10599 	if (tg3_flag(tp, 5755_PLUS))
10600 		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10601 
10602 	if (tg3_asic_rev(tp) == ASIC_REV_5762)
10603 		tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10604 
10605 	if (tg3_flag(tp, ENABLE_RSS))
10606 		tp->rx_mode |= RX_MODE_RSS_ENABLE |
10607 			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
10608 			       RX_MODE_RSS_IPV6_HASH_EN |
10609 			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
10610 			       RX_MODE_RSS_IPV4_HASH_EN |
10611 			       RX_MODE_RSS_TCP_IPV4_HASH_EN;
10612 
10613 	tw32_f(MAC_RX_MODE, tp->rx_mode);
10614 	udelay(10);
10615 
10616 	tw32(MAC_LED_CTRL, tp->led_ctrl);
10617 
10618 	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10619 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10620 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10621 		udelay(10);
10622 	}
10623 	tw32_f(MAC_RX_MODE, tp->rx_mode);
10624 	udelay(10);
10625 
10626 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10627 		if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10628 		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10629 			/* Set drive transmission level to 1.2V, but only
10630 			 * if the signal pre-emphasis bit is not set.  */
10631 			val = tr32(MAC_SERDES_CFG);
10632 			val &= 0xfffff000;
10633 			val |= 0x880;
10634 			tw32(MAC_SERDES_CFG, val);
10635 		}
10636 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10637 			tw32(MAC_SERDES_CFG, 0x616000);
10638 	}
10639 
10640 	/* Prevent chip from dropping frames when flow control
10641 	 * is enabled.
10642 	 */
10643 	if (tg3_flag(tp, 57765_CLASS))
10644 		val = 1;
10645 	else
10646 		val = 2;
10647 	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10648 
10649 	if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10650 	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10651 		/* Use hardware link auto-negotiation */
10652 		tg3_flag_set(tp, HW_AUTONEG);
10653 	}
10654 
10655 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10656 	    tg3_asic_rev(tp) == ASIC_REV_5714) {
10657 		u32 tmp;
10658 
10659 		tmp = tr32(SERDES_RX_CTRL);
10660 		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10661 		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10662 		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10663 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10664 	}
10665 
10666 	if (!tg3_flag(tp, USE_PHYLIB)) {
10667 		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10668 			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10669 
10670 		err = tg3_setup_phy(tp, false);
10671 		if (err)
10672 			return err;
10673 
10674 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10675 		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10676 			u32 tmp;
10677 
10678 			/* Clear CRC stats. */
10679 			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10680 				tg3_writephy(tp, MII_TG3_TEST1,
10681 					     tmp | MII_TG3_TEST1_CRC_EN);
10682 				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10683 			}
10684 		}
10685 	}
10686 
10687 	__tg3_set_rx_mode(tp->dev);
10688 
10689 	/* Initialize receive rules. */
10690 	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10691 	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10692 	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10693 	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10694 
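	/* Zero out the remaining receive-rule slots.  5705+ chips (other
	 * than the 5780 class) implement 8 rule slots, older chips 16;
	 * when ASF is enabled the last four slots are left alone,
	 * presumably for the ASF firmware's own use.
	 */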
10695 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10696 		limit = 8;
10697 	else
10698 		limit = 16;
10699 	if (tg3_flag(tp, ENABLE_ASF))
10700 		limit -= 4;
10701 	switch (limit) {
10702 	case 16:
10703 		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10704 		fallthrough;
10705 	case 15:
10706 		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10707 		fallthrough;
10708 	case 14:
10709 		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10710 		fallthrough;
10711 	case 13:
10712 		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10713 		fallthrough;
10714 	case 12:
10715 		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10716 		fallthrough;
10717 	case 11:
10718 		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10719 		fallthrough;
10720 	case 10:
10721 		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10722 		fallthrough;
10723 	case 9:
10724 		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10725 		fallthrough;
10726 	case 8:
10727 		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10728 		fallthrough;
10729 	case 7:
10730 		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10731 		fallthrough;
10732 	case 6:
10733 		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10734 		fallthrough;
10735 	case 5:
10736 		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10737 		fallthrough;
10738 	case 4:
10739 		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10740 	case 3:
10741 		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10742 	case 2:
10743 	case 1:
10744 
10745 	default:
10746 		break;
10747 	}
10748 
10749 	if (tg3_flag(tp, ENABLE_APE))
10750 		/* Write our heartbeat update interval to APE. */
10751 		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10752 				APE_HOST_HEARTBEAT_INT_5SEC);
10753 
10754 	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10755 
10756 	return 0;
10757 }
10758 
10759 /* Called at device open time to get the chip ready for
10760  * packet processing.  Invoked with tp->lock held.
10761  */
10762 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10763 {
10764 	/* Chip may have been just powered on. If so, the boot code may still
10765 	 * be running initialization. Wait for it to finish to avoid races in
10766 	 * accessing the hardware.
10767 	 */
10768 	tg3_enable_register_access(tp);
10769 	tg3_poll_fw(tp);
10770 
10771 	tg3_switch_clocks(tp);
10772 
10773 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10774 
10775 	return tg3_reset_hw(tp, reset_phy);
10776 }
10777 
10778 #ifdef CONFIG_TIGON3_HWMON
10779 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10780 {
10781 	u32 off, len = TG3_OCIR_LEN;
10782 	int i;
10783 
10784 	for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) {
10785 		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10786 
10787 		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10788 		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10789 			memset(ocir, 0, len);
10790 	}
10791 }
10792 
10793 /* sysfs attributes for hwmon */
10794 static ssize_t tg3_show_temp(struct device *dev,
10795 			     struct device_attribute *devattr, char *buf)
10796 {
10797 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10798 	struct tg3 *tp = dev_get_drvdata(dev);
10799 	u32 temperature;
10800 
10801 	spin_lock_bh(&tp->lock);
10802 	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10803 				sizeof(temperature));
10804 	spin_unlock_bh(&tp->lock);
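	/* The APE reports the temperature in degrees Celsius; the hwmon
	 * sysfs ABI expects millidegrees, hence the multiply by 1000.
	 */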
10805 	return sprintf(buf, "%u\n", temperature * 1000);
10806 }
10807 
10808 
10809 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
10810 			  TG3_TEMP_SENSOR_OFFSET);
10811 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
10812 			  TG3_TEMP_CAUTION_OFFSET);
10813 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
10814 			  TG3_TEMP_MAX_OFFSET);
10815 
10816 static struct attribute *tg3_attrs[] = {
10817 	&sensor_dev_attr_temp1_input.dev_attr.attr,
10818 	&sensor_dev_attr_temp1_crit.dev_attr.attr,
10819 	&sensor_dev_attr_temp1_max.dev_attr.attr,
10820 	NULL
10821 };
10822 ATTRIBUTE_GROUPS(tg3);
10823 
10824 static void tg3_hwmon_close(struct tg3 *tp)
10825 {
10826 	if (tp->hwmon_dev) {
10827 		hwmon_device_unregister(tp->hwmon_dev);
10828 		tp->hwmon_dev = NULL;
10829 	}
10830 }
10831 
10832 static void tg3_hwmon_open(struct tg3 *tp)
10833 {
10834 	int i;
10835 	u32 size = 0;
10836 	struct pci_dev *pdev = tp->pdev;
10837 	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10838 
10839 	tg3_sd_scan_scratchpad(tp, ocirs);
10840 
10841 	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10842 		if (!ocirs[i].src_data_length)
10843 			continue;
10844 
10845 		size += ocirs[i].src_hdr_length;
10846 		size += ocirs[i].src_data_length;
10847 	}
10848 
10849 	if (!size)
10850 		return;
10851 
10852 	tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10853 							  tp, tg3_groups);
10854 	if (IS_ERR(tp->hwmon_dev)) {
10855 		tp->hwmon_dev = NULL;
10856 		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10857 	}
10858 }
10859 #else
10860 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10861 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10862 #endif /* CONFIG_TIGON3_HWMON */
10863 
10864 
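/* Fold a 32-bit hardware statistics register into a 64-bit high/low
 * counter, carrying into the high word when the low word wraps.
 */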
10865 #define TG3_STAT_ADD32(PSTAT, REG) \
10866 do {	u32 __val = tr32(REG); \
10867 	(PSTAT)->low += __val; \
10868 	if ((PSTAT)->low < __val) \
10869 		(PSTAT)->high += 1; \
10870 } while (0)
10871 
10872 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10873 {
10874 	struct tg3_hw_stats *sp = tp->hw_stats;
10875 
10876 	if (!tp->link_up)
10877 		return;
10878 
10879 	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10880 	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10881 	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10882 	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10883 	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10884 	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10885 	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10886 	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10887 	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10888 	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10889 	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10890 	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10891 	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
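	/* Once more packets have been sent than there are RDMA channels,
	 * the 5719/5720 read-DMA length workaround enabled during
	 * tg3_reset_hw is no longer needed and is switched back off.
	 */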
10892 	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10893 		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10894 		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10895 		u32 val;
10896 
10897 		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10898 		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10899 		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10900 		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10901 	}
10902 
10903 	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10904 	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10905 	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10906 	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10907 	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10908 	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10909 	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10910 	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10911 	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10912 	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10913 	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10914 	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10915 	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10916 	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10917 
10918 	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10919 	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10920 	    tg3_asic_rev(tp) != ASIC_REV_5762 &&
10921 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10922 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10923 		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10924 	} else {
10925 		u32 val = tr32(HOSTCC_FLOW_ATTN);
10926 		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10927 		if (val) {
10928 			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10929 			sp->rx_discards.low += val;
10930 			if (sp->rx_discards.low < val)
10931 				sp->rx_discards.high += 1;
10932 		}
10933 		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10934 	}
10935 	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10936 }
10937 
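/* Work around chips that can occasionally drop an MSI: if a vector has
 * work pending but its consumer indices have not advanced since the
 * previous timer tick, invoke the MSI handler by hand.
 */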
10938 static void tg3_chk_missed_msi(struct tg3 *tp)
10939 {
10940 	u32 i;
10941 
10942 	for (i = 0; i < tp->irq_cnt; i++) {
10943 		struct tg3_napi *tnapi = &tp->napi[i];
10944 
10945 		if (tg3_has_work(tnapi)) {
10946 			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10947 			    tnapi->last_tx_cons == tnapi->tx_cons) {
10948 				if (tnapi->chk_msi_cnt < 1) {
10949 					tnapi->chk_msi_cnt++;
10950 					return;
10951 				}
10952 				tg3_msi(0, tnapi);
10953 			}
10954 		}
10955 		tnapi->chk_msi_cnt = 0;
10956 		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10957 		tnapi->last_tx_cons = tnapi->tx_cons;
10958 	}
10959 }
10960 
10961 static void tg3_timer(struct timer_list *t)
10962 {
10963 	struct tg3 *tp = from_timer(tp, t, timer);
10964 
10965 	spin_lock(&tp->lock);
10966 
10967 	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10968 		spin_unlock(&tp->lock);
10969 		goto restart_timer;
10970 	}
10971 
10972 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10973 	    tg3_flag(tp, 57765_CLASS))
10974 		tg3_chk_missed_msi(tp);
10975 
10976 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10977 		/* BCM4785: Flush posted writes from GbE to host memory. */
10978 		tr32(HOSTCC_MODE);
10979 	}
10980 
10981 	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because, when using non-tagged
		 * IRQ status, the mailbox/status_block protocol the chip
		 * uses with the CPU is race prone.
		 */
10986 		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10987 			tw32(GRC_LOCAL_CTRL,
10988 			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10989 		} else {
10990 			tw32(HOSTCC_MODE, tp->coalesce_mode |
10991 			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10992 		}
10993 
10994 		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10995 			spin_unlock(&tp->lock);
10996 			tg3_reset_task_schedule(tp);
10997 			goto restart_timer;
10998 		}
10999 	}
11000 
11001 	/* This part only runs once per second. */
11002 	if (!--tp->timer_counter) {
11003 		if (tg3_flag(tp, 5705_PLUS))
11004 			tg3_periodic_fetch_stats(tp);
11005 
11006 		if (tp->setlpicnt && !--tp->setlpicnt)
11007 			tg3_phy_eee_enable(tp);
11008 
11009 		if (tg3_flag(tp, USE_LINKCHG_REG)) {
11010 			u32 mac_stat;
11011 			int phy_event;
11012 
11013 			mac_stat = tr32(MAC_STATUS);
11014 
11015 			phy_event = 0;
11016 			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11017 				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11018 					phy_event = 1;
11019 			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11020 				phy_event = 1;
11021 
11022 			if (phy_event)
11023 				tg3_setup_phy(tp, false);
11024 		} else if (tg3_flag(tp, POLL_SERDES)) {
11025 			u32 mac_stat = tr32(MAC_STATUS);
11026 			int need_setup = 0;
11027 
11028 			if (tp->link_up &&
11029 			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11030 				need_setup = 1;
11031 			}
11032 			if (!tp->link_up &&
11033 			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
11034 					 MAC_STATUS_SIGNAL_DET))) {
11035 				need_setup = 1;
11036 			}
11037 			if (need_setup) {
11038 				if (!tp->serdes_counter) {
11039 					tw32_f(MAC_MODE,
11040 					     (tp->mac_mode &
11041 					      ~MAC_MODE_PORT_MODE_MASK));
11042 					udelay(40);
11043 					tw32_f(MAC_MODE, tp->mac_mode);
11044 					udelay(40);
11045 				}
11046 				tg3_setup_phy(tp, false);
11047 			}
11048 		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11049 			   tg3_flag(tp, 5780_CLASS)) {
11050 			tg3_serdes_parallel_detect(tp);
11051 		} else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11052 			u32 cpmu = tr32(TG3_CPMU_STATUS);
11053 			bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11054 					 TG3_CPMU_STATUS_LINK_MASK);
11055 
11056 			if (link_up != tp->link_up)
11057 				tg3_setup_phy(tp, false);
11058 		}
11059 
11060 		tp->timer_counter = tp->timer_multiplier;
11061 	}
11062 
11063 	/* Heartbeat is only sent once every 2 seconds.
11064 	 *
11065 	 * The heartbeat is to tell the ASF firmware that the host
11066 	 * driver is still alive.  In the event that the OS crashes,
11067 	 * ASF needs to reset the hardware to free up the FIFO space
11068 	 * that may be filled with rx packets destined for the host.
11069 	 * If the FIFO is full, ASF will no longer function properly.
11070 	 *
	 * Unintended resets have been reported on real-time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * the same problem.
11074 	 *
11075 	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11076 	 * to check the ring condition when the heartbeat is expiring
11077 	 * before doing the reset.  This will prevent most unintended
11078 	 * resets.
11079 	 */
11080 	if (!--tp->asf_counter) {
11081 		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11082 			tg3_wait_for_event_ack(tp);
11083 
11084 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11085 				      FWCMD_NICDRV_ALIVE3);
11086 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11087 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11088 				      TG3_FW_UPDATE_TIMEOUT_SEC);
11089 
11090 			tg3_generate_fw_event(tp);
11091 		}
11092 		tp->asf_counter = tp->asf_multiplier;
11093 	}
11094 
	/* Update the APE heartbeat every 5 seconds. */
11096 	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11097 
11098 	spin_unlock(&tp->lock);
11099 
11100 restart_timer:
11101 	tp->timer.expires = jiffies + tp->timer_offset;
11102 	add_timer(&tp->timer);
11103 }
11104 
11105 static void tg3_timer_init(struct tg3 *tp)
11106 {
11107 	if (tg3_flag(tp, TAGGED_STATUS) &&
11108 	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
11109 	    !tg3_flag(tp, 57765_CLASS))
11110 		tp->timer_offset = HZ;
11111 	else
11112 		tp->timer_offset = HZ / 10;
11113 
11114 	BUG_ON(tp->timer_offset > HZ);
11115 
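	/* timer_offset is the tick period; the multipliers convert it into
	 * a once-per-second counter and a once-per-TG3_FW_UPDATE_FREQ_SEC
	 * ASF heartbeat counter.
	 */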
11116 	tp->timer_multiplier = (HZ / tp->timer_offset);
11117 	tp->asf_multiplier = (HZ / tp->timer_offset) *
11118 			     TG3_FW_UPDATE_FREQ_SEC;
11119 
11120 	timer_setup(&tp->timer, tg3_timer, 0);
11121 }
11122 
11123 static void tg3_timer_start(struct tg3 *tp)
11124 {
11125 	tp->asf_counter   = tp->asf_multiplier;
11126 	tp->timer_counter = tp->timer_multiplier;
11127 
11128 	tp->timer.expires = jiffies + tp->timer_offset;
11129 	add_timer(&tp->timer);
11130 }
11131 
11132 static void tg3_timer_stop(struct tg3 *tp)
11133 {
11134 	del_timer_sync(&tp->timer);
11135 }
11136 
11137 /* Restart hardware after configuration changes, self-test, etc.
11138  * Invoked with tp->lock held.
11139  */
11140 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11141 	__releases(tp->lock)
11142 	__acquires(tp->lock)
11143 {
11144 	int err;
11145 
11146 	err = tg3_init_hw(tp, reset_phy);
11147 	if (err) {
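		/* Halt the chip and close the device.  dev_close() can
		 * sleep, so tp->lock is dropped around the teardown and
		 * retaken afterwards (see the __releases/__acquires
		 * annotations above).
		 */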
11148 		netdev_err(tp->dev,
11149 			   "Failed to re-initialize device, aborting\n");
11150 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11151 		tg3_full_unlock(tp);
11152 		tg3_timer_stop(tp);
11153 		tp->irq_sync = 0;
11154 		tg3_napi_enable(tp);
11155 		dev_close(tp->dev);
11156 		tg3_full_lock(tp, 0);
11157 	}
11158 	return err;
11159 }
11160 
11161 static void tg3_reset_task(struct work_struct *work)
11162 {
11163 	struct tg3 *tp = container_of(work, struct tg3, reset_task);
11164 	int err;
11165 
11166 	rtnl_lock();
11167 	tg3_full_lock(tp, 0);
11168 
11169 	if (!netif_running(tp->dev)) {
11170 		tg3_flag_clear(tp, RESET_TASK_PENDING);
11171 		tg3_full_unlock(tp);
11172 		rtnl_unlock();
11173 		return;
11174 	}
11175 
11176 	tg3_full_unlock(tp);
11177 
11178 	tg3_phy_stop(tp);
11179 
11180 	tg3_netif_stop(tp);
11181 
11182 	tg3_full_lock(tp, 1);
11183 
11184 	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11185 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
11186 		tp->write32_rx_mbox = tg3_write_flush_reg32;
11187 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
11188 		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11189 	}
11190 
11191 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11192 	err = tg3_init_hw(tp, true);
11193 	if (err) {
11194 		tg3_full_unlock(tp);
11195 		tp->irq_sync = 0;
11196 		tg3_napi_enable(tp);
11197 		/* Clear this flag so that tg3_reset_task_cancel() will not
11198 		 * call cancel_work_sync() and wait forever.
11199 		 */
11200 		tg3_flag_clear(tp, RESET_TASK_PENDING);
11201 		dev_close(tp->dev);
11202 		goto out;
11203 	}
11204 
11205 	tg3_netif_start(tp);
11206 	tg3_full_unlock(tp);
11207 	tg3_phy_start(tp);
11208 	tg3_flag_clear(tp, RESET_TASK_PENDING);
11209 out:
11210 	rtnl_unlock();
11211 }
11212 
11213 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11214 {
11215 	irq_handler_t fn;
11216 	unsigned long flags;
11217 	char *name;
11218 	struct tg3_napi *tnapi = &tp->napi[irq_num];
11219 
11220 	if (tp->irq_cnt == 1)
11221 		name = tp->dev->name;
11222 	else {
11223 		name = &tnapi->irq_lbl[0];
11224 		if (tnapi->tx_buffers && tnapi->rx_rcb)
11225 			snprintf(name, IFNAMSIZ,
11226 				 "%s-txrx-%d", tp->dev->name, irq_num);
11227 		else if (tnapi->tx_buffers)
11228 			snprintf(name, IFNAMSIZ,
11229 				 "%s-tx-%d", tp->dev->name, irq_num);
11230 		else if (tnapi->rx_rcb)
11231 			snprintf(name, IFNAMSIZ,
11232 				 "%s-rx-%d", tp->dev->name, irq_num);
11233 		else
11234 			snprintf(name, IFNAMSIZ,
11235 				 "%s-%d", tp->dev->name, irq_num);
11236 		name[IFNAMSIZ-1] = 0;
11237 	}
11238 
11239 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11240 		fn = tg3_msi;
11241 		if (tg3_flag(tp, 1SHOT_MSI))
11242 			fn = tg3_msi_1shot;
11243 		flags = 0;
11244 	} else {
11245 		fn = tg3_interrupt;
11246 		if (tg3_flag(tp, TAGGED_STATUS))
11247 			fn = tg3_interrupt_tagged;
11248 		flags = IRQF_SHARED;
11249 	}
11250 
11251 	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11252 }
11253 
11254 static int tg3_test_interrupt(struct tg3 *tp)
11255 {
11256 	struct tg3_napi *tnapi = &tp->napi[0];
11257 	struct net_device *dev = tp->dev;
11258 	int err, i, intr_ok = 0;
11259 	u32 val;
11260 
11261 	if (!netif_running(dev))
11262 		return -ENODEV;
11263 
11264 	tg3_disable_ints(tp);
11265 
11266 	free_irq(tnapi->irq_vec, tnapi);
11267 
11268 	/*
11269 	 * Turn off MSI one shot mode.  Otherwise this test has no
11270 	 * observable way to know whether the interrupt was delivered.
11271 	 */
11272 	if (tg3_flag(tp, 57765_PLUS)) {
11273 		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11274 		tw32(MSGINT_MODE, val);
11275 	}
11276 
11277 	err = request_irq(tnapi->irq_vec, tg3_test_isr,
11278 			  IRQF_SHARED, dev->name, tnapi);
11279 	if (err)
11280 		return err;
11281 
11282 	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11283 	tg3_enable_ints(tp);
11284 
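	/* Force an immediate coalescing event so the chip raises the test
	 * interrupt right away instead of waiting for the coalescing timer.
	 */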
11285 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11286 	       tnapi->coal_now);
11287 
11288 	for (i = 0; i < 5; i++) {
11289 		u32 int_mbox, misc_host_ctrl;
11290 
11291 		int_mbox = tr32_mailbox(tnapi->int_mbox);
11292 		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11293 
11294 		if ((int_mbox != 0) ||
11295 		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11296 			intr_ok = 1;
11297 			break;
11298 		}
11299 
11300 		if (tg3_flag(tp, 57765_PLUS) &&
11301 		    tnapi->hw_status->status_tag != tnapi->last_tag)
11302 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11303 
11304 		msleep(10);
11305 	}
11306 
11307 	tg3_disable_ints(tp);
11308 
11309 	free_irq(tnapi->irq_vec, tnapi);
11310 
11311 	err = tg3_request_irq(tp, 0);
11312 
11313 	if (err)
11314 		return err;
11315 
11316 	if (intr_ok) {
11317 		/* Reenable MSI one shot mode. */
11318 		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11319 			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11320 			tw32(MSGINT_MODE, val);
11321 		}
11322 		return 0;
11323 	}
11324 
11325 	return -EIO;
11326 }
11327 
/* Returns 0 if the MSI test succeeds, or if the MSI test fails but
 * INTx mode is successfully restored.
 */
11331 static int tg3_test_msi(struct tg3 *tp)
11332 {
11333 	int err;
11334 	u16 pci_cmd;
11335 
11336 	if (!tg3_flag(tp, USING_MSI))
11337 		return 0;
11338 
11339 	/* Turn off SERR reporting in case MSI terminates with Master
11340 	 * Abort.
11341 	 */
11342 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11343 	pci_write_config_word(tp->pdev, PCI_COMMAND,
11344 			      pci_cmd & ~PCI_COMMAND_SERR);
11345 
11346 	err = tg3_test_interrupt(tp);
11347 
11348 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11349 
11350 	if (!err)
11351 		return 0;
11352 
11353 	/* other failures */
11354 	if (err != -EIO)
11355 		return err;
11356 
11357 	/* MSI test failed, go back to INTx mode */
11358 	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11359 		    "to INTx mode. Please report this failure to the PCI "
11360 		    "maintainer and include system chipset information\n");
11361 
11362 	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11363 
11364 	pci_disable_msi(tp->pdev);
11365 
11366 	tg3_flag_clear(tp, USING_MSI);
11367 	tp->napi[0].irq_vec = tp->pdev->irq;
11368 
11369 	err = tg3_request_irq(tp, 0);
11370 	if (err)
11371 		return err;
11372 
11373 	/* Need to reset the chip because the MSI cycle may have terminated
11374 	 * with Master Abort.
11375 	 */
11376 	tg3_full_lock(tp, 1);
11377 
11378 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11379 	err = tg3_init_hw(tp, true);
11380 
11381 	tg3_full_unlock(tp);
11382 
11383 	if (err)
11384 		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11385 
11386 	return err;
11387 }
11388 
11389 static int tg3_request_firmware(struct tg3 *tp)
11390 {
11391 	const struct tg3_firmware_hdr *fw_hdr;
11392 
11393 	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11394 		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11395 			   tp->fw_needed);
11396 		return -ENOENT;
11397 	}
11398 
11399 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11400 
	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course).
	 */
11405 
11406 	tp->fw_len = be32_to_cpu(fw_hdr->len);	/* includes bss */
11407 	if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11408 		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11409 			   tp->fw_len, tp->fw_needed);
11410 		release_firmware(tp->fw);
11411 		tp->fw = NULL;
11412 		return -EINVAL;
11413 	}
11414 
11415 	/* We no longer need firmware; we have it. */
11416 	tp->fw_needed = NULL;
11417 	return 0;
11418 }
11419 
11420 static u32 tg3_irq_count(struct tg3 *tp)
11421 {
11422 	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11423 
11424 	if (irq_cnt > 1) {
		/* We want as many rx rings enabled as there are CPUs.
		 * In multiqueue MSI-X mode, the first MSI-X vector
		 * only deals with link interrupts, etc., so we add
		 * one to the number of vectors we are requesting.
		 */
11430 		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11431 	}
11432 
11433 	return irq_cnt;
11434 }
11435 
11436 static bool tg3_enable_msix(struct tg3 *tp)
11437 {
11438 	int i, rc;
11439 	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11440 
11441 	tp->txq_cnt = tp->txq_req;
11442 	tp->rxq_cnt = tp->rxq_req;
11443 	if (!tp->rxq_cnt)
11444 		tp->rxq_cnt = netif_get_num_default_rss_queues();
11445 	if (tp->rxq_cnt > tp->rxq_max)
11446 		tp->rxq_cnt = tp->rxq_max;
11447 
11448 	/* Disable multiple TX rings by default.  Simple round-robin hardware
11449 	 * scheduling of the TX rings can cause starvation of rings with
11450 	 * small packets when other rings have TSO or jumbo packets.
11451 	 */
11452 	if (!tp->txq_req)
11453 		tp->txq_cnt = 1;
11454 
11455 	tp->irq_cnt = tg3_irq_count(tp);
11456 
11457 	for (i = 0; i < tp->irq_max; i++) {
11458 		msix_ent[i].entry  = i;
11459 		msix_ent[i].vector = 0;
11460 	}
11461 
11462 	rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11463 	if (rc < 0) {
11464 		return false;
11465 	} else if (rc < tp->irq_cnt) {
11466 		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11467 			      tp->irq_cnt, rc);
11468 		tp->irq_cnt = rc;
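		/* Vector 0 handles link interrupts only, so only rc - 1
		 * vectors are left for rx rings.
		 */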
11469 		tp->rxq_cnt = max(rc - 1, 1);
11470 		if (tp->txq_cnt)
11471 			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11472 	}
11473 
11474 	for (i = 0; i < tp->irq_max; i++)
11475 		tp->napi[i].irq_vec = msix_ent[i].vector;
11476 
11477 	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11478 		pci_disable_msix(tp->pdev);
11479 		return false;
11480 	}
11481 
11482 	if (tp->irq_cnt == 1)
11483 		return true;
11484 
11485 	tg3_flag_set(tp, ENABLE_RSS);
11486 
11487 	if (tp->txq_cnt > 1)
11488 		tg3_flag_set(tp, ENABLE_TSS);
11489 
11490 	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11491 
11492 	return true;
11493 }
11494 
11495 static void tg3_ints_init(struct tg3 *tp)
11496 {
11497 	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11498 	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI-supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
11502 		netdev_warn(tp->dev,
11503 			    "MSI without TAGGED_STATUS? Not using MSI\n");
11504 		goto defcfg;
11505 	}
11506 
11507 	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11508 		tg3_flag_set(tp, USING_MSIX);
11509 	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11510 		tg3_flag_set(tp, USING_MSI);
11511 
11512 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11513 		u32 msi_mode = tr32(MSGINT_MODE);
11514 		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11515 			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11516 		if (!tg3_flag(tp, 1SHOT_MSI))
11517 			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11518 		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11519 	}
11520 defcfg:
11521 	if (!tg3_flag(tp, USING_MSIX)) {
11522 		tp->irq_cnt = 1;
11523 		tp->napi[0].irq_vec = tp->pdev->irq;
11524 	}
11525 
11526 	if (tp->irq_cnt == 1) {
11527 		tp->txq_cnt = 1;
11528 		tp->rxq_cnt = 1;
11529 		netif_set_real_num_tx_queues(tp->dev, 1);
11530 		netif_set_real_num_rx_queues(tp->dev, 1);
11531 	}
11532 }
11533 
11534 static void tg3_ints_fini(struct tg3 *tp)
11535 {
11536 	if (tg3_flag(tp, USING_MSIX))
11537 		pci_disable_msix(tp->pdev);
11538 	else if (tg3_flag(tp, USING_MSI))
11539 		pci_disable_msi(tp->pdev);
11540 	tg3_flag_clear(tp, USING_MSI);
11541 	tg3_flag_clear(tp, USING_MSIX);
11542 	tg3_flag_clear(tp, ENABLE_RSS);
11543 	tg3_flag_clear(tp, ENABLE_TSS);
11544 }
11545 
11546 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11547 		     bool init)
11548 {
11549 	struct net_device *dev = tp->dev;
11550 	int i, err;
11551 
11552 	/*
11553 	 * Setup interrupts first so we know how
11554 	 * many NAPI resources to allocate
11555 	 */
11556 	tg3_ints_init(tp);
11557 
11558 	tg3_rss_check_indir_tbl(tp);
11559 
11560 	/* The placement of this call is tied
11561 	 * to the setup and use of Host TX descriptors.
11562 	 */
11563 	err = tg3_alloc_consistent(tp);
11564 	if (err)
11565 		goto out_ints_fini;
11566 
11567 	tg3_napi_init(tp);
11568 
11569 	tg3_napi_enable(tp);
11570 
11571 	for (i = 0; i < tp->irq_cnt; i++) {
11572 		err = tg3_request_irq(tp, i);
11573 		if (err) {
11574 			for (i--; i >= 0; i--) {
11575 				struct tg3_napi *tnapi = &tp->napi[i];
11576 
11577 				free_irq(tnapi->irq_vec, tnapi);
11578 			}
11579 			goto out_napi_fini;
11580 		}
11581 	}
11582 
11583 	tg3_full_lock(tp, 0);
11584 
11585 	if (init)
11586 		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11587 
11588 	err = tg3_init_hw(tp, reset_phy);
11589 	if (err) {
11590 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11591 		tg3_free_rings(tp);
11592 	}
11593 
11594 	tg3_full_unlock(tp);
11595 
11596 	if (err)
11597 		goto out_free_irq;
11598 
11599 	if (test_irq && tg3_flag(tp, USING_MSI)) {
11600 		err = tg3_test_msi(tp);
11601 
11602 		if (err) {
11603 			tg3_full_lock(tp, 0);
11604 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11605 			tg3_free_rings(tp);
11606 			tg3_full_unlock(tp);
11607 
11608 			goto out_napi_fini;
11609 		}
11610 
11611 		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11612 			u32 val = tr32(PCIE_TRANSACTION_CFG);
11613 
11614 			tw32(PCIE_TRANSACTION_CFG,
11615 			     val | PCIE_TRANS_CFG_1SHOT_MSI);
11616 		}
11617 	}
11618 
11619 	tg3_phy_start(tp);
11620 
11621 	tg3_hwmon_open(tp);
11622 
11623 	tg3_full_lock(tp, 0);
11624 
11625 	tg3_timer_start(tp);
11626 	tg3_flag_set(tp, INIT_COMPLETE);
11627 	tg3_enable_ints(tp);
11628 
11629 	tg3_ptp_resume(tp);
11630 
11631 	tg3_full_unlock(tp);
11632 
11633 	netif_tx_start_all_queues(dev);
11634 
11635 	/*
11636 	 * Reset loopback feature if it was turned on while the device was down
11637 	 * make sure that it's installed properly now.
11638 	 */
11639 	if (dev->features & NETIF_F_LOOPBACK)
11640 		tg3_set_loopback(dev, dev->features);
11641 
11642 	return 0;
11643 
11644 out_free_irq:
11645 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11646 		struct tg3_napi *tnapi = &tp->napi[i];
11647 		free_irq(tnapi->irq_vec, tnapi);
11648 	}
11649 
11650 out_napi_fini:
11651 	tg3_napi_disable(tp);
11652 	tg3_napi_fini(tp);
11653 	tg3_free_consistent(tp);
11654 
11655 out_ints_fini:
11656 	tg3_ints_fini(tp);
11657 
11658 	return err;
11659 }
11660 
11661 static void tg3_stop(struct tg3 *tp)
11662 {
11663 	int i;
11664 
11665 	tg3_reset_task_cancel(tp);
11666 	tg3_netif_stop(tp);
11667 
11668 	tg3_timer_stop(tp);
11669 
11670 	tg3_hwmon_close(tp);
11671 
11672 	tg3_phy_stop(tp);
11673 
11674 	tg3_full_lock(tp, 1);
11675 
11676 	tg3_disable_ints(tp);
11677 
11678 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11679 	tg3_free_rings(tp);
11680 	tg3_flag_clear(tp, INIT_COMPLETE);
11681 
11682 	tg3_full_unlock(tp);
11683 
11684 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11685 		struct tg3_napi *tnapi = &tp->napi[i];
11686 		free_irq(tnapi->irq_vec, tnapi);
11687 	}
11688 
11689 	tg3_ints_fini(tp);
11690 
11691 	tg3_napi_fini(tp);
11692 
11693 	tg3_free_consistent(tp);
11694 }
11695 
11696 static int tg3_open(struct net_device *dev)
11697 {
11698 	struct tg3 *tp = netdev_priv(dev);
11699 	int err;
11700 
11701 	if (tp->pcierr_recovery) {
11702 		netdev_err(dev, "Failed to open device. PCI error recovery "
11703 			   "in progress\n");
11704 		return -EAGAIN;
11705 	}
11706 
11707 	if (tp->fw_needed) {
11708 		err = tg3_request_firmware(tp);
11709 		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11710 			if (err) {
11711 				netdev_warn(tp->dev, "EEE capability disabled\n");
11712 				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11713 			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11714 				netdev_warn(tp->dev, "EEE capability restored\n");
11715 				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11716 			}
11717 		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11718 			if (err)
11719 				return err;
11720 		} else if (err) {
11721 			netdev_warn(tp->dev, "TSO capability disabled\n");
11722 			tg3_flag_clear(tp, TSO_CAPABLE);
11723 		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
11724 			netdev_notice(tp->dev, "TSO capability restored\n");
11725 			tg3_flag_set(tp, TSO_CAPABLE);
11726 		}
11727 	}
11728 
11729 	tg3_carrier_off(tp);
11730 
11731 	err = tg3_power_up(tp);
11732 	if (err)
11733 		return err;
11734 
11735 	tg3_full_lock(tp, 0);
11736 
11737 	tg3_disable_ints(tp);
11738 	tg3_flag_clear(tp, INIT_COMPLETE);
11739 
11740 	tg3_full_unlock(tp);
11741 
11742 	err = tg3_start(tp,
11743 			!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11744 			true, true);
11745 	if (err) {
11746 		tg3_frob_aux_power(tp, false);
11747 		pci_set_power_state(tp->pdev, PCI_D3hot);
11748 	}
11749 
11750 	return err;
11751 }
11752 
11753 static int tg3_close(struct net_device *dev)
11754 {
11755 	struct tg3 *tp = netdev_priv(dev);
11756 
11757 	if (tp->pcierr_recovery) {
11758 		netdev_err(dev, "Failed to close device. PCI error recovery "
11759 			   "in progress\n");
11760 		return -EAGAIN;
11761 	}
11762 
11763 	tg3_stop(tp);
11764 
11765 	if (pci_device_is_present(tp->pdev)) {
11766 		tg3_power_down_prepare(tp);
11767 
11768 		tg3_carrier_off(tp);
11769 	}
11770 	return 0;
11771 }
11772 
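/* Combine the two 32-bit halves of a tg3 hardware counter into one u64. */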
11773 static inline u64 get_stat64(tg3_stat64_t *val)
11774 {
	return ((u64)val->high << 32) | ((u64)val->low);
11776 }
11777 
11778 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11779 {
11780 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11781 
11782 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11783 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11784 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
11785 		u32 val;
11786 
11787 		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11788 			tg3_writephy(tp, MII_TG3_TEST1,
11789 				     val | MII_TG3_TEST1_CRC_EN);
11790 			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11791 		} else
11792 			val = 0;
11793 
11794 		tp->phy_crc_errors += val;
11795 
11796 		return tp->phy_crc_errors;
11797 	}
11798 
11799 	return get_stat64(&hw_stats->rx_fcs_errors);
11800 }
11801 
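/* Each ethtool stat is the running total saved across chip resets plus the
 * current value of the 64-bit hardware counter.
 */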
11802 #define ESTAT_ADD(member) \
11803 	estats->member =	old_estats->member + \
11804 				get_stat64(&hw_stats->member)
11805 
11806 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11807 {
11808 	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11809 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11810 
11811 	ESTAT_ADD(rx_octets);
11812 	ESTAT_ADD(rx_fragments);
11813 	ESTAT_ADD(rx_ucast_packets);
11814 	ESTAT_ADD(rx_mcast_packets);
11815 	ESTAT_ADD(rx_bcast_packets);
11816 	ESTAT_ADD(rx_fcs_errors);
11817 	ESTAT_ADD(rx_align_errors);
11818 	ESTAT_ADD(rx_xon_pause_rcvd);
11819 	ESTAT_ADD(rx_xoff_pause_rcvd);
11820 	ESTAT_ADD(rx_mac_ctrl_rcvd);
11821 	ESTAT_ADD(rx_xoff_entered);
11822 	ESTAT_ADD(rx_frame_too_long_errors);
11823 	ESTAT_ADD(rx_jabbers);
11824 	ESTAT_ADD(rx_undersize_packets);
11825 	ESTAT_ADD(rx_in_length_errors);
11826 	ESTAT_ADD(rx_out_length_errors);
11827 	ESTAT_ADD(rx_64_or_less_octet_packets);
11828 	ESTAT_ADD(rx_65_to_127_octet_packets);
11829 	ESTAT_ADD(rx_128_to_255_octet_packets);
11830 	ESTAT_ADD(rx_256_to_511_octet_packets);
11831 	ESTAT_ADD(rx_512_to_1023_octet_packets);
11832 	ESTAT_ADD(rx_1024_to_1522_octet_packets);
11833 	ESTAT_ADD(rx_1523_to_2047_octet_packets);
11834 	ESTAT_ADD(rx_2048_to_4095_octet_packets);
11835 	ESTAT_ADD(rx_4096_to_8191_octet_packets);
11836 	ESTAT_ADD(rx_8192_to_9022_octet_packets);
11837 
11838 	ESTAT_ADD(tx_octets);
11839 	ESTAT_ADD(tx_collisions);
11840 	ESTAT_ADD(tx_xon_sent);
11841 	ESTAT_ADD(tx_xoff_sent);
11842 	ESTAT_ADD(tx_flow_control);
11843 	ESTAT_ADD(tx_mac_errors);
11844 	ESTAT_ADD(tx_single_collisions);
11845 	ESTAT_ADD(tx_mult_collisions);
11846 	ESTAT_ADD(tx_deferred);
11847 	ESTAT_ADD(tx_excessive_collisions);
11848 	ESTAT_ADD(tx_late_collisions);
11849 	ESTAT_ADD(tx_collide_2times);
11850 	ESTAT_ADD(tx_collide_3times);
11851 	ESTAT_ADD(tx_collide_4times);
11852 	ESTAT_ADD(tx_collide_5times);
11853 	ESTAT_ADD(tx_collide_6times);
11854 	ESTAT_ADD(tx_collide_7times);
11855 	ESTAT_ADD(tx_collide_8times);
11856 	ESTAT_ADD(tx_collide_9times);
11857 	ESTAT_ADD(tx_collide_10times);
11858 	ESTAT_ADD(tx_collide_11times);
11859 	ESTAT_ADD(tx_collide_12times);
11860 	ESTAT_ADD(tx_collide_13times);
11861 	ESTAT_ADD(tx_collide_14times);
11862 	ESTAT_ADD(tx_collide_15times);
11863 	ESTAT_ADD(tx_ucast_packets);
11864 	ESTAT_ADD(tx_mcast_packets);
11865 	ESTAT_ADD(tx_bcast_packets);
11866 	ESTAT_ADD(tx_carrier_sense_errors);
11867 	ESTAT_ADD(tx_discards);
11868 	ESTAT_ADD(tx_errors);
11869 
11870 	ESTAT_ADD(dma_writeq_full);
11871 	ESTAT_ADD(dma_write_prioq_full);
11872 	ESTAT_ADD(rxbds_empty);
11873 	ESTAT_ADD(rx_discards);
11874 	ESTAT_ADD(rx_errors);
11875 	ESTAT_ADD(rx_threshold_hit);
11876 
11877 	ESTAT_ADD(dma_readq_full);
11878 	ESTAT_ADD(dma_read_prioq_full);
11879 	ESTAT_ADD(tx_comp_queue_full);
11880 
11881 	ESTAT_ADD(ring_set_send_prod_index);
11882 	ESTAT_ADD(ring_status_update);
11883 	ESTAT_ADD(nic_irqs);
11884 	ESTAT_ADD(nic_avoided_irqs);
11885 	ESTAT_ADD(nic_tx_threshold_hit);
11886 
11887 	ESTAT_ADD(mbuf_lwm_thresh_hit);
11888 }
11889 
11890 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11891 {
11892 	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11893 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11894 
11895 	stats->rx_packets = old_stats->rx_packets +
11896 		get_stat64(&hw_stats->rx_ucast_packets) +
11897 		get_stat64(&hw_stats->rx_mcast_packets) +
11898 		get_stat64(&hw_stats->rx_bcast_packets);
11899 
11900 	stats->tx_packets = old_stats->tx_packets +
11901 		get_stat64(&hw_stats->tx_ucast_packets) +
11902 		get_stat64(&hw_stats->tx_mcast_packets) +
11903 		get_stat64(&hw_stats->tx_bcast_packets);
11904 
11905 	stats->rx_bytes = old_stats->rx_bytes +
11906 		get_stat64(&hw_stats->rx_octets);
11907 	stats->tx_bytes = old_stats->tx_bytes +
11908 		get_stat64(&hw_stats->tx_octets);
11909 
11910 	stats->rx_errors = old_stats->rx_errors +
11911 		get_stat64(&hw_stats->rx_errors);
11912 	stats->tx_errors = old_stats->tx_errors +
11913 		get_stat64(&hw_stats->tx_errors) +
11914 		get_stat64(&hw_stats->tx_mac_errors) +
11915 		get_stat64(&hw_stats->tx_carrier_sense_errors) +
11916 		get_stat64(&hw_stats->tx_discards);
11917 
11918 	stats->multicast = old_stats->multicast +
11919 		get_stat64(&hw_stats->rx_mcast_packets);
11920 	stats->collisions = old_stats->collisions +
11921 		get_stat64(&hw_stats->tx_collisions);
11922 
11923 	stats->rx_length_errors = old_stats->rx_length_errors +
11924 		get_stat64(&hw_stats->rx_frame_too_long_errors) +
11925 		get_stat64(&hw_stats->rx_undersize_packets);
11926 
11927 	stats->rx_frame_errors = old_stats->rx_frame_errors +
11928 		get_stat64(&hw_stats->rx_align_errors);
11929 	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11930 		get_stat64(&hw_stats->tx_discards);
11931 	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11932 		get_stat64(&hw_stats->tx_carrier_sense_errors);
11933 
11934 	stats->rx_crc_errors = old_stats->rx_crc_errors +
11935 		tg3_calc_crc_errors(tp);
11936 
11937 	stats->rx_missed_errors = old_stats->rx_missed_errors +
11938 		get_stat64(&hw_stats->rx_discards);
11939 
11940 	stats->rx_dropped = tp->rx_dropped;
11941 	stats->tx_dropped = tp->tx_dropped;
11942 }
11943 
11944 static int tg3_get_regs_len(struct net_device *dev)
11945 {
11946 	return TG3_REG_BLK_SIZE;
11947 }
11948 
11949 static void tg3_get_regs(struct net_device *dev,
11950 		struct ethtool_regs *regs, void *_p)
11951 {
11952 	struct tg3 *tp = netdev_priv(dev);
11953 
11954 	regs->version = 0;
11955 
11956 	memset(_p, 0, TG3_REG_BLK_SIZE);
11957 
11958 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11959 		return;
11960 
11961 	tg3_full_lock(tp, 0);
11962 
11963 	tg3_dump_legacy_regs(tp, (u32 *)_p);
11964 
11965 	tg3_full_unlock(tp);
11966 }
11967 
11968 static int tg3_get_eeprom_len(struct net_device *dev)
11969 {
11970 	struct tg3 *tp = netdev_priv(dev);
11971 
11972 	return tp->nvram_size;
11973 }
11974 
11975 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11976 {
11977 	struct tg3 *tp = netdev_priv(dev);
11978 	int ret, cpmu_restore = 0;
11979 	u8  *pd;
11980 	u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11981 	__be32 val;
11982 
11983 	if (tg3_flag(tp, NO_NVRAM))
11984 		return -EINVAL;
11985 
11986 	offset = eeprom->offset;
11987 	len = eeprom->len;
11988 	eeprom->len = 0;
11989 
11990 	eeprom->magic = TG3_EEPROM_MAGIC;
11991 
11992 	/* Override clock, link aware and link idle modes */
11993 	if (tg3_flag(tp, CPMU_PRESENT)) {
11994 		cpmu_val = tr32(TG3_CPMU_CTRL);
11995 		if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
11996 				CPMU_CTRL_LINK_IDLE_MODE)) {
11997 			tw32(TG3_CPMU_CTRL, cpmu_val &
11998 					    ~(CPMU_CTRL_LINK_AWARE_MODE |
11999 					     CPMU_CTRL_LINK_IDLE_MODE));
12000 			cpmu_restore = 1;
12001 		}
12002 	}
12003 	tg3_override_clk(tp);
12004 
12005 	if (offset & 3) {
12006 		/* adjustments to start on required 4 byte boundary */
12007 		b_offset = offset & 3;
12008 		b_count = 4 - b_offset;
12009 		if (b_count > len) {
12010 			/* i.e. offset=1 len=2 */
12011 			b_count = len;
12012 		}
12013 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12014 		if (ret)
12015 			goto eeprom_done;
12016 		memcpy(data, ((char *)&val) + b_offset, b_count);
12017 		len -= b_count;
12018 		offset += b_count;
12019 		eeprom->len += b_count;
12020 	}
12021 
12022 	/* read bytes up to the last 4 byte boundary */
12023 	pd = &data[eeprom->len];
12024 	for (i = 0; i < (len - (len & 3)); i += 4) {
12025 		ret = tg3_nvram_read_be32(tp, offset + i, &val);
12026 		if (ret) {
12027 			if (i)
12028 				i -= 4;
12029 			eeprom->len += i;
12030 			goto eeprom_done;
12031 		}
12032 		memcpy(pd + i, &val, 4);
12033 		if (need_resched()) {
12034 			if (signal_pending(current)) {
12035 				eeprom->len += i;
12036 				ret = -EINTR;
12037 				goto eeprom_done;
12038 			}
12039 			cond_resched();
12040 		}
12041 	}
12042 	eeprom->len += i;
12043 
12044 	if (len & 3) {
12045 		/* read last bytes not ending on 4 byte boundary */
12046 		pd = &data[eeprom->len];
12047 		b_count = len & 3;
12048 		b_offset = offset + len - b_count;
12049 		ret = tg3_nvram_read_be32(tp, b_offset, &val);
12050 		if (ret)
12051 			goto eeprom_done;
12052 		memcpy(pd, &val, b_count);
12053 		eeprom->len += b_count;
12054 	}
12055 	ret = 0;
12056 
12057 eeprom_done:
12058 	/* Restore clock, link aware and link idle modes */
12059 	tg3_restore_clk(tp);
12060 	if (cpmu_restore)
12061 		tw32(TG3_CPMU_CTRL, cpmu_val);
12062 
12063 	return ret;
12064 }
12065 
12066 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12067 {
12068 	struct tg3 *tp = netdev_priv(dev);
12069 	int ret;
12070 	u32 offset, len, b_offset, odd_len;
12071 	u8 *buf;
12072 	__be32 start = 0, end;
12073 
12074 	if (tg3_flag(tp, NO_NVRAM) ||
12075 	    eeprom->magic != TG3_EEPROM_MAGIC)
12076 		return -EINVAL;
12077 
12078 	offset = eeprom->offset;
12079 	len = eeprom->len;
12080 
12081 	if ((b_offset = (offset & 3))) {
12082 		/* adjustments to start on required 4 byte boundary */
12083 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12084 		if (ret)
12085 			return ret;
12086 		len += b_offset;
12087 		offset &= ~3;
12088 		if (len < 4)
12089 			len = 4;
12090 	}
12091 
12092 	odd_len = 0;
12093 	if (len & 3) {
12094 		/* adjustments to end on required 4 byte boundary */
12095 		odd_len = 1;
12096 		len = (len + 3) & ~3;
12097 		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12098 		if (ret)
12099 			return ret;
12100 	}
12101 
12102 	buf = data;
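	/* If the write starts or ends off a 4-byte boundary, stage it in a
	 * scratch buffer seeded with the bordering NVRAM words read above.
	 */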
12103 	if (b_offset || odd_len) {
12104 		buf = kmalloc(len, GFP_KERNEL);
12105 		if (!buf)
12106 			return -ENOMEM;
12107 		if (b_offset)
12108 			memcpy(buf, &start, 4);
12109 		if (odd_len)
12110 			memcpy(buf+len-4, &end, 4);
12111 		memcpy(buf + b_offset, data, eeprom->len);
12112 	}
12113 
12114 	ret = tg3_nvram_write_block(tp, offset, len, buf);
12115 
12116 	if (buf != data)
12117 		kfree(buf);
12118 
12119 	return ret;
12120 }
12121 
12122 static int tg3_get_link_ksettings(struct net_device *dev,
12123 				  struct ethtool_link_ksettings *cmd)
12124 {
12125 	struct tg3 *tp = netdev_priv(dev);
12126 	u32 supported, advertising;
12127 
12128 	if (tg3_flag(tp, USE_PHYLIB)) {
12129 		struct phy_device *phydev;
12130 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12131 			return -EAGAIN;
12132 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12133 		phy_ethtool_ksettings_get(phydev, cmd);
12134 
12135 		return 0;
12136 	}
12137 
12138 	supported = (SUPPORTED_Autoneg);
12139 
12140 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12141 		supported |= (SUPPORTED_1000baseT_Half |
12142 			      SUPPORTED_1000baseT_Full);
12143 
12144 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12145 		supported |= (SUPPORTED_100baseT_Half |
12146 			      SUPPORTED_100baseT_Full |
12147 			      SUPPORTED_10baseT_Half |
12148 			      SUPPORTED_10baseT_Full |
12149 			      SUPPORTED_TP);
12150 		cmd->base.port = PORT_TP;
12151 	} else {
12152 		supported |= SUPPORTED_FIBRE;
12153 		cmd->base.port = PORT_FIBRE;
12154 	}
12155 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12156 						supported);
12157 
12158 	advertising = tp->link_config.advertising;
12159 	if (tg3_flag(tp, PAUSE_AUTONEG)) {
12160 		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12161 			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12162 				advertising |= ADVERTISED_Pause;
12163 			} else {
12164 				advertising |= ADVERTISED_Pause |
12165 					ADVERTISED_Asym_Pause;
12166 			}
12167 		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12168 			advertising |= ADVERTISED_Asym_Pause;
12169 		}
12170 	}
12171 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12172 						advertising);
12173 
12174 	if (netif_running(dev) && tp->link_up) {
12175 		cmd->base.speed = tp->link_config.active_speed;
12176 		cmd->base.duplex = tp->link_config.active_duplex;
12177 		ethtool_convert_legacy_u32_to_link_mode(
12178 			cmd->link_modes.lp_advertising,
12179 			tp->link_config.rmt_adv);
12180 
12181 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12182 			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12183 				cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12184 			else
12185 				cmd->base.eth_tp_mdix = ETH_TP_MDI;
12186 		}
12187 	} else {
12188 		cmd->base.speed = SPEED_UNKNOWN;
12189 		cmd->base.duplex = DUPLEX_UNKNOWN;
12190 		cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12191 	}
12192 	cmd->base.phy_address = tp->phy_addr;
12193 	cmd->base.autoneg = tp->link_config.autoneg;
12194 	return 0;
12195 }
12196 
12197 static int tg3_set_link_ksettings(struct net_device *dev,
12198 				  const struct ethtool_link_ksettings *cmd)
12199 {
12200 	struct tg3 *tp = netdev_priv(dev);
12201 	u32 speed = cmd->base.speed;
12202 	u32 advertising;
12203 
12204 	if (tg3_flag(tp, USE_PHYLIB)) {
12205 		struct phy_device *phydev;
12206 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12207 			return -EAGAIN;
12208 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12209 		return phy_ethtool_ksettings_set(phydev, cmd);
12210 	}
12211 
12212 	if (cmd->base.autoneg != AUTONEG_ENABLE &&
12213 	    cmd->base.autoneg != AUTONEG_DISABLE)
12214 		return -EINVAL;
12215 
12216 	if (cmd->base.autoneg == AUTONEG_DISABLE &&
12217 	    cmd->base.duplex != DUPLEX_FULL &&
12218 	    cmd->base.duplex != DUPLEX_HALF)
12219 		return -EINVAL;
12220 
12221 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
12222 						cmd->link_modes.advertising);
12223 
12224 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
12225 		u32 mask = ADVERTISED_Autoneg |
12226 			   ADVERTISED_Pause |
12227 			   ADVERTISED_Asym_Pause;
12228 
12229 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12230 			mask |= ADVERTISED_1000baseT_Half |
12231 				ADVERTISED_1000baseT_Full;
12232 
12233 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12234 			mask |= ADVERTISED_100baseT_Half |
12235 				ADVERTISED_100baseT_Full |
12236 				ADVERTISED_10baseT_Half |
12237 				ADVERTISED_10baseT_Full |
12238 				ADVERTISED_TP;
12239 		else
12240 			mask |= ADVERTISED_FIBRE;
12241 
12242 		if (advertising & ~mask)
12243 			return -EINVAL;
12244 
12245 		mask &= (ADVERTISED_1000baseT_Half |
12246 			 ADVERTISED_1000baseT_Full |
12247 			 ADVERTISED_100baseT_Half |
12248 			 ADVERTISED_100baseT_Full |
12249 			 ADVERTISED_10baseT_Half |
12250 			 ADVERTISED_10baseT_Full);
12251 
12252 		advertising &= mask;
12253 	} else {
12254 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12255 			if (speed != SPEED_1000)
12256 				return -EINVAL;
12257 
12258 			if (cmd->base.duplex != DUPLEX_FULL)
12259 				return -EINVAL;
12260 		} else {
12261 			if (speed != SPEED_100 &&
12262 			    speed != SPEED_10)
12263 				return -EINVAL;
12264 		}
12265 	}
12266 
12267 	tg3_full_lock(tp, 0);
12268 
12269 	tp->link_config.autoneg = cmd->base.autoneg;
12270 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
12271 		tp->link_config.advertising = (advertising |
12272 					      ADVERTISED_Autoneg);
12273 		tp->link_config.speed = SPEED_UNKNOWN;
12274 		tp->link_config.duplex = DUPLEX_UNKNOWN;
12275 	} else {
12276 		tp->link_config.advertising = 0;
12277 		tp->link_config.speed = speed;
12278 		tp->link_config.duplex = cmd->base.duplex;
12279 	}
12280 
12281 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12282 
12283 	tg3_warn_mgmt_link_flap(tp);
12284 
12285 	if (netif_running(dev))
12286 		tg3_setup_phy(tp, true);
12287 
12288 	tg3_full_unlock(tp);
12289 
12290 	return 0;
12291 }
12292 
12293 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12294 {
12295 	struct tg3 *tp = netdev_priv(dev);
12296 
12297 	strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12298 	strscpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12299 	strscpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12300 }
12301 
12302 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12303 {
12304 	struct tg3 *tp = netdev_priv(dev);
12305 
12306 	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12307 		wol->supported = WAKE_MAGIC;
12308 	else
12309 		wol->supported = 0;
12310 	wol->wolopts = 0;
12311 	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12312 		wol->wolopts = WAKE_MAGIC;
12313 	memset(&wol->sopass, 0, sizeof(wol->sopass));
12314 }
12315 
12316 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12317 {
12318 	struct tg3 *tp = netdev_priv(dev);
12319 	struct device *dp = &tp->pdev->dev;
12320 
12321 	if (wol->wolopts & ~WAKE_MAGIC)
12322 		return -EINVAL;
12323 	if ((wol->wolopts & WAKE_MAGIC) &&
12324 	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12325 		return -EINVAL;
12326 
12327 	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12328 
12329 	if (device_may_wakeup(dp))
12330 		tg3_flag_set(tp, WOL_ENABLE);
12331 	else
12332 		tg3_flag_clear(tp, WOL_ENABLE);
12333 
12334 	return 0;
12335 }
12336 
12337 static u32 tg3_get_msglevel(struct net_device *dev)
12338 {
12339 	struct tg3 *tp = netdev_priv(dev);
12340 	return tp->msg_enable;
12341 }
12342 
12343 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12344 {
12345 	struct tg3 *tp = netdev_priv(dev);
12346 	tp->msg_enable = value;
12347 }
12348 
12349 static int tg3_nway_reset(struct net_device *dev)
12350 {
12351 	struct tg3 *tp = netdev_priv(dev);
12352 	int r;
12353 
12354 	if (!netif_running(dev))
12355 		return -EAGAIN;
12356 
12357 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12358 		return -EINVAL;
12359 
12360 	tg3_warn_mgmt_link_flap(tp);
12361 
12362 	if (tg3_flag(tp, USE_PHYLIB)) {
12363 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12364 			return -EAGAIN;
12365 		r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12366 	} else {
12367 		u32 bmcr;
12368 
12369 		spin_lock_bh(&tp->lock);
12370 		r = -EINVAL;
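		/* Read BMCR twice and keep the second value; the first
		 * read's result is intentionally discarded.
		 */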
12371 		tg3_readphy(tp, MII_BMCR, &bmcr);
12372 		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12373 		    ((bmcr & BMCR_ANENABLE) ||
12374 		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12375 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12376 						   BMCR_ANENABLE);
12377 			r = 0;
12378 		}
12379 		spin_unlock_bh(&tp->lock);
12380 	}
12381 
12382 	return r;
12383 }
12384 
12385 static void tg3_get_ringparam(struct net_device *dev,
12386 			      struct ethtool_ringparam *ering,
12387 			      struct kernel_ethtool_ringparam *kernel_ering,
12388 			      struct netlink_ext_ack *extack)
12389 {
12390 	struct tg3 *tp = netdev_priv(dev);
12391 
12392 	ering->rx_max_pending = tp->rx_std_ring_mask;
12393 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12394 		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12395 	else
12396 		ering->rx_jumbo_max_pending = 0;
12397 
12398 	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12399 
12400 	ering->rx_pending = tp->rx_pending;
12401 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12402 		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12403 	else
12404 		ering->rx_jumbo_pending = 0;
12405 
12406 	ering->tx_pending = tp->napi[0].tx_pending;
12407 }
12408 
12409 static int tg3_set_ringparam(struct net_device *dev,
12410 			     struct ethtool_ringparam *ering,
12411 			     struct kernel_ethtool_ringparam *kernel_ering,
12412 			     struct netlink_ext_ack *extack)
12413 {
12414 	struct tg3 *tp = netdev_priv(dev);
12415 	int i, irq_sync = 0, err = 0;
12416 	bool reset_phy = false;
12417 
12418 	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12419 	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12420 	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12421 	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
12422 	    (tg3_flag(tp, TSO_BUG) &&
12423 	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12424 		return -EINVAL;
12425 
12426 	if (netif_running(dev)) {
12427 		tg3_phy_stop(tp);
12428 		tg3_netif_stop(tp);
12429 		irq_sync = 1;
12430 	}
12431 
12432 	tg3_full_lock(tp, irq_sync);
12433 
12434 	tp->rx_pending = ering->rx_pending;
12435 
12436 	if (tg3_flag(tp, MAX_RXPEND_64) &&
12437 	    tp->rx_pending > 63)
12438 		tp->rx_pending = 63;
12439 
12440 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12441 		tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12442 
12443 	for (i = 0; i < tp->irq_max; i++)
12444 		tp->napi[i].tx_pending = ering->tx_pending;
12445 
12446 	if (netif_running(dev)) {
12447 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12448 		/* Reset PHY to avoid PHY lock up */
12449 		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12450 		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
12451 		    tg3_asic_rev(tp) == ASIC_REV_5720)
12452 			reset_phy = true;
12453 
12454 		err = tg3_restart_hw(tp, reset_phy);
12455 		if (!err)
12456 			tg3_netif_start(tp);
12457 	}
12458 
12459 	tg3_full_unlock(tp);
12460 
12461 	if (irq_sync && !err)
12462 		tg3_phy_start(tp);
12463 
12464 	return err;
12465 }
12466 
12467 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12468 {
12469 	struct tg3 *tp = netdev_priv(dev);
12470 
12471 	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12472 
12473 	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12474 		epause->rx_pause = 1;
12475 	else
12476 		epause->rx_pause = 0;
12477 
12478 	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12479 		epause->tx_pause = 1;
12480 	else
12481 		epause->tx_pause = 0;
12482 }
12483 
12484 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12485 {
12486 	struct tg3 *tp = netdev_priv(dev);
12487 	int err = 0;
12488 	bool reset_phy = false;
12489 
12490 	if (tp->link_config.autoneg == AUTONEG_ENABLE)
12491 		tg3_warn_mgmt_link_flap(tp);
12492 
12493 	if (tg3_flag(tp, USE_PHYLIB)) {
12494 		struct phy_device *phydev;
12495 
12496 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12497 
12498 		if (!phy_validate_pause(phydev, epause))
12499 			return -EINVAL;
12500 
12501 		tp->link_config.flowctrl = 0;
12502 		phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
12503 		if (epause->rx_pause) {
12504 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12505 
12506 			if (epause->tx_pause) {
12507 				tp->link_config.flowctrl |= FLOW_CTRL_TX;
12508 			}
12509 		} else if (epause->tx_pause) {
12510 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12511 		}
12512 
12513 		if (epause->autoneg)
12514 			tg3_flag_set(tp, PAUSE_AUTONEG);
12515 		else
12516 			tg3_flag_clear(tp, PAUSE_AUTONEG);
12517 
12518 		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12519 			if (phydev->autoneg) {
12520 				/* phy_set_asym_pause() will
12521 				 * renegotiate the link to inform our
12522 				 * link partner of our flow control
12523 				 * settings, even if the flow control
12524 				 * is forced.  Let tg3_adjust_link()
12525 				 * do the final flow control setup.
12526 				 */
12527 				return 0;
12528 			}
12529 
12530 			if (!epause->autoneg)
12531 				tg3_setup_flow_control(tp, 0, 0);
12532 		}
12533 	} else {
12534 		int irq_sync = 0;
12535 
12536 		if (netif_running(dev)) {
12537 			tg3_netif_stop(tp);
12538 			irq_sync = 1;
12539 		}
12540 
12541 		tg3_full_lock(tp, irq_sync);
12542 
12543 		if (epause->autoneg)
12544 			tg3_flag_set(tp, PAUSE_AUTONEG);
12545 		else
12546 			tg3_flag_clear(tp, PAUSE_AUTONEG);
12547 		if (epause->rx_pause)
12548 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12549 		else
12550 			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12551 		if (epause->tx_pause)
12552 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12553 		else
12554 			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12555 
12556 		if (netif_running(dev)) {
12557 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12558 			/* Reset PHY to avoid PHY lock up */
12559 			if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12560 			    tg3_asic_rev(tp) == ASIC_REV_5719 ||
12561 			    tg3_asic_rev(tp) == ASIC_REV_5720)
12562 				reset_phy = true;
12563 
12564 			err = tg3_restart_hw(tp, reset_phy);
12565 			if (!err)
12566 				tg3_netif_start(tp);
12567 		}
12568 
12569 		tg3_full_unlock(tp);
12570 	}
12571 
12572 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12573 
12574 	return err;
12575 }
12576 
12577 static int tg3_get_sset_count(struct net_device *dev, int sset)
12578 {
12579 	switch (sset) {
12580 	case ETH_SS_TEST:
12581 		return TG3_NUM_TEST;
12582 	case ETH_SS_STATS:
12583 		return TG3_NUM_STATS;
12584 	default:
12585 		return -EOPNOTSUPP;
12586 	}
12587 }
12588 
12589 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12590 			 u32 *rules __always_unused)
12591 {
12592 	struct tg3 *tp = netdev_priv(dev);
12593 
12594 	if (!tg3_flag(tp, SUPPORT_MSIX))
12595 		return -EOPNOTSUPP;
12596 
12597 	switch (info->cmd) {
12598 	case ETHTOOL_GRXRINGS:
12599 		if (netif_running(tp->dev))
12600 			info->data = tp->rxq_cnt;
12601 		else {
12602 			info->data = num_online_cpus();
12603 			if (info->data > TG3_RSS_MAX_NUM_QS)
12604 				info->data = TG3_RSS_MAX_NUM_QS;
12605 		}
12606 
12607 		return 0;
12608 
12609 	default:
12610 		return -EOPNOTSUPP;
12611 	}
12612 }
12613 
12614 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12615 {
12616 	u32 size = 0;
12617 	struct tg3 *tp = netdev_priv(dev);
12618 
12619 	if (tg3_flag(tp, SUPPORT_MSIX))
12620 		size = TG3_RSS_INDIR_TBL_SIZE;
12621 
12622 	return size;
12623 }
12624 
12625 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12626 {
12627 	struct tg3 *tp = netdev_priv(dev);
12628 	int i;
12629 
12630 	if (hfunc)
12631 		*hfunc = ETH_RSS_HASH_TOP;
12632 	if (!indir)
12633 		return 0;
12634 
12635 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12636 		indir[i] = tp->rss_ind_tbl[i];
12637 
12638 	return 0;
12639 }
12640 
12641 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12642 			const u8 hfunc)
12643 {
12644 	struct tg3 *tp = netdev_priv(dev);
12645 	size_t i;
12646 
	/* Only the indirection table can be changed; reject any attempt to
	 * set a hash key or a hash function other than Toeplitz.
	 */
12650 	if (key ||
12651 	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12652 		return -EOPNOTSUPP;
12653 
12654 	if (!indir)
12655 		return 0;
12656 
12657 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12658 		tp->rss_ind_tbl[i] = indir[i];
12659 
12660 	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12661 		return 0;
12662 
12663 	/* It is legal to write the indirection
12664 	 * table while the device is running.
12665 	 */
12666 	tg3_full_lock(tp, 0);
12667 	tg3_rss_write_indir_tbl(tp);
12668 	tg3_full_unlock(tp);
12669 
12670 	return 0;
12671 }
12672 
12673 static void tg3_get_channels(struct net_device *dev,
12674 			     struct ethtool_channels *channel)
12675 {
12676 	struct tg3 *tp = netdev_priv(dev);
12677 	u32 deflt_qs = netif_get_num_default_rss_queues();
12678 
12679 	channel->max_rx = tp->rxq_max;
12680 	channel->max_tx = tp->txq_max;
12681 
12682 	if (netif_running(dev)) {
12683 		channel->rx_count = tp->rxq_cnt;
12684 		channel->tx_count = tp->txq_cnt;
12685 	} else {
12686 		if (tp->rxq_req)
12687 			channel->rx_count = tp->rxq_req;
12688 		else
12689 			channel->rx_count = min(deflt_qs, tp->rxq_max);
12690 
12691 		if (tp->txq_req)
12692 			channel->tx_count = tp->txq_req;
12693 		else
12694 			channel->tx_count = min(deflt_qs, tp->txq_max);
12695 	}
12696 }
12697 
12698 static int tg3_set_channels(struct net_device *dev,
12699 			    struct ethtool_channels *channel)
12700 {
12701 	struct tg3 *tp = netdev_priv(dev);
12702 
12703 	if (!tg3_flag(tp, SUPPORT_MSIX))
12704 		return -EOPNOTSUPP;
12705 
12706 	if (channel->rx_count > tp->rxq_max ||
12707 	    channel->tx_count > tp->txq_max)
12708 		return -EINVAL;
12709 
12710 	tp->rxq_req = channel->rx_count;
12711 	tp->txq_req = channel->tx_count;
12712 
12713 	if (!netif_running(dev))
12714 		return 0;
12715 
12716 	tg3_stop(tp);
12717 
12718 	tg3_carrier_off(tp);
12719 
12720 	tg3_start(tp, true, false, false);
12721 
12722 	return 0;
12723 }
12724 
12725 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12726 {
12727 	switch (stringset) {
12728 	case ETH_SS_STATS:
12729 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12730 		break;
12731 	case ETH_SS_TEST:
12732 		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12733 		break;
12734 	default:
12735 		WARN_ON(1);	/* we need a WARN() */
12736 		break;
12737 	}
12738 }
12739 
12740 static int tg3_set_phys_id(struct net_device *dev,
12741 			    enum ethtool_phys_id_state state)
12742 {
12743 	struct tg3 *tp = netdev_priv(dev);
12744 
12745 	switch (state) {
12746 	case ETHTOOL_ID_ACTIVE:
12747 		return 1;	/* cycle on/off once per second */
12748 
12749 	case ETHTOOL_ID_ON:
12750 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12751 		     LED_CTRL_1000MBPS_ON |
12752 		     LED_CTRL_100MBPS_ON |
12753 		     LED_CTRL_10MBPS_ON |
12754 		     LED_CTRL_TRAFFIC_OVERRIDE |
12755 		     LED_CTRL_TRAFFIC_BLINK |
12756 		     LED_CTRL_TRAFFIC_LED);
12757 		break;
12758 
12759 	case ETHTOOL_ID_OFF:
12760 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12761 		     LED_CTRL_TRAFFIC_OVERRIDE);
12762 		break;
12763 
12764 	case ETHTOOL_ID_INACTIVE:
12765 		tw32(MAC_LED_CTRL, tp->led_ctrl);
12766 		break;
12767 	}
12768 
12769 	return 0;
12770 }
12771 
12772 static void tg3_get_ethtool_stats(struct net_device *dev,
12773 				   struct ethtool_stats *estats, u64 *tmp_stats)
12774 {
12775 	struct tg3 *tp = netdev_priv(dev);
12776 
12777 	if (tp->hw_stats)
12778 		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12779 	else
12780 		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12781 }
12782 
12783 static __be32 *tg3_vpd_readblock(struct tg3 *tp, unsigned int *vpdlen)
12784 {
12785 	int i;
12786 	__be32 *buf;
12787 	u32 offset = 0, len = 0;
12788 	u32 magic, val;
12789 
12790 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12791 		return NULL;
12792 
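	/* VPD may live in NVRAM (possibly behind an extended-VPD directory
	 * entry) or be exposed through the PCI VPD capability; the NVRAM
	 * magic decides which path to take.
	 */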
12793 	if (magic == TG3_EEPROM_MAGIC) {
12794 		for (offset = TG3_NVM_DIR_START;
12795 		     offset < TG3_NVM_DIR_END;
12796 		     offset += TG3_NVM_DIRENT_SIZE) {
12797 			if (tg3_nvram_read(tp, offset, &val))
12798 				return NULL;
12799 
12800 			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12801 			    TG3_NVM_DIRTYPE_EXTVPD)
12802 				break;
12803 		}
12804 
12805 		if (offset != TG3_NVM_DIR_END) {
12806 			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12807 			if (tg3_nvram_read(tp, offset + 4, &offset))
12808 				return NULL;
12809 
12810 			offset = tg3_nvram_logical_addr(tp, offset);
12811 		}
12812 
12813 		if (!offset || !len) {
12814 			offset = TG3_NVM_VPD_OFF;
12815 			len = TG3_NVM_VPD_LEN;
12816 		}
12817 
12818 		buf = kmalloc(len, GFP_KERNEL);
12819 		if (!buf)
12820 			return NULL;
12821 
12822 		for (i = 0; i < len; i += 4) {
12823 			/* The data is in little-endian format in NVRAM.
12824 			 * Use the big-endian read routines to preserve
12825 			 * the byte order as it exists in NVRAM.
12826 			 */
12827 			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12828 				goto error;
12829 		}
12830 		*vpdlen = len;
12831 	} else {
12832 		buf = pci_vpd_alloc(tp->pdev, vpdlen);
12833 		if (IS_ERR(buf))
12834 			return NULL;
12835 	}
12836 
12837 	return buf;
12838 
12839 error:
12840 	kfree(buf);
12841 	return NULL;
12842 }
12843 
12844 #define NVRAM_TEST_SIZE 0x100
12845 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
12846 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
12847 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
12848 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
12849 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
12850 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
12851 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12852 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12853 
12854 static int tg3_test_nvram(struct tg3 *tp)
12855 {
12856 	u32 csum, magic;
12857 	__be32 *buf;
12858 	int i, j, k, err = 0, size;
12859 	unsigned int len;
12860 
12861 	if (tg3_flag(tp, NO_NVRAM))
12862 		return 0;
12863 
12864 	if (tg3_nvram_read(tp, 0, &magic) != 0)
12865 		return -EIO;
12866 
12867 	if (magic == TG3_EEPROM_MAGIC)
12868 		size = NVRAM_TEST_SIZE;
12869 	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12870 		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12871 		    TG3_EEPROM_SB_FORMAT_1) {
12872 			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12873 			case TG3_EEPROM_SB_REVISION_0:
12874 				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12875 				break;
12876 			case TG3_EEPROM_SB_REVISION_2:
12877 				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12878 				break;
12879 			case TG3_EEPROM_SB_REVISION_3:
12880 				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12881 				break;
12882 			case TG3_EEPROM_SB_REVISION_4:
12883 				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12884 				break;
12885 			case TG3_EEPROM_SB_REVISION_5:
12886 				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12887 				break;
12888 			case TG3_EEPROM_SB_REVISION_6:
12889 				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12890 				break;
12891 			default:
12892 				return -EIO;
12893 			}
12894 		} else
12895 			return 0;
12896 	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12897 		size = NVRAM_SELFBOOT_HW_SIZE;
12898 	else
12899 		return -EIO;
12900 
12901 	buf = kmalloc(size, GFP_KERNEL);
12902 	if (buf == NULL)
12903 		return -ENOMEM;
12904 
12905 	err = -EIO;
12906 	for (i = 0, j = 0; i < size; i += 4, j++) {
12907 		err = tg3_nvram_read_be32(tp, i, &buf[j]);
12908 		if (err)
12909 			break;
12910 	}
12911 	if (i < size)
12912 		goto out;
12913 
12914 	/* Selfboot format */
12915 	magic = be32_to_cpu(buf[0]);
12916 	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12917 	    TG3_EEPROM_MAGIC_FW) {
12918 		u8 *buf8 = (u8 *) buf, csum8 = 0;
12919 
12920 		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12921 		    TG3_EEPROM_SB_REVISION_2) {
12922 			/* For rev 2, the csum doesn't include the MBA. */
12923 			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12924 				csum8 += buf8[i];
12925 			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12926 				csum8 += buf8[i];
12927 		} else {
12928 			for (i = 0; i < size; i++)
12929 				csum8 += buf8[i];
12930 		}
12931 
12932 		if (csum8 == 0) {
12933 			err = 0;
12934 			goto out;
12935 		}
12936 
12937 		err = -EIO;
12938 		goto out;
12939 	}
12940 
12941 	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12942 	    TG3_EEPROM_MAGIC_HW) {
12943 		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12944 		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12945 		u8 *buf8 = (u8 *) buf;
12946 
12947 		/* Separate the parity bits and the data bytes.  */
12948 		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12949 			if ((i == 0) || (i == 8)) {
12950 				int l;
12951 				u8 msk;
12952 
12953 				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12954 					parity[k++] = buf8[i] & msk;
12955 				i++;
12956 			} else if (i == 16) {
12957 				int l;
12958 				u8 msk;
12959 
12960 				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12961 					parity[k++] = buf8[i] & msk;
12962 				i++;
12963 
12964 				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12965 					parity[k++] = buf8[i] & msk;
12966 				i++;
12967 			}
12968 			data[j++] = buf8[i];
12969 		}
12970 
12971 		err = -EIO;
12972 		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12973 			u8 hw8 = hweight8(data[i]);
12974 
12975 			if ((hw8 & 0x1) && parity[i])
12976 				goto out;
12977 			else if (!(hw8 & 0x1) && !parity[i])
12978 				goto out;
12979 		}
12980 		err = 0;
12981 		goto out;
12982 	}
12983 
12984 	err = -EIO;
12985 
12986 	/* Bootstrap checksum at offset 0x10 */
12987 	csum = calc_crc((unsigned char *) buf, 0x10);
12988 	if (csum != le32_to_cpu(buf[0x10/4]))
12989 		goto out;
12990 
12991 	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12992 	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12993 	if (csum != le32_to_cpu(buf[0xfc/4]))
12994 		goto out;
12995 
12996 	kfree(buf);
12997 
12998 	buf = tg3_vpd_readblock(tp, &len);
12999 	if (!buf)
13000 		return -ENOMEM;
13001 
13002 	err = pci_vpd_check_csum(buf, len);
13003 	/* go on if no checksum found */
13004 	if (err == 1)
13005 		err = 0;
13006 out:
13007 	kfree(buf);
13008 	return err;
13009 }
13010 
13011 #define TG3_SERDES_TIMEOUT_SEC	2
13012 #define TG3_COPPER_TIMEOUT_SEC	6
13013 
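/* Poll for link-up for a few seconds (copper parts get longer than
 * serdes).  Returns 0 once the link is up, -EIO on timeout, or
 * -ENODEV if the interface is down.
 */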
13014 static int tg3_test_link(struct tg3 *tp)
13015 {
13016 	int i, max;
13017 
13018 	if (!netif_running(tp->dev))
13019 		return -ENODEV;
13020 
13021 	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13022 		max = TG3_SERDES_TIMEOUT_SEC;
13023 	else
13024 		max = TG3_COPPER_TIMEOUT_SEC;
13025 
13026 	for (i = 0; i < max; i++) {
13027 		if (tp->link_up)
13028 			return 0;
13029 
13030 		if (msleep_interruptible(1000))
13031 			break;
13032 	}
13033 
13034 	return -EIO;
13035 }
13036 
13037 /* Only test the commonly used registers */
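/* Each table entry pairs a register offset with masks of its read-only
 * and read/write bits.  The test writes all-zeros and then all-ones
 * through the masks, checks that read-only bits keep their saved value
 * and that read/write bits latch what was written, and finally
 * restores the original register contents.
 */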
13038 static int tg3_test_registers(struct tg3 *tp)
13039 {
13040 	int i, is_5705, is_5750;
13041 	u32 offset, read_mask, write_mask, val, save_val, read_val;
13042 	static struct {
13043 		u16 offset;
13044 		u16 flags;
13045 #define TG3_FL_5705	0x1
13046 #define TG3_FL_NOT_5705	0x2
13047 #define TG3_FL_NOT_5788	0x4
13048 #define TG3_FL_NOT_5750	0x8
13049 		u32 read_mask;
13050 		u32 write_mask;
13051 	} reg_tbl[] = {
13052 		/* MAC Control Registers */
13053 		{ MAC_MODE, TG3_FL_NOT_5705,
13054 			0x00000000, 0x00ef6f8c },
13055 		{ MAC_MODE, TG3_FL_5705,
13056 			0x00000000, 0x01ef6b8c },
13057 		{ MAC_STATUS, TG3_FL_NOT_5705,
13058 			0x03800107, 0x00000000 },
13059 		{ MAC_STATUS, TG3_FL_5705,
13060 			0x03800100, 0x00000000 },
13061 		{ MAC_ADDR_0_HIGH, 0x0000,
13062 			0x00000000, 0x0000ffff },
13063 		{ MAC_ADDR_0_LOW, 0x0000,
13064 			0x00000000, 0xffffffff },
13065 		{ MAC_RX_MTU_SIZE, 0x0000,
13066 			0x00000000, 0x0000ffff },
13067 		{ MAC_TX_MODE, 0x0000,
13068 			0x00000000, 0x00000070 },
13069 		{ MAC_TX_LENGTHS, 0x0000,
13070 			0x00000000, 0x00003fff },
13071 		{ MAC_RX_MODE, TG3_FL_NOT_5705,
13072 			0x00000000, 0x000007fc },
13073 		{ MAC_RX_MODE, TG3_FL_5705,
13074 			0x00000000, 0x000007dc },
13075 		{ MAC_HASH_REG_0, 0x0000,
13076 			0x00000000, 0xffffffff },
13077 		{ MAC_HASH_REG_1, 0x0000,
13078 			0x00000000, 0xffffffff },
13079 		{ MAC_HASH_REG_2, 0x0000,
13080 			0x00000000, 0xffffffff },
13081 		{ MAC_HASH_REG_3, 0x0000,
13082 			0x00000000, 0xffffffff },
13083 
13084 		/* Receive Data and Receive BD Initiator Control Registers. */
13085 		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13086 			0x00000000, 0xffffffff },
13087 		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13088 			0x00000000, 0xffffffff },
13089 		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13090 			0x00000000, 0x00000003 },
13091 		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13092 			0x00000000, 0xffffffff },
13093 		{ RCVDBDI_STD_BD+0, 0x0000,
13094 			0x00000000, 0xffffffff },
13095 		{ RCVDBDI_STD_BD+4, 0x0000,
13096 			0x00000000, 0xffffffff },
13097 		{ RCVDBDI_STD_BD+8, 0x0000,
13098 			0x00000000, 0xffff0002 },
13099 		{ RCVDBDI_STD_BD+0xc, 0x0000,
13100 			0x00000000, 0xffffffff },
13101 
13102 		/* Receive BD Initiator Control Registers. */
13103 		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13104 			0x00000000, 0xffffffff },
13105 		{ RCVBDI_STD_THRESH, TG3_FL_5705,
13106 			0x00000000, 0x000003ff },
13107 		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13108 			0x00000000, 0xffffffff },
13109 
13110 		/* Host Coalescing Control Registers. */
13111 		{ HOSTCC_MODE, TG3_FL_NOT_5705,
13112 			0x00000000, 0x00000004 },
13113 		{ HOSTCC_MODE, TG3_FL_5705,
13114 			0x00000000, 0x000000f6 },
13115 		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13116 			0x00000000, 0xffffffff },
13117 		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13118 			0x00000000, 0x000003ff },
13119 		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13120 			0x00000000, 0xffffffff },
13121 		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13122 			0x00000000, 0x000003ff },
13123 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13124 			0x00000000, 0xffffffff },
13125 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13126 			0x00000000, 0x000000ff },
13127 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13128 			0x00000000, 0xffffffff },
13129 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13130 			0x00000000, 0x000000ff },
13131 		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13132 			0x00000000, 0xffffffff },
13133 		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13134 			0x00000000, 0xffffffff },
13135 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13136 			0x00000000, 0xffffffff },
13137 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13138 			0x00000000, 0x000000ff },
13139 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13140 			0x00000000, 0xffffffff },
13141 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13142 			0x00000000, 0x000000ff },
13143 		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13144 			0x00000000, 0xffffffff },
13145 		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13146 			0x00000000, 0xffffffff },
13147 		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13148 			0x00000000, 0xffffffff },
13149 		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13150 			0x00000000, 0xffffffff },
13151 		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13152 			0x00000000, 0xffffffff },
13153 		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13154 			0xffffffff, 0x00000000 },
13155 		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13156 			0xffffffff, 0x00000000 },
13157 
13158 		/* Buffer Manager Control Registers. */
13159 		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13160 			0x00000000, 0x007fff80 },
13161 		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13162 			0x00000000, 0x007fffff },
13163 		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13164 			0x00000000, 0x0000003f },
13165 		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13166 			0x00000000, 0x000001ff },
13167 		{ BUFMGR_MB_HIGH_WATER, 0x0000,
13168 			0x00000000, 0x000001ff },
13169 		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13170 			0xffffffff, 0x00000000 },
13171 		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13172 			0xffffffff, 0x00000000 },
13173 
13174 		/* Mailbox Registers */
13175 		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13176 			0x00000000, 0x000001ff },
13177 		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13178 			0x00000000, 0x000001ff },
13179 		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13180 			0x00000000, 0x000007ff },
13181 		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13182 			0x00000000, 0x000001ff },
13183 
13184 		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
13185 	};
13186 
13187 	is_5705 = is_5750 = 0;
13188 	if (tg3_flag(tp, 5705_PLUS)) {
13189 		is_5705 = 1;
13190 		if (tg3_flag(tp, 5750_PLUS))
13191 			is_5750 = 1;
13192 	}
13193 
13194 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13195 		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13196 			continue;
13197 
13198 		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13199 			continue;
13200 
13201 		if (tg3_flag(tp, IS_5788) &&
13202 		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
13203 			continue;
13204 
13205 		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13206 			continue;
13207 
13208 		offset = (u32) reg_tbl[i].offset;
13209 		read_mask = reg_tbl[i].read_mask;
13210 		write_mask = reg_tbl[i].write_mask;
13211 
13212 		/* Save the original register content */
13213 		save_val = tr32(offset);
13214 
13215 		/* Determine the read-only value. */
13216 		read_val = save_val & read_mask;
13217 
13218 		/* Write zero to the register, then make sure the read-only bits
13219 		 * are not changed and the read/write bits are all zeros.
13220 		 */
13221 		tw32(offset, 0);
13222 
13223 		val = tr32(offset);
13224 
13225 		/* Test the read-only and read/write bits. */
13226 		if (((val & read_mask) != read_val) || (val & write_mask))
13227 			goto out;
13228 
		/* Write ones to all the bits defined by read_mask and
		 * write_mask, then make sure the read-only bits are not
		 * changed and the read/write bits are all ones.
		 */
13233 		tw32(offset, read_mask | write_mask);
13234 
13235 		val = tr32(offset);
13236 
13237 		/* Test the read-only bits. */
13238 		if ((val & read_mask) != read_val)
13239 			goto out;
13240 
13241 		/* Test the read/write bits. */
13242 		if ((val & write_mask) != write_mask)
13243 			goto out;
13244 
13245 		tw32(offset, save_val);
13246 	}
13247 
13248 	return 0;
13249 
13250 out:
13251 	if (netif_msg_hw(tp))
13252 		netdev_err(tp->dev,
13253 			   "Register test failed at offset %x\n", offset);
13254 	tw32(offset, save_val);
13255 	return -EIO;
13256 }
13257 
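/* Write each test pattern to every word of the given region of
 * NIC-local memory and read it back, returning -EIO on the first
 * mismatch.
 */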
13258 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13259 {
13260 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13261 	int i;
13262 	u32 j;
13263 
13264 	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13265 		for (j = 0; j < len; j += 4) {
13266 			u32 val;
13267 
13268 			tg3_write_mem(tp, offset + j, test_pattern[i]);
13269 			tg3_read_mem(tp, offset + j, &val);
13270 			if (val != test_pattern[i])
13271 				return -EIO;
13272 		}
13273 	}
13274 	return 0;
13275 }
13276 
13277 static int tg3_test_memory(struct tg3 *tp)
13278 {
13279 	static struct mem_entry {
13280 		u32 offset;
13281 		u32 len;
13282 	} mem_tbl_570x[] = {
13283 		{ 0x00000000, 0x00b50},
13284 		{ 0x00002000, 0x1c000},
13285 		{ 0xffffffff, 0x00000}
13286 	}, mem_tbl_5705[] = {
13287 		{ 0x00000100, 0x0000c},
13288 		{ 0x00000200, 0x00008},
13289 		{ 0x00004000, 0x00800},
13290 		{ 0x00006000, 0x01000},
13291 		{ 0x00008000, 0x02000},
13292 		{ 0x00010000, 0x0e000},
13293 		{ 0xffffffff, 0x00000}
13294 	}, mem_tbl_5755[] = {
13295 		{ 0x00000200, 0x00008},
13296 		{ 0x00004000, 0x00800},
13297 		{ 0x00006000, 0x00800},
13298 		{ 0x00008000, 0x02000},
13299 		{ 0x00010000, 0x0c000},
13300 		{ 0xffffffff, 0x00000}
13301 	}, mem_tbl_5906[] = {
13302 		{ 0x00000200, 0x00008},
13303 		{ 0x00004000, 0x00400},
13304 		{ 0x00006000, 0x00400},
13305 		{ 0x00008000, 0x01000},
13306 		{ 0x00010000, 0x01000},
13307 		{ 0xffffffff, 0x00000}
13308 	}, mem_tbl_5717[] = {
13309 		{ 0x00000200, 0x00008},
13310 		{ 0x00010000, 0x0a000},
13311 		{ 0x00020000, 0x13c00},
13312 		{ 0xffffffff, 0x00000}
13313 	}, mem_tbl_57765[] = {
13314 		{ 0x00000200, 0x00008},
13315 		{ 0x00004000, 0x00800},
13316 		{ 0x00006000, 0x09800},
13317 		{ 0x00010000, 0x0a000},
13318 		{ 0xffffffff, 0x00000}
13319 	};
13320 	struct mem_entry *mem_tbl;
13321 	int err = 0;
13322 	int i;
13323 
13324 	if (tg3_flag(tp, 5717_PLUS))
13325 		mem_tbl = mem_tbl_5717;
13326 	else if (tg3_flag(tp, 57765_CLASS) ||
13327 		 tg3_asic_rev(tp) == ASIC_REV_5762)
13328 		mem_tbl = mem_tbl_57765;
13329 	else if (tg3_flag(tp, 5755_PLUS))
13330 		mem_tbl = mem_tbl_5755;
13331 	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13332 		mem_tbl = mem_tbl_5906;
13333 	else if (tg3_flag(tp, 5705_PLUS))
13334 		mem_tbl = mem_tbl_5705;
13335 	else
13336 		mem_tbl = mem_tbl_570x;
13337 
13338 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13339 		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13340 		if (err)
13341 			break;
13342 	}
13343 
13344 	return err;
13345 }
13346 
13347 #define TG3_TSO_MSS		500
13348 
13349 #define TG3_TSO_IP_HDR_LEN	20
13350 #define TG3_TSO_TCP_HDR_LEN	20
13351 #define TG3_TSO_TCP_OPT_LEN	12
13352 
13353 static const u8 tg3_tso_header[] = {
13354 0x08, 0x00,
13355 0x45, 0x00, 0x00, 0x00,
13356 0x00, 0x00, 0x40, 0x00,
13357 0x40, 0x06, 0x00, 0x00,
13358 0x0a, 0x00, 0x00, 0x01,
13359 0x0a, 0x00, 0x00, 0x02,
13360 0x0d, 0x00, 0xe0, 0x00,
13361 0x00, 0x00, 0x01, 0x00,
13362 0x00, 0x00, 0x02, 0x00,
13363 0x80, 0x10, 0x10, 0x00,
13364 0x14, 0x09, 0x00, 0x00,
13365 0x01, 0x01, 0x08, 0x0a,
13366 0x11, 0x11, 0x11, 0x11,
13367 0x11, 0x11, 0x11, 0x11,
13368 };
13369 
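/* Transmit a single frame (or a small TSO burst when tso_loopback is
 * set) and verify it arrives back on the receive ring: the producer
 * indices must advance as expected and the incrementing byte pattern
 * in the payload must survive the round trip intact.
 */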
13370 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13371 {
13372 	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13373 	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13374 	u32 budget;
13375 	struct sk_buff *skb;
13376 	u8 *tx_data, *rx_data;
13377 	dma_addr_t map;
13378 	int num_pkts, tx_len, rx_len, i, err;
13379 	struct tg3_rx_buffer_desc *desc;
13380 	struct tg3_napi *tnapi, *rnapi;
13381 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13382 
13383 	tnapi = &tp->napi[0];
13384 	rnapi = &tp->napi[0];
13385 	if (tp->irq_cnt > 1) {
13386 		if (tg3_flag(tp, ENABLE_RSS))
13387 			rnapi = &tp->napi[1];
13388 		if (tg3_flag(tp, ENABLE_TSS))
13389 			tnapi = &tp->napi[1];
13390 	}
13391 	coal_now = tnapi->coal_now | rnapi->coal_now;
13392 
13393 	err = -EIO;
13394 
13395 	tx_len = pktsz;
13396 	skb = netdev_alloc_skb(tp->dev, tx_len);
13397 	if (!skb)
13398 		return -ENOMEM;
13399 
13400 	tx_data = skb_put(skb, tx_len);
13401 	memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13402 	memset(tx_data + ETH_ALEN, 0x0, 8);
13403 
13404 	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13405 
13406 	if (tso_loopback) {
13407 		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13408 
13409 		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13410 			      TG3_TSO_TCP_OPT_LEN;
13411 
13412 		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13413 		       sizeof(tg3_tso_header));
13414 		mss = TG3_TSO_MSS;
13415 
13416 		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13417 		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13418 
13419 		/* Set the total length field in the IP header */
13420 		iph->tot_len = htons((u16)(mss + hdr_len));
13421 
13422 		base_flags = (TXD_FLAG_CPU_PRE_DMA |
13423 			      TXD_FLAG_CPU_POST_DMA);
13424 
13425 		if (tg3_flag(tp, HW_TSO_1) ||
13426 		    tg3_flag(tp, HW_TSO_2) ||
13427 		    tg3_flag(tp, HW_TSO_3)) {
13428 			struct tcphdr *th;
13429 			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13430 			th = (struct tcphdr *)&tx_data[val];
13431 			th->check = 0;
13432 		} else
13433 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
13434 
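		/* Tell the hardware how long the TSO header is.  Each
		 * TSO generation encodes it differently: HW_TSO_3 packs
		 * it across spare mss and base_flags bits, HW_TSO_2
		 * shifts it into the upper mss bits, and older parts
		 * encode only the TCP option length.
		 */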
13435 		if (tg3_flag(tp, HW_TSO_3)) {
13436 			mss |= (hdr_len & 0xc) << 12;
13437 			if (hdr_len & 0x10)
13438 				base_flags |= 0x00000010;
13439 			base_flags |= (hdr_len & 0x3e0) << 5;
13440 		} else if (tg3_flag(tp, HW_TSO_2))
13441 			mss |= hdr_len << 9;
13442 		else if (tg3_flag(tp, HW_TSO_1) ||
13443 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
13444 			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13445 		} else {
13446 			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13447 		}
13448 
13449 		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13450 	} else {
13451 		num_pkts = 1;
13452 		data_off = ETH_HLEN;
13453 
13454 		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13455 		    tx_len > VLAN_ETH_FRAME_LEN)
13456 			base_flags |= TXD_FLAG_JMB_PKT;
13457 	}
13458 
13459 	for (i = data_off; i < tx_len; i++)
13460 		tx_data[i] = (u8) (i & 0xff);
13461 
13462 	map = dma_map_single(&tp->pdev->dev, skb->data, tx_len, DMA_TO_DEVICE);
13463 	if (dma_mapping_error(&tp->pdev->dev, map)) {
13464 		dev_kfree_skb(skb);
13465 		return -EIO;
13466 	}
13467 
13468 	val = tnapi->tx_prod;
13469 	tnapi->tx_buffers[val].skb = skb;
13470 	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13471 
13472 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13473 	       rnapi->coal_now);
13474 
13475 	udelay(10);
13476 
13477 	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13478 
13479 	budget = tg3_tx_avail(tnapi);
13480 	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13481 			    base_flags | TXD_FLAG_END, mss, 0)) {
13482 		tnapi->tx_buffers[val].skb = NULL;
13483 		dev_kfree_skb(skb);
13484 		return -EIO;
13485 	}
13486 
13487 	tnapi->tx_prod++;
13488 
13489 	/* Sync BD data before updating mailbox */
13490 	wmb();
13491 
13492 	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13493 	tr32_mailbox(tnapi->prodmbox);
13494 
13495 	udelay(10);
13496 
13497 	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
13498 	for (i = 0; i < 35; i++) {
13499 		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13500 		       coal_now);
13501 
13502 		udelay(10);
13503 
13504 		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13505 		rx_idx = rnapi->hw_status->idx[0].rx_producer;
13506 		if ((tx_idx == tnapi->tx_prod) &&
13507 		    (rx_idx == (rx_start_idx + num_pkts)))
13508 			break;
13509 	}
13510 
13511 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13512 	dev_kfree_skb(skb);
13513 
13514 	if (tx_idx != tnapi->tx_prod)
13515 		goto out;
13516 
13517 	if (rx_idx != rx_start_idx + num_pkts)
13518 		goto out;
13519 
13520 	val = data_off;
13521 	while (rx_idx != rx_start_idx) {
13522 		desc = &rnapi->rx_rcb[rx_start_idx++];
13523 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13524 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13525 
13526 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13527 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13528 			goto out;
13529 
13530 		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13531 			 - ETH_FCS_LEN;
13532 
13533 		if (!tso_loopback) {
13534 			if (rx_len != tx_len)
13535 				goto out;
13536 
13537 			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13538 				if (opaque_key != RXD_OPAQUE_RING_STD)
13539 					goto out;
13540 			} else {
13541 				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13542 					goto out;
13543 			}
13544 		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13545 			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13546 			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
13547 			goto out;
13548 		}
13549 
13550 		if (opaque_key == RXD_OPAQUE_RING_STD) {
13551 			rx_data = tpr->rx_std_buffers[desc_idx].data;
13552 			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13553 					     mapping);
13554 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13555 			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13556 			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13557 					     mapping);
13558 		} else
13559 			goto out;
13560 
13561 		dma_sync_single_for_cpu(&tp->pdev->dev, map, rx_len,
13562 					DMA_FROM_DEVICE);
13563 
13564 		rx_data += TG3_RX_OFFSET(tp);
13565 		for (i = data_off; i < rx_len; i++, val++) {
13566 			if (*(rx_data + i) != (u8) (val & 0xff))
13567 				goto out;
13568 		}
13569 	}
13570 
13571 	err = 0;
13572 
13573 	/* tg3_free_rings will unmap and free the rx_data */
13574 out:
13575 	return err;
13576 }
13577 
13578 #define TG3_STD_LOOPBACK_FAILED		1
13579 #define TG3_JMB_LOOPBACK_FAILED		2
13580 #define TG3_TSO_LOOPBACK_FAILED		4
13581 #define TG3_LOOPBACK_FAILED \
13582 	(TG3_STD_LOOPBACK_FAILED | \
13583 	 TG3_JMB_LOOPBACK_FAILED | \
13584 	 TG3_TSO_LOOPBACK_FAILED)
13585 
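/* Run the loopback suite.  MAC loopback is skipped on 5780 (errata)
 * and on newer CPMU-equipped parts; internal PHY loopback and,
 * optionally, external loopback then run at standard, TSO and jumbo
 * frame sizes where supported.  With RSS enabled, rx traffic is first
 * steered to the first queue so the test frames can be found.
 */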
13586 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13587 {
13588 	int err = -EIO;
13589 	u32 eee_cap;
13590 	u32 jmb_pkt_sz = 9000;
13591 
13592 	if (tp->dma_limit)
13593 		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13594 
13595 	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13596 	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13597 
13598 	if (!netif_running(tp->dev)) {
13599 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13600 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13601 		if (do_extlpbk)
13602 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13603 		goto done;
13604 	}
13605 
13606 	err = tg3_reset_hw(tp, true);
13607 	if (err) {
13608 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13609 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13610 		if (do_extlpbk)
13611 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13612 		goto done;
13613 	}
13614 
13615 	if (tg3_flag(tp, ENABLE_RSS)) {
13616 		int i;
13617 
13618 		/* Reroute all rx packets to the 1st queue */
13619 		for (i = MAC_RSS_INDIR_TBL_0;
13620 		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13621 			tw32(i, 0x0);
13622 	}
13623 
	/* HW errata - MAC loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by this
	 * erratum.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
13629 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13630 	    !tg3_flag(tp, CPMU_PRESENT)) {
13631 		tg3_mac_loopback(tp, true);
13632 
13633 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13634 			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13635 
13636 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13637 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13638 			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13639 
13640 		tg3_mac_loopback(tp, false);
13641 	}
13642 
13643 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13644 	    !tg3_flag(tp, USE_PHYLIB)) {
13645 		int i;
13646 
13647 		tg3_phy_lpbk_set(tp, 0, false);
13648 
13649 		/* Wait for link */
13650 		for (i = 0; i < 100; i++) {
13651 			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13652 				break;
13653 			mdelay(1);
13654 		}
13655 
13656 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13657 			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13658 		if (tg3_flag(tp, TSO_CAPABLE) &&
13659 		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13660 			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13661 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13662 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13663 			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13664 
13665 		if (do_extlpbk) {
13666 			tg3_phy_lpbk_set(tp, 0, true);
13667 
13668 			/* All link indications report up, but the hardware
13669 			 * isn't really ready for about 20 msec.  Double it
13670 			 * to be sure.
13671 			 */
13672 			mdelay(40);
13673 
13674 			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13675 				data[TG3_EXT_LOOPB_TEST] |=
13676 							TG3_STD_LOOPBACK_FAILED;
13677 			if (tg3_flag(tp, TSO_CAPABLE) &&
13678 			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13679 				data[TG3_EXT_LOOPB_TEST] |=
13680 							TG3_TSO_LOOPBACK_FAILED;
13681 			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13682 			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13683 				data[TG3_EXT_LOOPB_TEST] |=
13684 							TG3_JMB_LOOPBACK_FAILED;
13685 		}
13686 
13687 		/* Re-enable gphy autopowerdown. */
13688 		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13689 			tg3_phy_toggle_apd(tp, true);
13690 	}
13691 
13692 	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13693 	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13694 
13695 done:
13696 	tp->phy_flags |= eee_cap;
13697 
13698 	return err;
13699 }
13700 
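/* ethtool self-test entry point.  The NVRAM test, and the link test
 * unless external loopback was requested, run with the device live;
 * offline testing additionally halts the chip for the register,
 * memory, loopback and interrupt tests and then restarts the hardware.
 */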
13701 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13702 			  u64 *data)
13703 {
13704 	struct tg3 *tp = netdev_priv(dev);
13705 	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13706 
13707 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13708 		if (tg3_power_up(tp)) {
13709 			etest->flags |= ETH_TEST_FL_FAILED;
13710 			memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13711 			return;
13712 		}
13713 		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13714 	}
13715 
13716 	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13717 
13718 	if (tg3_test_nvram(tp) != 0) {
13719 		etest->flags |= ETH_TEST_FL_FAILED;
13720 		data[TG3_NVRAM_TEST] = 1;
13721 	}
13722 	if (!doextlpbk && tg3_test_link(tp)) {
13723 		etest->flags |= ETH_TEST_FL_FAILED;
13724 		data[TG3_LINK_TEST] = 1;
13725 	}
13726 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
13727 		int err, err2 = 0, irq_sync = 0;
13728 
13729 		if (netif_running(dev)) {
13730 			tg3_phy_stop(tp);
13731 			tg3_netif_stop(tp);
13732 			irq_sync = 1;
13733 		}
13734 
13735 		tg3_full_lock(tp, irq_sync);
13736 		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13737 		err = tg3_nvram_lock(tp);
13738 		tg3_halt_cpu(tp, RX_CPU_BASE);
13739 		if (!tg3_flag(tp, 5705_PLUS))
13740 			tg3_halt_cpu(tp, TX_CPU_BASE);
13741 		if (!err)
13742 			tg3_nvram_unlock(tp);
13743 
13744 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13745 			tg3_phy_reset(tp);
13746 
13747 		if (tg3_test_registers(tp) != 0) {
13748 			etest->flags |= ETH_TEST_FL_FAILED;
13749 			data[TG3_REGISTER_TEST] = 1;
13750 		}
13751 
13752 		if (tg3_test_memory(tp) != 0) {
13753 			etest->flags |= ETH_TEST_FL_FAILED;
13754 			data[TG3_MEMORY_TEST] = 1;
13755 		}
13756 
13757 		if (doextlpbk)
13758 			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13759 
13760 		if (tg3_test_loopback(tp, data, doextlpbk))
13761 			etest->flags |= ETH_TEST_FL_FAILED;
13762 
13763 		tg3_full_unlock(tp);
13764 
13765 		if (tg3_test_interrupt(tp) != 0) {
13766 			etest->flags |= ETH_TEST_FL_FAILED;
13767 			data[TG3_INTERRUPT_TEST] = 1;
13768 		}
13769 
13770 		tg3_full_lock(tp, 0);
13771 
13772 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13773 		if (netif_running(dev)) {
13774 			tg3_flag_set(tp, INIT_COMPLETE);
13775 			err2 = tg3_restart_hw(tp, true);
13776 			if (!err2)
13777 				tg3_netif_start(tp);
13778 		}
13779 
13780 		tg3_full_unlock(tp);
13781 
13782 		if (irq_sync && !err2)
13783 			tg3_phy_start(tp);
13784 	}
13785 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13786 		tg3_power_down_prepare(tp);
13788 }
13789 
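/* SIOCSHWTSTAMP handler: map the requested hwtstamp_config onto the
 * chip's rx PTP filter control bits and the tx timestamp enable flag,
 * then echo the accepted configuration back to user space.
 */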
13790 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13791 {
13792 	struct tg3 *tp = netdev_priv(dev);
13793 	struct hwtstamp_config stmpconf;
13794 
13795 	if (!tg3_flag(tp, PTP_CAPABLE))
13796 		return -EOPNOTSUPP;
13797 
13798 	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13799 		return -EFAULT;
13800 
13801 	if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13802 	    stmpconf.tx_type != HWTSTAMP_TX_OFF)
13803 		return -ERANGE;
13804 
13805 	switch (stmpconf.rx_filter) {
13806 	case HWTSTAMP_FILTER_NONE:
13807 		tp->rxptpctl = 0;
13808 		break;
13809 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13810 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13811 			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13812 		break;
13813 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13814 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13815 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13816 		break;
13817 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13818 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13819 			       TG3_RX_PTP_CTL_DELAY_REQ;
13820 		break;
13821 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
13822 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13823 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13824 		break;
13825 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13826 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13827 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13828 		break;
13829 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13830 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13831 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13832 		break;
13833 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
13834 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13835 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13836 		break;
13837 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13838 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13839 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13840 		break;
13841 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13842 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13843 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13844 		break;
13845 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13846 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13847 			       TG3_RX_PTP_CTL_DELAY_REQ;
13848 		break;
13849 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13850 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13851 			       TG3_RX_PTP_CTL_DELAY_REQ;
13852 		break;
13853 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13854 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13855 			       TG3_RX_PTP_CTL_DELAY_REQ;
13856 		break;
13857 	default:
13858 		return -ERANGE;
13859 	}
13860 
13861 	if (netif_running(dev) && tp->rxptpctl)
13862 		tw32(TG3_RX_PTP_CTL,
13863 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13864 
13865 	if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13866 		tg3_flag_set(tp, TX_TSTAMP_EN);
13867 	else
13868 		tg3_flag_clear(tp, TX_TSTAMP_EN);
13869 
13870 	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13871 		-EFAULT : 0;
13872 }
13873 
13874 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13875 {
13876 	struct tg3 *tp = netdev_priv(dev);
13877 	struct hwtstamp_config stmpconf;
13878 
13879 	if (!tg3_flag(tp, PTP_CAPABLE))
13880 		return -EOPNOTSUPP;
13881 
13882 	stmpconf.flags = 0;
13883 	stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13884 			    HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13885 
13886 	switch (tp->rxptpctl) {
13887 	case 0:
13888 		stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13889 		break;
13890 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13891 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13892 		break;
13893 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13894 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13895 		break;
13896 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13897 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13898 		break;
13899 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13900 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13901 		break;
13902 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13903 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13904 		break;
13905 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13906 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13907 		break;
13908 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13909 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13910 		break;
13911 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13912 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13913 		break;
13914 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13915 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13916 		break;
13917 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13918 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13919 		break;
13920 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13921 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13922 		break;
13923 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13924 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13925 		break;
13926 	default:
13927 		WARN_ON_ONCE(1);
13928 		return -ERANGE;
13929 	}
13930 
13931 	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13932 		-EFAULT : 0;
13933 }
13934 
13935 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13936 {
13937 	struct mii_ioctl_data *data = if_mii(ifr);
13938 	struct tg3 *tp = netdev_priv(dev);
13939 	int err;
13940 
13941 	if (tg3_flag(tp, USE_PHYLIB)) {
13942 		struct phy_device *phydev;
13943 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13944 			return -EAGAIN;
13945 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
13946 		return phy_mii_ioctl(phydev, ifr, cmd);
13947 	}
13948 
13949 	switch (cmd) {
13950 	case SIOCGMIIPHY:
13951 		data->phy_id = tp->phy_addr;
13952 
13953 		fallthrough;
13954 	case SIOCGMIIREG: {
13955 		u32 mii_regval;
13956 
13957 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13958 			break;			/* We have no PHY */
13959 
13960 		if (!netif_running(dev))
13961 			return -EAGAIN;
13962 
13963 		spin_lock_bh(&tp->lock);
13964 		err = __tg3_readphy(tp, data->phy_id & 0x1f,
13965 				    data->reg_num & 0x1f, &mii_regval);
13966 		spin_unlock_bh(&tp->lock);
13967 
13968 		data->val_out = mii_regval;
13969 
13970 		return err;
13971 	}
13972 
13973 	case SIOCSMIIREG:
13974 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13975 			break;			/* We have no PHY */
13976 
13977 		if (!netif_running(dev))
13978 			return -EAGAIN;
13979 
13980 		spin_lock_bh(&tp->lock);
13981 		err = __tg3_writephy(tp, data->phy_id & 0x1f,
13982 				     data->reg_num & 0x1f, data->val_in);
13983 		spin_unlock_bh(&tp->lock);
13984 
13985 		return err;
13986 
13987 	case SIOCSHWTSTAMP:
13988 		return tg3_hwtstamp_set(dev, ifr);
13989 
13990 	case SIOCGHWTSTAMP:
13991 		return tg3_hwtstamp_get(dev, ifr);
13992 
13993 	default:
13994 		/* do nothing */
13995 		break;
13996 	}
13997 	return -EOPNOTSUPP;
13998 }
13999 
14000 static int tg3_get_coalesce(struct net_device *dev,
14001 			    struct ethtool_coalesce *ec,
14002 			    struct kernel_ethtool_coalesce *kernel_coal,
14003 			    struct netlink_ext_ack *extack)
14004 {
14005 	struct tg3 *tp = netdev_priv(dev);
14006 
14007 	memcpy(ec, &tp->coal, sizeof(*ec));
14008 	return 0;
14009 }
14010 
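/* Validate the requested coalescing parameters against the chip
 * limits (the interrupt-context and statistics-block knobs are only
 * adjustable on pre-5705 parts), store the supported subset, and
 * program the hardware immediately if the interface is running.
 */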
14011 static int tg3_set_coalesce(struct net_device *dev,
14012 			    struct ethtool_coalesce *ec,
14013 			    struct kernel_ethtool_coalesce *kernel_coal,
14014 			    struct netlink_ext_ack *extack)
14015 {
14016 	struct tg3 *tp = netdev_priv(dev);
14017 	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14018 	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14019 
14020 	if (!tg3_flag(tp, 5705_PLUS)) {
14021 		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14022 		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14023 		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14024 		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14025 	}
14026 
14027 	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14028 	    (!ec->rx_coalesce_usecs) ||
14029 	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14030 	    (!ec->tx_coalesce_usecs) ||
14031 	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14032 	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14033 	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14034 	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14035 	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14036 	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14037 	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14038 	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14039 		return -EINVAL;
14040 
14041 	/* Only copy relevant parameters, ignore all others. */
14042 	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14043 	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14044 	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14045 	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14046 	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14047 	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14048 	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14049 	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14050 	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14051 
14052 	if (netif_running(dev)) {
14053 		tg3_full_lock(tp, 0);
14054 		__tg3_set_coalesce(tp, &tp->coal);
14055 		tg3_full_unlock(tp);
14056 	}
14057 	return 0;
14058 }
14059 
14060 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14061 {
14062 	struct tg3 *tp = netdev_priv(dev);
14063 
14064 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14065 		netdev_warn(tp->dev, "Board does not support EEE!\n");
14066 		return -EOPNOTSUPP;
14067 	}
14068 
14069 	if (edata->advertised != tp->eee.advertised) {
14070 		netdev_warn(tp->dev,
14071 			    "Direct manipulation of EEE advertisement is not supported\n");
14072 		return -EINVAL;
14073 	}
14074 
14075 	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
		netdev_warn(tp->dev,
			    "Maximum supported Tx LPI timer is %#x\n",
			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14079 		return -EINVAL;
14080 	}
14081 
14082 	tp->eee = *edata;
14083 
14084 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14085 	tg3_warn_mgmt_link_flap(tp);
14086 
14087 	if (netif_running(tp->dev)) {
14088 		tg3_full_lock(tp, 0);
14089 		tg3_setup_eee(tp);
14090 		tg3_phy_reset(tp);
14091 		tg3_full_unlock(tp);
14092 	}
14093 
14094 	return 0;
14095 }
14096 
14097 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14098 {
14099 	struct tg3 *tp = netdev_priv(dev);
14100 
14101 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14102 		netdev_warn(tp->dev,
14103 			    "Board does not support EEE!\n");
14104 		return -EOPNOTSUPP;
14105 	}
14106 
14107 	*edata = tp->eee;
14108 	return 0;
14109 }
14110 
14111 static const struct ethtool_ops tg3_ethtool_ops = {
14112 	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
14113 				     ETHTOOL_COALESCE_MAX_FRAMES |
14114 				     ETHTOOL_COALESCE_USECS_IRQ |
14115 				     ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
14116 				     ETHTOOL_COALESCE_STATS_BLOCK_USECS,
14117 	.get_drvinfo		= tg3_get_drvinfo,
14118 	.get_regs_len		= tg3_get_regs_len,
14119 	.get_regs		= tg3_get_regs,
14120 	.get_wol		= tg3_get_wol,
14121 	.set_wol		= tg3_set_wol,
14122 	.get_msglevel		= tg3_get_msglevel,
14123 	.set_msglevel		= tg3_set_msglevel,
14124 	.nway_reset		= tg3_nway_reset,
14125 	.get_link		= ethtool_op_get_link,
14126 	.get_eeprom_len		= tg3_get_eeprom_len,
14127 	.get_eeprom		= tg3_get_eeprom,
14128 	.set_eeprom		= tg3_set_eeprom,
14129 	.get_ringparam		= tg3_get_ringparam,
14130 	.set_ringparam		= tg3_set_ringparam,
14131 	.get_pauseparam		= tg3_get_pauseparam,
14132 	.set_pauseparam		= tg3_set_pauseparam,
14133 	.self_test		= tg3_self_test,
14134 	.get_strings		= tg3_get_strings,
14135 	.set_phys_id		= tg3_set_phys_id,
14136 	.get_ethtool_stats	= tg3_get_ethtool_stats,
14137 	.get_coalesce		= tg3_get_coalesce,
14138 	.set_coalesce		= tg3_set_coalesce,
14139 	.get_sset_count		= tg3_get_sset_count,
14140 	.get_rxnfc		= tg3_get_rxnfc,
14141 	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
14142 	.get_rxfh		= tg3_get_rxfh,
14143 	.set_rxfh		= tg3_set_rxfh,
14144 	.get_channels		= tg3_get_channels,
14145 	.set_channels		= tg3_set_channels,
14146 	.get_ts_info		= tg3_get_ts_info,
14147 	.get_eee		= tg3_get_eee,
14148 	.set_eee		= tg3_set_eee,
14149 	.get_link_ksettings	= tg3_get_link_ksettings,
14150 	.set_link_ksettings	= tg3_set_link_ksettings,
14151 };
14152 
14153 static void tg3_get_stats64(struct net_device *dev,
14154 			    struct rtnl_link_stats64 *stats)
14155 {
14156 	struct tg3 *tp = netdev_priv(dev);
14157 
14158 	spin_lock_bh(&tp->lock);
14159 	if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14160 		*stats = tp->net_stats_prev;
14161 		spin_unlock_bh(&tp->lock);
14162 		return;
14163 	}
14164 
14165 	tg3_get_nstats(tp, stats);
14166 	spin_unlock_bh(&tp->lock);
14167 }
14168 
14169 static void tg3_set_rx_mode(struct net_device *dev)
14170 {
14171 	struct tg3 *tp = netdev_priv(dev);
14172 
14173 	if (!netif_running(dev))
14174 		return;
14175 
14176 	tg3_full_lock(tp, 0);
14177 	__tg3_set_rx_mode(dev);
14178 	tg3_full_unlock(tp);
14179 }
14180 
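/* Apply a new MTU to the flags that size the rings.  Jumbo MTUs
 * enable the jumbo receive ring, except on 5780-class parts where
 * TSO capability is traded away instead; tg3_change_mtu() below
 * restarts the hardware when the device is running.
 */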
14181 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14182 			       int new_mtu)
14183 {
14184 	dev->mtu = new_mtu;
14185 
14186 	if (new_mtu > ETH_DATA_LEN) {
14187 		if (tg3_flag(tp, 5780_CLASS)) {
14188 			netdev_update_features(dev);
14189 			tg3_flag_clear(tp, TSO_CAPABLE);
14190 		} else {
14191 			tg3_flag_set(tp, JUMBO_RING_ENABLE);
14192 		}
14193 	} else {
14194 		if (tg3_flag(tp, 5780_CLASS)) {
14195 			tg3_flag_set(tp, TSO_CAPABLE);
14196 			netdev_update_features(dev);
14197 		}
14198 		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14199 	}
14200 }
14201 
14202 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14203 {
14204 	struct tg3 *tp = netdev_priv(dev);
14205 	int err;
14206 	bool reset_phy = false;
14207 
14208 	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is brought up.
		 */
14212 		tg3_set_mtu(dev, tp, new_mtu);
14213 		return 0;
14214 	}
14215 
14216 	tg3_phy_stop(tp);
14217 
14218 	tg3_netif_stop(tp);
14219 
14220 	tg3_set_mtu(dev, tp, new_mtu);
14221 
14222 	tg3_full_lock(tp, 1);
14223 
14224 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14225 
	/* Reset the PHY, otherwise the read DMA engine will be left in
	 * a mode that breaks all requests down to 256 bytes.
	 */
14229 	if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14230 	    tg3_asic_rev(tp) == ASIC_REV_5717 ||
14231 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
14232 	    tg3_asic_rev(tp) == ASIC_REV_5720)
14233 		reset_phy = true;
14234 
14235 	err = tg3_restart_hw(tp, reset_phy);
14236 
14237 	if (!err)
14238 		tg3_netif_start(tp);
14239 
14240 	tg3_full_unlock(tp);
14241 
14242 	if (!err)
14243 		tg3_phy_start(tp);
14244 
14245 	return err;
14246 }
14247 
14248 static const struct net_device_ops tg3_netdev_ops = {
14249 	.ndo_open		= tg3_open,
14250 	.ndo_stop		= tg3_close,
14251 	.ndo_start_xmit		= tg3_start_xmit,
14252 	.ndo_get_stats64	= tg3_get_stats64,
14253 	.ndo_validate_addr	= eth_validate_addr,
14254 	.ndo_set_rx_mode	= tg3_set_rx_mode,
14255 	.ndo_set_mac_address	= tg3_set_mac_addr,
14256 	.ndo_eth_ioctl		= tg3_ioctl,
14257 	.ndo_tx_timeout		= tg3_tx_timeout,
14258 	.ndo_change_mtu		= tg3_change_mtu,
14259 	.ndo_fix_features	= tg3_fix_features,
14260 	.ndo_set_features	= tg3_set_features,
14261 #ifdef CONFIG_NET_POLL_CONTROLLER
14262 	.ndo_poll_controller	= tg3_poll_controller,
14263 #endif
14264 };
14265 
14266 static void tg3_get_eeprom_size(struct tg3 *tp)
14267 {
14268 	u32 cursize, val, magic;
14269 
14270 	tp->nvram_size = EEPROM_CHIP_SIZE;
14271 
14272 	if (tg3_nvram_read(tp, 0, &magic) != 0)
14273 		return;
14274 
14275 	if ((magic != TG3_EEPROM_MAGIC) &&
14276 	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14277 	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14278 		return;
14279 
14280 	/*
14281 	 * Size the chip by reading offsets at increasing powers of two.
14282 	 * When we encounter our validation signature, we know the addressing
14283 	 * has wrapped around, and thus have our chip size.
14284 	 */
14285 	cursize = 0x10;
14286 
14287 	while (cursize < tp->nvram_size) {
14288 		if (tg3_nvram_read(tp, cursize, &val) != 0)
14289 			return;
14290 
14291 		if (val == magic)
14292 			break;
14293 
14294 		cursize <<= 1;
14295 	}
14296 
14297 	tp->nvram_size = cursize;
14298 }
14299 
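/* Determine the NVRAM size.  Selfboot images fall back to the EEPROM
 * sizing probe above; otherwise the size recorded in the word at
 * offset 0xf0 is used, defaulting to 512KB when that word is zero or
 * unreadable.
 */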
14300 static void tg3_get_nvram_size(struct tg3 *tp)
14301 {
14302 	u32 val;
14303 
14304 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14305 		return;
14306 
14307 	/* Selfboot format */
14308 	if (val != TG3_EEPROM_MAGIC) {
14309 		tg3_get_eeprom_size(tp);
14310 		return;
14311 	}
14312 
14313 	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14314 		if (val != 0) {
14315 			/* This is confusing.  We want to operate on the
14316 			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
14317 			 * call will read from NVRAM and byteswap the data
14318 			 * according to the byteswapping settings for all
14319 			 * other register accesses.  This ensures the data we
14320 			 * want will always reside in the lower 16-bits.
14321 			 * However, the data in NVRAM is in LE format, which
14322 			 * means the data from the NVRAM read will always be
14323 			 * opposite the endianness of the CPU.  The 16-bit
14324 			 * byteswap then brings the data to CPU endianness.
14325 			 */
14326 			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14327 			return;
14328 		}
14329 	}
14330 	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14331 }
14332 
14333 static void tg3_get_nvram_info(struct tg3 *tp)
14334 {
14335 	u32 nvcfg1;
14336 
14337 	nvcfg1 = tr32(NVRAM_CFG1);
14338 	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14339 		tg3_flag_set(tp, FLASH);
14340 	} else {
14341 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14342 		tw32(NVRAM_CFG1, nvcfg1);
14343 	}
14344 
14345 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14346 	    tg3_flag(tp, 5780_CLASS)) {
14347 		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14348 		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14349 			tp->nvram_jedecnum = JEDEC_ATMEL;
14350 			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14351 			tg3_flag_set(tp, NVRAM_BUFFERED);
14352 			break;
14353 		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14354 			tp->nvram_jedecnum = JEDEC_ATMEL;
14355 			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14356 			break;
14357 		case FLASH_VENDOR_ATMEL_EEPROM:
14358 			tp->nvram_jedecnum = JEDEC_ATMEL;
14359 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14360 			tg3_flag_set(tp, NVRAM_BUFFERED);
14361 			break;
14362 		case FLASH_VENDOR_ST:
14363 			tp->nvram_jedecnum = JEDEC_ST;
14364 			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14365 			tg3_flag_set(tp, NVRAM_BUFFERED);
14366 			break;
14367 		case FLASH_VENDOR_SAIFUN:
14368 			tp->nvram_jedecnum = JEDEC_SAIFUN;
14369 			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14370 			break;
14371 		case FLASH_VENDOR_SST_SMALL:
14372 		case FLASH_VENDOR_SST_LARGE:
14373 			tp->nvram_jedecnum = JEDEC_SST;
14374 			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14375 			break;
14376 		}
14377 	} else {
14378 		tp->nvram_jedecnum = JEDEC_ATMEL;
14379 		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14380 		tg3_flag_set(tp, NVRAM_BUFFERED);
14381 	}
14382 }
14383 
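/* Decode the flash page size advertised in NVRAM_CFG1 on 5752 and
 * newer parts.
 */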
14384 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14385 {
14386 	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14387 	case FLASH_5752PAGE_SIZE_256:
14388 		tp->nvram_pagesize = 256;
14389 		break;
14390 	case FLASH_5752PAGE_SIZE_512:
14391 		tp->nvram_pagesize = 512;
14392 		break;
14393 	case FLASH_5752PAGE_SIZE_1K:
14394 		tp->nvram_pagesize = 1024;
14395 		break;
14396 	case FLASH_5752PAGE_SIZE_2K:
14397 		tp->nvram_pagesize = 2048;
14398 		break;
14399 	case FLASH_5752PAGE_SIZE_4K:
14400 		tp->nvram_pagesize = 4096;
14401 		break;
14402 	case FLASH_5752PAGE_SIZE_264:
14403 		tp->nvram_pagesize = 264;
14404 		break;
14405 	case FLASH_5752PAGE_SIZE_528:
14406 		tp->nvram_pagesize = 528;
14407 		break;
14408 	}
14409 }
14410 
14411 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14412 {
14413 	u32 nvcfg1;
14414 
14415 	nvcfg1 = tr32(NVRAM_CFG1);
14416 
14417 	/* NVRAM protection for TPM */
14418 	if (nvcfg1 & (1 << 27))
14419 		tg3_flag_set(tp, PROTECTED_NVRAM);
14420 
14421 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14422 	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14423 	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14424 		tp->nvram_jedecnum = JEDEC_ATMEL;
14425 		tg3_flag_set(tp, NVRAM_BUFFERED);
14426 		break;
14427 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14428 		tp->nvram_jedecnum = JEDEC_ATMEL;
14429 		tg3_flag_set(tp, NVRAM_BUFFERED);
14430 		tg3_flag_set(tp, FLASH);
14431 		break;
14432 	case FLASH_5752VENDOR_ST_M45PE10:
14433 	case FLASH_5752VENDOR_ST_M45PE20:
14434 	case FLASH_5752VENDOR_ST_M45PE40:
14435 		tp->nvram_jedecnum = JEDEC_ST;
14436 		tg3_flag_set(tp, NVRAM_BUFFERED);
14437 		tg3_flag_set(tp, FLASH);
14438 		break;
14439 	}
14440 
14441 	if (tg3_flag(tp, FLASH)) {
14442 		tg3_nvram_get_pagesize(tp, nvcfg1);
14443 	} else {
		/* For EEPROMs, set the pagesize to the maximum EEPROM size */
14445 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14446 
14447 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14448 		tw32(NVRAM_CFG1, nvcfg1);
14449 	}
14450 }
14451 
14452 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14453 {
14454 	u32 nvcfg1, protect = 0;
14455 
14456 	nvcfg1 = tr32(NVRAM_CFG1);
14457 
14458 	/* NVRAM protection for TPM */
14459 	if (nvcfg1 & (1 << 27)) {
14460 		tg3_flag_set(tp, PROTECTED_NVRAM);
14461 		protect = 1;
14462 	}
14463 
14464 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14465 	switch (nvcfg1) {
14466 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14467 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14468 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14469 	case FLASH_5755VENDOR_ATMEL_FLASH_5:
14470 		tp->nvram_jedecnum = JEDEC_ATMEL;
14471 		tg3_flag_set(tp, NVRAM_BUFFERED);
14472 		tg3_flag_set(tp, FLASH);
14473 		tp->nvram_pagesize = 264;
14474 		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14475 		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14476 			tp->nvram_size = (protect ? 0x3e200 :
14477 					  TG3_NVRAM_SIZE_512KB);
14478 		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14479 			tp->nvram_size = (protect ? 0x1f200 :
14480 					  TG3_NVRAM_SIZE_256KB);
14481 		else
14482 			tp->nvram_size = (protect ? 0x1f200 :
14483 					  TG3_NVRAM_SIZE_128KB);
14484 		break;
14485 	case FLASH_5752VENDOR_ST_M45PE10:
14486 	case FLASH_5752VENDOR_ST_M45PE20:
14487 	case FLASH_5752VENDOR_ST_M45PE40:
14488 		tp->nvram_jedecnum = JEDEC_ST;
14489 		tg3_flag_set(tp, NVRAM_BUFFERED);
14490 		tg3_flag_set(tp, FLASH);
14491 		tp->nvram_pagesize = 256;
14492 		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14493 			tp->nvram_size = (protect ?
14494 					  TG3_NVRAM_SIZE_64KB :
14495 					  TG3_NVRAM_SIZE_128KB);
14496 		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14497 			tp->nvram_size = (protect ?
14498 					  TG3_NVRAM_SIZE_64KB :
14499 					  TG3_NVRAM_SIZE_256KB);
14500 		else
14501 			tp->nvram_size = (protect ?
14502 					  TG3_NVRAM_SIZE_128KB :
14503 					  TG3_NVRAM_SIZE_512KB);
14504 		break;
14505 	}
14506 }
14507 
14508 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14509 {
14510 	u32 nvcfg1;
14511 
14512 	nvcfg1 = tr32(NVRAM_CFG1);
14513 
14514 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14515 	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14516 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14517 	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14518 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14519 		tp->nvram_jedecnum = JEDEC_ATMEL;
14520 		tg3_flag_set(tp, NVRAM_BUFFERED);
14521 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14522 
14523 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14524 		tw32(NVRAM_CFG1, nvcfg1);
14525 		break;
14526 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14527 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14528 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14529 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14530 		tp->nvram_jedecnum = JEDEC_ATMEL;
14531 		tg3_flag_set(tp, NVRAM_BUFFERED);
14532 		tg3_flag_set(tp, FLASH);
14533 		tp->nvram_pagesize = 264;
14534 		break;
14535 	case FLASH_5752VENDOR_ST_M45PE10:
14536 	case FLASH_5752VENDOR_ST_M45PE20:
14537 	case FLASH_5752VENDOR_ST_M45PE40:
14538 		tp->nvram_jedecnum = JEDEC_ST;
14539 		tg3_flag_set(tp, NVRAM_BUFFERED);
14540 		tg3_flag_set(tp, FLASH);
14541 		tp->nvram_pagesize = 256;
14542 		break;
14543 	}
14544 }
14545 
14546 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14547 {
14548 	u32 nvcfg1, protect = 0;
14549 
14550 	nvcfg1 = tr32(NVRAM_CFG1);
14551 
14552 	/* NVRAM protection for TPM */
14553 	if (nvcfg1 & (1 << 27)) {
14554 		tg3_flag_set(tp, PROTECTED_NVRAM);
14555 		protect = 1;
14556 	}
14557 
14558 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14559 	switch (nvcfg1) {
14560 	case FLASH_5761VENDOR_ATMEL_ADB021D:
14561 	case FLASH_5761VENDOR_ATMEL_ADB041D:
14562 	case FLASH_5761VENDOR_ATMEL_ADB081D:
14563 	case FLASH_5761VENDOR_ATMEL_ADB161D:
14564 	case FLASH_5761VENDOR_ATMEL_MDB021D:
14565 	case FLASH_5761VENDOR_ATMEL_MDB041D:
14566 	case FLASH_5761VENDOR_ATMEL_MDB081D:
14567 	case FLASH_5761VENDOR_ATMEL_MDB161D:
14568 		tp->nvram_jedecnum = JEDEC_ATMEL;
14569 		tg3_flag_set(tp, NVRAM_BUFFERED);
14570 		tg3_flag_set(tp, FLASH);
14571 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14572 		tp->nvram_pagesize = 256;
14573 		break;
14574 	case FLASH_5761VENDOR_ST_A_M45PE20:
14575 	case FLASH_5761VENDOR_ST_A_M45PE40:
14576 	case FLASH_5761VENDOR_ST_A_M45PE80:
14577 	case FLASH_5761VENDOR_ST_A_M45PE16:
14578 	case FLASH_5761VENDOR_ST_M_M45PE20:
14579 	case FLASH_5761VENDOR_ST_M_M45PE40:
14580 	case FLASH_5761VENDOR_ST_M_M45PE80:
14581 	case FLASH_5761VENDOR_ST_M_M45PE16:
14582 		tp->nvram_jedecnum = JEDEC_ST;
14583 		tg3_flag_set(tp, NVRAM_BUFFERED);
14584 		tg3_flag_set(tp, FLASH);
14585 		tp->nvram_pagesize = 256;
14586 		break;
14587 	}
14588 
14589 	if (protect) {
14590 		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14591 	} else {
14592 		switch (nvcfg1) {
14593 		case FLASH_5761VENDOR_ATMEL_ADB161D:
14594 		case FLASH_5761VENDOR_ATMEL_MDB161D:
14595 		case FLASH_5761VENDOR_ST_A_M45PE16:
14596 		case FLASH_5761VENDOR_ST_M_M45PE16:
14597 			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14598 			break;
14599 		case FLASH_5761VENDOR_ATMEL_ADB081D:
14600 		case FLASH_5761VENDOR_ATMEL_MDB081D:
14601 		case FLASH_5761VENDOR_ST_A_M45PE80:
14602 		case FLASH_5761VENDOR_ST_M_M45PE80:
14603 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14604 			break;
14605 		case FLASH_5761VENDOR_ATMEL_ADB041D:
14606 		case FLASH_5761VENDOR_ATMEL_MDB041D:
14607 		case FLASH_5761VENDOR_ST_A_M45PE40:
14608 		case FLASH_5761VENDOR_ST_M_M45PE40:
14609 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14610 			break;
14611 		case FLASH_5761VENDOR_ATMEL_ADB021D:
14612 		case FLASH_5761VENDOR_ATMEL_MDB021D:
14613 		case FLASH_5761VENDOR_ST_A_M45PE20:
14614 		case FLASH_5761VENDOR_ST_M_M45PE20:
14615 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14616 			break;
14617 		}
14618 	}
14619 }
14620 
14621 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14622 {
14623 	tp->nvram_jedecnum = JEDEC_ATMEL;
14624 	tg3_flag_set(tp, NVRAM_BUFFERED);
14625 	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14626 }
14627 
14628 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14629 {
14630 	u32 nvcfg1;
14631 
14632 	nvcfg1 = tr32(NVRAM_CFG1);
14633 
14634 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14635 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14636 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14637 		tp->nvram_jedecnum = JEDEC_ATMEL;
14638 		tg3_flag_set(tp, NVRAM_BUFFERED);
14639 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14640 
14641 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14642 		tw32(NVRAM_CFG1, nvcfg1);
14643 		return;
14644 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14645 	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14646 	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14647 	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14648 	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14649 	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14650 	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14651 		tp->nvram_jedecnum = JEDEC_ATMEL;
14652 		tg3_flag_set(tp, NVRAM_BUFFERED);
14653 		tg3_flag_set(tp, FLASH);
14654 
14655 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14656 		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14657 		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14658 		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14659 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14660 			break;
14661 		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14662 		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14663 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14664 			break;
14665 		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14666 		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14667 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14668 			break;
14669 		}
14670 		break;
14671 	case FLASH_5752VENDOR_ST_M45PE10:
14672 	case FLASH_5752VENDOR_ST_M45PE20:
14673 	case FLASH_5752VENDOR_ST_M45PE40:
14674 		tp->nvram_jedecnum = JEDEC_ST;
14675 		tg3_flag_set(tp, NVRAM_BUFFERED);
14676 		tg3_flag_set(tp, FLASH);
14677 
14678 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14679 		case FLASH_5752VENDOR_ST_M45PE10:
14680 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14681 			break;
14682 		case FLASH_5752VENDOR_ST_M45PE20:
14683 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14684 			break;
14685 		case FLASH_5752VENDOR_ST_M45PE40:
14686 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14687 			break;
14688 		}
14689 		break;
14690 	default:
14691 		tg3_flag_set(tp, NO_NVRAM);
14692 		return;
14693 	}
14694 
14695 	tg3_nvram_get_pagesize(tp, nvcfg1);
14696 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14697 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14698 }
14699 
14701 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14702 {
14703 	u32 nvcfg1;
14704 
14705 	nvcfg1 = tr32(NVRAM_CFG1);
14706 
14707 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14708 	case FLASH_5717VENDOR_ATMEL_EEPROM:
14709 	case FLASH_5717VENDOR_MICRO_EEPROM:
14710 		tp->nvram_jedecnum = JEDEC_ATMEL;
14711 		tg3_flag_set(tp, NVRAM_BUFFERED);
14712 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14713 
14714 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14715 		tw32(NVRAM_CFG1, nvcfg1);
14716 		return;
14717 	case FLASH_5717VENDOR_ATMEL_MDB011D:
14718 	case FLASH_5717VENDOR_ATMEL_ADB011B:
14719 	case FLASH_5717VENDOR_ATMEL_ADB011D:
14720 	case FLASH_5717VENDOR_ATMEL_MDB021D:
14721 	case FLASH_5717VENDOR_ATMEL_ADB021B:
14722 	case FLASH_5717VENDOR_ATMEL_ADB021D:
14723 	case FLASH_5717VENDOR_ATMEL_45USPT:
14724 		tp->nvram_jedecnum = JEDEC_ATMEL;
14725 		tg3_flag_set(tp, NVRAM_BUFFERED);
14726 		tg3_flag_set(tp, FLASH);
14727 
14728 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14729 		case FLASH_5717VENDOR_ATMEL_MDB021D:
14730 			/* Detect size with tg3_nvram_get_size() */
14731 			break;
14732 		case FLASH_5717VENDOR_ATMEL_ADB021B:
14733 		case FLASH_5717VENDOR_ATMEL_ADB021D:
14734 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14735 			break;
14736 		default:
14737 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14738 			break;
14739 		}
14740 		break;
14741 	case FLASH_5717VENDOR_ST_M_M25PE10:
14742 	case FLASH_5717VENDOR_ST_A_M25PE10:
14743 	case FLASH_5717VENDOR_ST_M_M45PE10:
14744 	case FLASH_5717VENDOR_ST_A_M45PE10:
14745 	case FLASH_5717VENDOR_ST_M_M25PE20:
14746 	case FLASH_5717VENDOR_ST_A_M25PE20:
14747 	case FLASH_5717VENDOR_ST_M_M45PE20:
14748 	case FLASH_5717VENDOR_ST_A_M45PE20:
14749 	case FLASH_5717VENDOR_ST_25USPT:
14750 	case FLASH_5717VENDOR_ST_45USPT:
14751 		tp->nvram_jedecnum = JEDEC_ST;
14752 		tg3_flag_set(tp, NVRAM_BUFFERED);
14753 		tg3_flag_set(tp, FLASH);
14754 
14755 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14756 		case FLASH_5717VENDOR_ST_M_M25PE20:
14757 		case FLASH_5717VENDOR_ST_M_M45PE20:
14758 			/* Detect size with tg3_nvram_get_size() */
14759 			break;
14760 		case FLASH_5717VENDOR_ST_A_M25PE20:
14761 		case FLASH_5717VENDOR_ST_A_M45PE20:
14762 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14763 			break;
14764 		default:
14765 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14766 			break;
14767 		}
14768 		break;
14769 	default:
14770 		tg3_flag_set(tp, NO_NVRAM);
14771 		return;
14772 	}
14773 
14774 	tg3_nvram_get_pagesize(tp, nvcfg1);
14775 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14776 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14777 }
14778 
14779 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14780 {
14781 	u32 nvcfg1, nvmpinstrp, nv_status;
14782 
14783 	nvcfg1 = tr32(NVRAM_CFG1);
14784 	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14785 
14786 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14787 		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14788 			tg3_flag_set(tp, NO_NVRAM);
14789 			return;
14790 		}
14791 
14792 		switch (nvmpinstrp) {
14793 		case FLASH_5762_MX25L_100:
14794 		case FLASH_5762_MX25L_200:
14795 		case FLASH_5762_MX25L_400:
14796 		case FLASH_5762_MX25L_800:
14797 		case FLASH_5762_MX25L_160_320:
14798 			tp->nvram_pagesize = 4096;
14799 			tp->nvram_jedecnum = JEDEC_MACRONIX;
14800 			tg3_flag_set(tp, NVRAM_BUFFERED);
14801 			tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14802 			tg3_flag_set(tp, FLASH);
14803 			nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
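			/* The autosense status encodes the flash size as a
			 * power of two: the device-ID field selects the
			 * exponent, and the result is scaled up by the
			 * AUTOSENSE_SIZE_IN_MB shift.  For illustration, a
			 * field value of 1 yields 2 MB, assuming that shift
			 * converts megabytes to bytes.
			 */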
14804 			tp->nvram_size =
14805 				(1 << (nv_status >> AUTOSENSE_DEVID &
14806 						AUTOSENSE_DEVID_MASK)
14807 					<< AUTOSENSE_SIZE_IN_MB);
14808 			return;
14809 
14810 		case FLASH_5762_EEPROM_HD:
14811 			nvmpinstrp = FLASH_5720_EEPROM_HD;
14812 			break;
14813 		case FLASH_5762_EEPROM_LD:
14814 			nvmpinstrp = FLASH_5720_EEPROM_LD;
14815 			break;
14816 		case FLASH_5720VENDOR_M_ST_M45PE20:
14817 			/* This pinstrap supports multiple sizes, so force it
14818 			 * to read the actual size from location 0xf0.
14819 			 */
14820 			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14821 			break;
14822 		}
14823 	}
14824 
14825 	switch (nvmpinstrp) {
14826 	case FLASH_5720_EEPROM_HD:
14827 	case FLASH_5720_EEPROM_LD:
14828 		tp->nvram_jedecnum = JEDEC_ATMEL;
14829 		tg3_flag_set(tp, NVRAM_BUFFERED);
14830 
14831 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14832 		tw32(NVRAM_CFG1, nvcfg1);
14833 		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14834 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14835 		else
14836 			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14837 		return;
14838 	case FLASH_5720VENDOR_M_ATMEL_DB011D:
14839 	case FLASH_5720VENDOR_A_ATMEL_DB011B:
14840 	case FLASH_5720VENDOR_A_ATMEL_DB011D:
14841 	case FLASH_5720VENDOR_M_ATMEL_DB021D:
14842 	case FLASH_5720VENDOR_A_ATMEL_DB021B:
14843 	case FLASH_5720VENDOR_A_ATMEL_DB021D:
14844 	case FLASH_5720VENDOR_M_ATMEL_DB041D:
14845 	case FLASH_5720VENDOR_A_ATMEL_DB041B:
14846 	case FLASH_5720VENDOR_A_ATMEL_DB041D:
14847 	case FLASH_5720VENDOR_M_ATMEL_DB081D:
14848 	case FLASH_5720VENDOR_A_ATMEL_DB081D:
14849 	case FLASH_5720VENDOR_ATMEL_45USPT:
14850 		tp->nvram_jedecnum = JEDEC_ATMEL;
14851 		tg3_flag_set(tp, NVRAM_BUFFERED);
14852 		tg3_flag_set(tp, FLASH);
14853 
14854 		switch (nvmpinstrp) {
14855 		case FLASH_5720VENDOR_M_ATMEL_DB021D:
14856 		case FLASH_5720VENDOR_A_ATMEL_DB021B:
14857 		case FLASH_5720VENDOR_A_ATMEL_DB021D:
14858 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14859 			break;
14860 		case FLASH_5720VENDOR_M_ATMEL_DB041D:
14861 		case FLASH_5720VENDOR_A_ATMEL_DB041B:
14862 		case FLASH_5720VENDOR_A_ATMEL_DB041D:
14863 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14864 			break;
14865 		case FLASH_5720VENDOR_M_ATMEL_DB081D:
14866 		case FLASH_5720VENDOR_A_ATMEL_DB081D:
14867 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14868 			break;
14869 		default:
14870 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
14871 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14872 			break;
14873 		}
14874 		break;
14875 	case FLASH_5720VENDOR_M_ST_M25PE10:
14876 	case FLASH_5720VENDOR_M_ST_M45PE10:
14877 	case FLASH_5720VENDOR_A_ST_M25PE10:
14878 	case FLASH_5720VENDOR_A_ST_M45PE10:
14879 	case FLASH_5720VENDOR_M_ST_M25PE20:
14880 	case FLASH_5720VENDOR_M_ST_M45PE20:
14881 	case FLASH_5720VENDOR_A_ST_M25PE20:
14882 	case FLASH_5720VENDOR_A_ST_M45PE20:
14883 	case FLASH_5720VENDOR_M_ST_M25PE40:
14884 	case FLASH_5720VENDOR_M_ST_M45PE40:
14885 	case FLASH_5720VENDOR_A_ST_M25PE40:
14886 	case FLASH_5720VENDOR_A_ST_M45PE40:
14887 	case FLASH_5720VENDOR_M_ST_M25PE80:
14888 	case FLASH_5720VENDOR_M_ST_M45PE80:
14889 	case FLASH_5720VENDOR_A_ST_M25PE80:
14890 	case FLASH_5720VENDOR_A_ST_M45PE80:
14891 	case FLASH_5720VENDOR_ST_25USPT:
14892 	case FLASH_5720VENDOR_ST_45USPT:
14893 		tp->nvram_jedecnum = JEDEC_ST;
14894 		tg3_flag_set(tp, NVRAM_BUFFERED);
14895 		tg3_flag_set(tp, FLASH);
14896 
14897 		switch (nvmpinstrp) {
14898 		case FLASH_5720VENDOR_M_ST_M25PE20:
14899 		case FLASH_5720VENDOR_M_ST_M45PE20:
14900 		case FLASH_5720VENDOR_A_ST_M25PE20:
14901 		case FLASH_5720VENDOR_A_ST_M45PE20:
14902 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14903 			break;
14904 		case FLASH_5720VENDOR_M_ST_M25PE40:
14905 		case FLASH_5720VENDOR_M_ST_M45PE40:
14906 		case FLASH_5720VENDOR_A_ST_M25PE40:
14907 		case FLASH_5720VENDOR_A_ST_M45PE40:
14908 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14909 			break;
14910 		case FLASH_5720VENDOR_M_ST_M25PE80:
14911 		case FLASH_5720VENDOR_M_ST_M45PE80:
14912 		case FLASH_5720VENDOR_A_ST_M25PE80:
14913 		case FLASH_5720VENDOR_A_ST_M45PE80:
14914 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14915 			break;
14916 		default:
14917 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
14918 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14919 			break;
14920 		}
14921 		break;
14922 	default:
14923 		tg3_flag_set(tp, NO_NVRAM);
14924 		return;
14925 	}
14926 
14927 	tg3_nvram_get_pagesize(tp, nvcfg1);
14928 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14929 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14930 
14931 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14932 		u32 val;
14933 
14934 		if (tg3_nvram_read(tp, 0, &val))
14935 			return;
14936 
14937 		if (val != TG3_EEPROM_MAGIC &&
14938 		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14939 			tg3_flag_set(tp, NO_NVRAM);
14940 	}
14941 }
14942 
14943 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14944 static void tg3_nvram_init(struct tg3 *tp)
14945 {
14946 	if (tg3_flag(tp, IS_SSB_CORE)) {
14947 		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14948 		tg3_flag_clear(tp, NVRAM);
14949 		tg3_flag_clear(tp, NVRAM_BUFFERED);
14950 		tg3_flag_set(tp, NO_NVRAM);
14951 		return;
14952 	}
14953 
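	/* Reset the serial-EEPROM state machine and program the default
	 * clock period, then give the part a moment to settle before
	 * auto access is enabled below.
	 */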
14954 	tw32_f(GRC_EEPROM_ADDR,
14955 	     (EEPROM_ADDR_FSM_RESET |
14956 	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
14957 	       EEPROM_ADDR_CLKPERD_SHIFT)));
14958 
14959 	msleep(1);
14960 
	/* Enable serial-EEPROM accesses. */
14962 	tw32_f(GRC_LOCAL_CTRL,
14963 	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14964 	udelay(100);
14965 
14966 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14967 	    tg3_asic_rev(tp) != ASIC_REV_5701) {
14968 		tg3_flag_set(tp, NVRAM);
14969 
14970 		if (tg3_nvram_lock(tp)) {
14971 			netdev_warn(tp->dev,
14972 				    "Cannot get nvram lock, %s failed\n",
14973 				    __func__);
14974 			return;
14975 		}
14976 		tg3_enable_nvram_access(tp);
14977 
14978 		tp->nvram_size = 0;
14979 
14980 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
14981 			tg3_get_5752_nvram_info(tp);
14982 		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14983 			tg3_get_5755_nvram_info(tp);
14984 		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14985 			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14986 			 tg3_asic_rev(tp) == ASIC_REV_5785)
14987 			tg3_get_5787_nvram_info(tp);
14988 		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14989 			tg3_get_5761_nvram_info(tp);
14990 		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14991 			tg3_get_5906_nvram_info(tp);
14992 		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14993 			 tg3_flag(tp, 57765_CLASS))
14994 			tg3_get_57780_nvram_info(tp);
14995 		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14996 			 tg3_asic_rev(tp) == ASIC_REV_5719)
14997 			tg3_get_5717_nvram_info(tp);
14998 		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14999 			 tg3_asic_rev(tp) == ASIC_REV_5762)
15000 			tg3_get_5720_nvram_info(tp);
15001 		else
15002 			tg3_get_nvram_info(tp);
15003 
15004 		if (tp->nvram_size == 0)
15005 			tg3_get_nvram_size(tp);
15006 
15007 		tg3_disable_nvram_access(tp);
15008 		tg3_nvram_unlock(tp);
15009 
15010 	} else {
15011 		tg3_flag_clear(tp, NVRAM);
15012 		tg3_flag_clear(tp, NVRAM_BUFFERED);
15013 
15014 		tg3_get_eeprom_size(tp);
15015 	}
15016 }
15017 
15018 struct subsys_tbl_ent {
15019 	u16 subsys_vendor, subsys_devid;
15020 	u32 phy_id;
15021 };
15022 
15023 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15024 	/* Broadcom boards. */
15025 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15026 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15027 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15028 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15029 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15030 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15031 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15032 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15033 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15034 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15035 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15036 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15037 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15038 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15039 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15040 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15041 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15042 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15043 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15044 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15045 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15046 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15047 
15048 	/* 3com boards. */
15049 	{ TG3PCI_SUBVENDOR_ID_3COM,
15050 	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15051 	{ TG3PCI_SUBVENDOR_ID_3COM,
15052 	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15053 	{ TG3PCI_SUBVENDOR_ID_3COM,
15054 	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15055 	{ TG3PCI_SUBVENDOR_ID_3COM,
15056 	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15057 	{ TG3PCI_SUBVENDOR_ID_3COM,
15058 	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15059 
15060 	/* DELL boards. */
15061 	{ TG3PCI_SUBVENDOR_ID_DELL,
15062 	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15063 	{ TG3PCI_SUBVENDOR_ID_DELL,
15064 	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15065 	{ TG3PCI_SUBVENDOR_ID_DELL,
15066 	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15067 	{ TG3PCI_SUBVENDOR_ID_DELL,
15068 	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15069 
15070 	/* Compaq boards. */
15071 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15072 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15073 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15074 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15075 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15076 	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15077 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15078 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15079 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15080 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15081 
15082 	/* IBM boards. */
15083 	{ TG3PCI_SUBVENDOR_ID_IBM,
15084 	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15085 };
15086 
15087 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15088 {
15089 	int i;
15090 
15091 	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15092 		if ((subsys_id_to_phy_id[i].subsys_vendor ==
15093 		     tp->pdev->subsystem_vendor) &&
15094 		    (subsys_id_to_phy_id[i].subsys_devid ==
15095 		     tp->pdev->subsystem_device))
15096 			return &subsys_id_to_phy_id[i];
15097 	}
15098 	return NULL;
15099 }
15100 
15101 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15102 {
15103 	u32 val;
15104 
15105 	tp->phy_id = TG3_PHY_ID_INVALID;
15106 	tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15107 
15108 	/* Assume an onboard device and WOL capable by default.  */
15109 	tg3_flag_set(tp, EEPROM_WRITE_PROT);
15110 	tg3_flag_set(tp, WOL_CAP);
15111 
15112 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15113 		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15114 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15115 			tg3_flag_set(tp, IS_NIC);
15116 		}
15117 		val = tr32(VCPU_CFGSHDW);
15118 		if (val & VCPU_CFGSHDW_ASPM_DBNC)
15119 			tg3_flag_set(tp, ASPM_WORKAROUND);
15120 		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15121 		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15122 			tg3_flag_set(tp, WOL_ENABLE);
15123 			device_set_wakeup_enable(&tp->pdev->dev, true);
15124 		}
15125 		goto done;
15126 	}
15127 
15128 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15129 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15130 		u32 nic_cfg, led_cfg;
15131 		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15132 		u32 nic_phy_id, ver, eeprom_phy_id;
15133 		int eeprom_phy_serdes = 0;
15134 
15135 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15136 		tp->nic_sram_data_cfg = nic_cfg;
15137 
15138 		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15139 		ver >>= NIC_SRAM_DATA_VER_SHIFT;
15140 		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15141 		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
15142 		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
15143 		    (ver > 0) && (ver < 0x100))
15144 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15145 
15146 		if (tg3_asic_rev(tp) == ASIC_REV_5785)
15147 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15148 
15149 		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15150 		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
15151 		    tg3_asic_rev(tp) == ASIC_REV_5720)
15152 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15153 
15154 		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15155 		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15156 			eeprom_phy_serdes = 1;
15157 
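		/* Repack the two 16-bit ID words into the driver's
		 * internal 32-bit PHY ID format: the high half of id1
		 * lands in bits 25:10, the top six bits of id2 in bits
		 * 31:26, and the low ten bits of id2 in bits 9:0; this
		 * is the same packing tg3_phy_probe() applies to the
		 * live MII_PHYSID registers.
		 */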
15158 		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15159 		if (nic_phy_id != 0) {
15160 			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15161 			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15162 
15163 			eeprom_phy_id  = (id1 >> 16) << 10;
15164 			eeprom_phy_id |= (id2 & 0xfc00) << 16;
15165 			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
15166 		} else
15167 			eeprom_phy_id = 0;
15168 
15169 		tp->phy_id = eeprom_phy_id;
15170 		if (eeprom_phy_serdes) {
15171 			if (!tg3_flag(tp, 5705_PLUS))
15172 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15173 			else
15174 				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15175 		}
15176 
15177 		if (tg3_flag(tp, 5750_PLUS))
15178 			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15179 				    SHASTA_EXT_LED_MODE_MASK);
15180 		else
15181 			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15182 
15183 		switch (led_cfg) {
15184 		default:
15185 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15186 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15187 			break;
15188 
15189 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15190 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15191 			break;
15192 
15193 		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15194 			tp->led_ctrl = LED_CTRL_MODE_MAC;
15195 
			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read; some older 5700/5701 bootcode leaves
			 * this field zero.
			 */
15199 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15200 			    tg3_asic_rev(tp) == ASIC_REV_5701)
15201 				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15202 
15203 			break;
15204 
15205 		case SHASTA_EXT_LED_SHARED:
15206 			tp->led_ctrl = LED_CTRL_MODE_SHARED;
15207 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15208 			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15209 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15210 						 LED_CTRL_MODE_PHY_2);
15211 
15212 			if (tg3_flag(tp, 5717_PLUS) ||
15213 			    tg3_asic_rev(tp) == ASIC_REV_5762)
15214 				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15215 						LED_CTRL_BLINK_RATE_MASK;
15216 
15217 			break;
15218 
15219 		case SHASTA_EXT_LED_MAC:
15220 			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15221 			break;
15222 
15223 		case SHASTA_EXT_LED_COMBO:
15224 			tp->led_ctrl = LED_CTRL_MODE_COMBO;
15225 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15226 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15227 						 LED_CTRL_MODE_PHY_2);
15228 			break;
15229 
15230 		}
15231 
15232 		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15233 		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
15234 		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15235 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15236 
15237 		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15238 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15239 
15240 		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15241 			tg3_flag_set(tp, EEPROM_WRITE_PROT);
15242 			if ((tp->pdev->subsystem_vendor ==
15243 			     PCI_VENDOR_ID_ARIMA) &&
15244 			    (tp->pdev->subsystem_device == 0x205a ||
15245 			     tp->pdev->subsystem_device == 0x2063))
15246 				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15247 		} else {
15248 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15249 			tg3_flag_set(tp, IS_NIC);
15250 		}
15251 
15252 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15253 			tg3_flag_set(tp, ENABLE_ASF);
15254 			if (tg3_flag(tp, 5750_PLUS))
15255 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15256 		}
15257 
15258 		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15259 		    tg3_flag(tp, 5750_PLUS))
15260 			tg3_flag_set(tp, ENABLE_APE);
15261 
15262 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15263 		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15264 			tg3_flag_clear(tp, WOL_CAP);
15265 
15266 		if (tg3_flag(tp, WOL_CAP) &&
15267 		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15268 			tg3_flag_set(tp, WOL_ENABLE);
15269 			device_set_wakeup_enable(&tp->pdev->dev, true);
15270 		}
15271 
15272 		if (cfg2 & (1 << 17))
15273 			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15274 
		/* SerDes signal pre-emphasis in register 0x590 is set
		 * by the bootcode if bit 18 is set.
		 */
15277 		if (cfg2 & (1 << 18))
15278 			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15279 
15280 		if ((tg3_flag(tp, 57765_PLUS) ||
15281 		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15282 		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15283 		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15284 			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15285 
15286 		if (tg3_flag(tp, PCI_EXPRESS)) {
15287 			u32 cfg3;
15288 
15289 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15290 			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15291 			    !tg3_flag(tp, 57765_PLUS) &&
15292 			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15293 				tg3_flag_set(tp, ASPM_WORKAROUND);
15294 			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15295 				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15296 			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15297 				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15298 		}
15299 
15300 		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15301 			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15302 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15303 			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15304 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15305 			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15306 
15307 		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15308 			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15309 	}
15310 done:
15311 	if (tg3_flag(tp, WOL_CAP))
15312 		device_set_wakeup_enable(&tp->pdev->dev,
15313 					 tg3_flag(tp, WOL_ENABLE));
15314 	else
15315 		device_set_wakeup_capable(&tp->pdev->dev, false);
15316 }
15317 
15318 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15319 {
15320 	int i, err;
15321 	u32 val2, off = offset * 8;
15322 
15323 	err = tg3_nvram_lock(tp);
15324 	if (err)
15325 		return err;
15326 
15327 	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15328 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15329 			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15330 	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15331 	udelay(10);
15332 
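	/* Poll for command completion for up to ~1 ms (100 x 10 us). */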
15333 	for (i = 0; i < 100; i++) {
15334 		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15335 		if (val2 & APE_OTP_STATUS_CMD_DONE) {
15336 			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15337 			break;
15338 		}
15339 		udelay(10);
15340 	}
15341 
15342 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15343 
15344 	tg3_nvram_unlock(tp);
15345 	if (val2 & APE_OTP_STATUS_CMD_DONE)
15346 		return 0;
15347 
15348 	return -EBUSY;
15349 }
15350 
15351 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15352 {
15353 	int i;
15354 	u32 val;
15355 
15356 	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15357 	tw32(OTP_CTRL, cmd);
15358 
15359 	/* Wait for up to 1 ms for command to execute. */
15360 	for (i = 0; i < 100; i++) {
15361 		val = tr32(OTP_STATUS);
15362 		if (val & OTP_STATUS_CMD_DONE)
15363 			break;
15364 		udelay(10);
15365 	}
15366 
15367 	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15368 }
15369 
15370 /* Read the gphy configuration from the OTP region of the chip.  The gphy
15371  * configuration is a 32-bit value that straddles the alignment boundary.
15372  * We do two 32-bit reads and then shift and merge the results.
15373  */
15374 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15375 {
15376 	u32 bhalf_otp, thalf_otp;
15377 
15378 	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15379 
15380 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15381 		return 0;
15382 
15383 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15384 
15385 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15386 		return 0;
15387 
15388 	thalf_otp = tr32(OTP_READ_DATA);
15389 
15390 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15391 
15392 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15393 		return 0;
15394 
15395 	bhalf_otp = tr32(OTP_READ_DATA);
15396 
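	/* The first read returns the upper 16 bits of the gphy config in
	 * its low half, and the second read the lower 16 bits in its high
	 * half; merge them into a single 32-bit value.
	 */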
15397 	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15398 }
15399 
15400 static void tg3_phy_init_link_config(struct tg3 *tp)
15401 {
15402 	u32 adv = ADVERTISED_Autoneg;
15403 
15404 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15405 		if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15406 			adv |= ADVERTISED_1000baseT_Half;
15407 		adv |= ADVERTISED_1000baseT_Full;
15408 	}
15409 
15410 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15411 		adv |= ADVERTISED_100baseT_Half |
15412 		       ADVERTISED_100baseT_Full |
15413 		       ADVERTISED_10baseT_Half |
15414 		       ADVERTISED_10baseT_Full |
15415 		       ADVERTISED_TP;
15416 	else
15417 		adv |= ADVERTISED_FIBRE;
15418 
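	/* For a typical copper gigabit PHY this works out to autoneg plus
	 * every 10/100/1000 half- and full-duplex mode over twisted pair,
	 * minus 1000 half-duplex when DISABLE_1G_HD_ADV is set.
	 */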
15419 	tp->link_config.advertising = adv;
15420 	tp->link_config.speed = SPEED_UNKNOWN;
15421 	tp->link_config.duplex = DUPLEX_UNKNOWN;
15422 	tp->link_config.autoneg = AUTONEG_ENABLE;
15423 	tp->link_config.active_speed = SPEED_UNKNOWN;
15424 	tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15425 
15426 	tp->old_link = -1;
15427 }
15428 
15429 static int tg3_phy_probe(struct tg3 *tp)
15430 {
15431 	u32 hw_phy_id_1, hw_phy_id_2;
15432 	u32 hw_phy_id, hw_phy_id_masked;
15433 	int err;
15434 
	/* Flow-control autonegotiation is the default behavior. */
15436 	tg3_flag_set(tp, PAUSE_AUTONEG);
15437 	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15438 
15439 	if (tg3_flag(tp, ENABLE_APE)) {
15440 		switch (tp->pci_fn) {
15441 		case 0:
15442 			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15443 			break;
15444 		case 1:
15445 			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15446 			break;
15447 		case 2:
15448 			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15449 			break;
15450 		case 3:
15451 			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15452 			break;
15453 		}
15454 	}
15455 
15456 	if (!tg3_flag(tp, ENABLE_ASF) &&
15457 	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15458 	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15459 		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15460 				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15461 
15462 	if (tg3_flag(tp, USE_PHYLIB))
15463 		return tg3_phy_init(tp);
15464 
15465 	/* Reading the PHY ID register can conflict with ASF
15466 	 * firmware access to the PHY hardware.
15467 	 */
15468 	err = 0;
15469 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15470 		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15471 	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to the PHY_ID found in the EEPROM area and, failing
		 * that, the hard-coded subsystem-ID table.
		 */
15477 		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15478 		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15479 
15480 		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
15481 		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15482 		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
15483 
15484 		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15485 	}
15486 
15487 	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15488 		tp->phy_id = hw_phy_id;
15489 		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15490 			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15491 		else
15492 			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15493 	} else {
15494 		if (tp->phy_id != TG3_PHY_ID_INVALID) {
15495 			/* Do nothing, phy ID already set up in
15496 			 * tg3_get_eeprom_hw_cfg().
15497 			 */
15498 		} else {
15499 			struct subsys_tbl_ent *p;
15500 
15501 			/* No eeprom signature?  Try the hardcoded
15502 			 * subsys device table.
15503 			 */
15504 			p = tg3_lookup_by_subsys(tp);
15505 			if (p) {
15506 				tp->phy_id = p->phy_id;
15507 			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* We have seen the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to a BCM4785, and there are
				 * probably more.  For now, just assume
				 * that the PHY is supported when it is
				 * connected to an SSB core.
				 */
15515 				return -ENODEV;
15516 			}
15517 
15518 			if (!tp->phy_id ||
15519 			    tp->phy_id == TG3_PHY_ID_BCM8002)
15520 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15521 		}
15522 	}
15523 
15524 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15525 	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15526 	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
15527 	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
15528 	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
15529 	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15530 	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15531 	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15532 	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15533 		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15534 
15535 		tp->eee.supported = SUPPORTED_100baseT_Full |
15536 				    SUPPORTED_1000baseT_Full;
15537 		tp->eee.advertised = ADVERTISED_100baseT_Full |
15538 				     ADVERTISED_1000baseT_Full;
15539 		tp->eee.eee_enabled = 1;
15540 		tp->eee.tx_lpi_enabled = 1;
15541 		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15542 	}
15543 
15544 	tg3_phy_init_link_config(tp);
15545 
15546 	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15547 	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15548 	    !tg3_flag(tp, ENABLE_APE) &&
15549 	    !tg3_flag(tp, ENABLE_ASF)) {
15550 		u32 bmsr, dummy;
15551 
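		/* BMSR latches link-down events, so read it twice; the
		 * second read reflects the current link state.
		 */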
15552 		tg3_readphy(tp, MII_BMSR, &bmsr);
15553 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15554 		    (bmsr & BMSR_LSTATUS))
15555 			goto skip_phy_reset;
15556 
15557 		err = tg3_phy_reset(tp);
15558 		if (err)
15559 			return err;
15560 
15561 		tg3_phy_set_wirespeed(tp);
15562 
15563 		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15564 			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15565 					    tp->link_config.flowctrl);
15566 
15567 			tg3_writephy(tp, MII_BMCR,
15568 				     BMCR_ANENABLE | BMCR_ANRESTART);
15569 		}
15570 	}
15571 
15572 skip_phy_reset:
15573 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15574 		err = tg3_init_5401phy_dsp(tp);
15575 		if (err)
15576 			return err;
15577 
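		/* Run the DSP init a second time; on the 5401 the first
		 * pass does not always take, so this looks like a
		 * deliberate retry rather than a stray duplicate.
		 */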
15578 		err = tg3_init_5401phy_dsp(tp);
15579 	}
15580 
15581 	return err;
15582 }
15583 
15584 static void tg3_read_vpd(struct tg3 *tp)
15585 {
15586 	u8 *vpd_data;
15587 	unsigned int len, vpdlen;
15588 	int i;
15589 
15590 	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15591 	if (!vpd_data)
15592 		goto out_no_vpd;
15593 
15594 	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15595 					 PCI_VPD_RO_KEYWORD_MFR_ID, &len);
15596 	if (i < 0)
15597 		goto partno;
15598 
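	/* Check for a Dell board: "1028" is Dell's PCI vendor ID
	 * (0x1028) rendered in ASCII.
	 */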
15599 	if (len != 4 || memcmp(vpd_data + i, "1028", 4))
15600 		goto partno;
15601 
15602 	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15603 					 PCI_VPD_RO_KEYWORD_VENDOR0, &len);
15604 	if (i < 0)
15605 		goto partno;
15606 
15607 	memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15608 	snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, vpd_data + i);
15609 
15610 partno:
15611 	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15612 					 PCI_VPD_RO_KEYWORD_PARTNO, &len);
15613 	if (i < 0)
15614 		goto out_not_found;
15615 
15616 	if (len > TG3_BPN_SIZE)
15617 		goto out_not_found;
15618 
15619 	memcpy(tp->board_part_number, &vpd_data[i], len);
15620 
15621 out_not_found:
15622 	kfree(vpd_data);
15623 	if (tp->board_part_number[0])
15624 		return;
15625 
15626 out_no_vpd:
15627 	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15628 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15629 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15630 			strcpy(tp->board_part_number, "BCM5717");
15631 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15632 			strcpy(tp->board_part_number, "BCM5718");
15633 		else
15634 			goto nomatch;
15635 	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15636 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15637 			strcpy(tp->board_part_number, "BCM57780");
15638 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15639 			strcpy(tp->board_part_number, "BCM57760");
15640 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15641 			strcpy(tp->board_part_number, "BCM57790");
15642 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15643 			strcpy(tp->board_part_number, "BCM57788");
15644 		else
15645 			goto nomatch;
15646 	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15647 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15648 			strcpy(tp->board_part_number, "BCM57761");
15649 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15650 			strcpy(tp->board_part_number, "BCM57765");
15651 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15652 			strcpy(tp->board_part_number, "BCM57781");
15653 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15654 			strcpy(tp->board_part_number, "BCM57785");
15655 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15656 			strcpy(tp->board_part_number, "BCM57791");
15657 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15658 			strcpy(tp->board_part_number, "BCM57795");
15659 		else
15660 			goto nomatch;
15661 	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15662 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15663 			strcpy(tp->board_part_number, "BCM57762");
15664 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15665 			strcpy(tp->board_part_number, "BCM57766");
15666 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15667 			strcpy(tp->board_part_number, "BCM57782");
15668 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15669 			strcpy(tp->board_part_number, "BCM57786");
15670 		else
15671 			goto nomatch;
15672 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15673 		strcpy(tp->board_part_number, "BCM95906");
15674 	} else {
15675 nomatch:
15676 		strcpy(tp->board_part_number, "none");
15677 	}
15678 }
15679 
15680 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15681 {
15682 	u32 val;
15683 
15684 	if (tg3_nvram_read(tp, offset, &val) ||
15685 	    (val & 0xfc000000) != 0x0c000000 ||
15686 	    tg3_nvram_read(tp, offset + 4, &val) ||
15687 	    val != 0)
15688 		return 0;
15689 
15690 	return 1;
15691 }
15692 
15693 static void tg3_read_bc_ver(struct tg3 *tp)
15694 {
15695 	u32 val, offset, start, ver_offset;
15696 	int i, dst_off;
15697 	bool newver = false;
15698 
15699 	if (tg3_nvram_read(tp, 0xc, &offset) ||
15700 	    tg3_nvram_read(tp, 0x4, &start))
15701 		return;
15702 
15703 	offset = tg3_nvram_logical_addr(tp, offset);
15704 
15705 	if (tg3_nvram_read(tp, offset, &val))
15706 		return;
15707 
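	/* A first word with 0x0c in its top six bits followed by a zero
	 * second word marks the newer bootcode image layout (the same
	 * signature tg3_fw_img_is_valid() checks); it stores a pointer
	 * to the version string at offset +8.
	 */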
15708 	if ((val & 0xfc000000) == 0x0c000000) {
15709 		if (tg3_nvram_read(tp, offset + 4, &val))
15710 			return;
15711 
15712 		if (val == 0)
15713 			newver = true;
15714 	}
15715 
15716 	dst_off = strlen(tp->fw_ver);
15717 
15718 	if (newver) {
15719 		if (TG3_VER_SIZE - dst_off < 16 ||
15720 		    tg3_nvram_read(tp, offset + 8, &ver_offset))
15721 			return;
15722 
15723 		offset = offset + ver_offset - start;
15724 		for (i = 0; i < 16; i += 4) {
15725 			__be32 v;
15726 			if (tg3_nvram_read_be32(tp, offset + i, &v))
15727 				return;
15728 
15729 			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15730 		}
15731 	} else {
15732 		u32 major, minor;
15733 
15734 		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15735 			return;
15736 
15737 		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15738 			TG3_NVM_BCVER_MAJSFT;
15739 		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15740 		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15741 			 "v%d.%02d", major, minor);
15742 	}
15743 }
15744 
15745 static void tg3_read_hwsb_ver(struct tg3 *tp)
15746 {
15747 	u32 val, major, minor;
15748 
15749 	/* Use native endian representation */
15750 	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15751 		return;
15752 
15753 	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15754 		TG3_NVM_HWSB_CFG1_MAJSFT;
15755 	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15756 		TG3_NVM_HWSB_CFG1_MINSFT;
15757 
	snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
15759 }
15760 
15761 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15762 {
15763 	u32 offset, major, minor, build;
15764 
15765 	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15766 
15767 	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15768 		return;
15769 
15770 	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15771 	case TG3_EEPROM_SB_REVISION_0:
15772 		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15773 		break;
15774 	case TG3_EEPROM_SB_REVISION_2:
15775 		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15776 		break;
15777 	case TG3_EEPROM_SB_REVISION_3:
15778 		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15779 		break;
15780 	case TG3_EEPROM_SB_REVISION_4:
15781 		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15782 		break;
15783 	case TG3_EEPROM_SB_REVISION_5:
15784 		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15785 		break;
15786 	case TG3_EEPROM_SB_REVISION_6:
15787 		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15788 		break;
15789 	default:
15790 		return;
15791 	}
15792 
15793 	if (tg3_nvram_read(tp, offset, &val))
15794 		return;
15795 
15796 	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15797 		TG3_EEPROM_SB_EDH_BLD_SHFT;
15798 	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15799 		TG3_EEPROM_SB_EDH_MAJ_SHFT;
15800 	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15801 
15802 	if (minor > 99 || build > 26)
15803 		return;
15804 
15805 	offset = strlen(tp->fw_ver);
15806 	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15807 		 " v%d.%02d", major, minor);
15808 
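	/* Builds 1-26 are appended as a lowercase letter, so build 3 of
	 * v1.02 reads "v1.02c".
	 */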
15809 	if (build > 0) {
15810 		offset = strlen(tp->fw_ver);
15811 		if (offset < TG3_VER_SIZE - 1)
15812 			tp->fw_ver[offset] = 'a' + build - 1;
15813 	}
15814 }
15815 
15816 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15817 {
15818 	u32 val, offset, start;
15819 	int i, vlen;
15820 
15821 	for (offset = TG3_NVM_DIR_START;
15822 	     offset < TG3_NVM_DIR_END;
15823 	     offset += TG3_NVM_DIRENT_SIZE) {
15824 		if (tg3_nvram_read(tp, offset, &val))
15825 			return;
15826 
15827 		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15828 			break;
15829 	}
15830 
15831 	if (offset == TG3_NVM_DIR_END)
15832 		return;
15833 
15834 	if (!tg3_flag(tp, 5705_PLUS))
15835 		start = 0x08000000;
15836 	else if (tg3_nvram_read(tp, offset - 4, &start))
15837 		return;
15838 
15839 	if (tg3_nvram_read(tp, offset + 4, &offset) ||
15840 	    !tg3_fw_img_is_valid(tp, offset) ||
15841 	    tg3_nvram_read(tp, offset + 8, &val))
15842 		return;
15843 
15844 	offset += val - start;
15845 
15846 	vlen = strlen(tp->fw_ver);
15847 
15848 	tp->fw_ver[vlen++] = ',';
15849 	tp->fw_ver[vlen++] = ' ';
15850 
15851 	for (i = 0; i < 4; i++) {
15852 		__be32 v;
15853 		if (tg3_nvram_read_be32(tp, offset, &v))
15854 			return;
15855 
15856 		offset += sizeof(v);
15857 
15858 		if (vlen > TG3_VER_SIZE - sizeof(v)) {
15859 			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15860 			break;
15861 		}
15862 
15863 		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15864 		vlen += sizeof(v);
15865 	}
15866 }
15867 
15868 static void tg3_probe_ncsi(struct tg3 *tp)
15869 {
15870 	u32 apedata;
15871 
15872 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15873 	if (apedata != APE_SEG_SIG_MAGIC)
15874 		return;
15875 
15876 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15877 	if (!(apedata & APE_FW_STATUS_READY))
15878 		return;
15879 
15880 	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15881 		tg3_flag_set(tp, APE_HAS_NCSI);
15882 }
15883 
15884 static void tg3_read_dash_ver(struct tg3 *tp)
15885 {
15886 	int vlen;
15887 	u32 apedata;
15888 	char *fwtype;
15889 
15890 	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15891 
15892 	if (tg3_flag(tp, APE_HAS_NCSI))
15893 		fwtype = "NCSI";
15894 	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15895 		fwtype = "SMASH";
15896 	else
15897 		fwtype = "DASH";
15898 
15899 	vlen = strlen(tp->fw_ver);
15900 
15901 	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15902 		 fwtype,
15903 		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15904 		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15905 		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15906 		 (apedata & APE_FW_VERSION_BLDMSK));
15907 }
15908 
15909 static void tg3_read_otp_ver(struct tg3 *tp)
15910 {
15911 	u32 val, val2;
15912 
15913 	if (tg3_asic_rev(tp) != ASIC_REV_5762)
15914 		return;
15915 
15916 	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15917 	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15918 	    TG3_OTP_MAGIC0_VALID(val)) {
15919 		u64 val64 = (u64) val << 32 | val2;
15920 		u32 ver = 0;
15921 		int i, vlen;
15922 
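		/* Scan the OTP value byte by byte from the low end; the
		 * last non-zero byte before the terminating zero is the
		 * version number.
		 */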
15923 		for (i = 0; i < 7; i++) {
15924 			if ((val64 & 0xff) == 0)
15925 				break;
15926 			ver = val64 & 0xff;
15927 			val64 >>= 8;
15928 		}
15929 		vlen = strlen(tp->fw_ver);
15930 		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15931 	}
15932 }
15933 
15934 static void tg3_read_fw_ver(struct tg3 *tp)
15935 {
15936 	u32 val;
15937 	bool vpd_vers = false;
15938 
15939 	if (tp->fw_ver[0] != 0)
15940 		vpd_vers = true;
15941 
15942 	if (tg3_flag(tp, NO_NVRAM)) {
15943 		strcat(tp->fw_ver, "sb");
15944 		tg3_read_otp_ver(tp);
15945 		return;
15946 	}
15947 
15948 	if (tg3_nvram_read(tp, 0, &val))
15949 		return;
15950 
15951 	if (val == TG3_EEPROM_MAGIC)
15952 		tg3_read_bc_ver(tp);
15953 	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15954 		tg3_read_sb_ver(tp, val);
15955 	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15956 		tg3_read_hwsb_ver(tp);
15957 
15958 	if (tg3_flag(tp, ENABLE_ASF)) {
15959 		if (tg3_flag(tp, ENABLE_APE)) {
15960 			tg3_probe_ncsi(tp);
15961 			if (!vpd_vers)
15962 				tg3_read_dash_ver(tp);
15963 		} else if (!vpd_vers) {
15964 			tg3_read_mgmtfw_ver(tp);
15965 		}
15966 	}
15967 
15968 	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15969 }
15970 
15971 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15972 {
15973 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
15974 		return TG3_RX_RET_MAX_SIZE_5717;
15975 	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15976 		return TG3_RX_RET_MAX_SIZE_5700;
15977 	else
15978 		return TG3_RX_RET_MAX_SIZE_5705;
15979 }
15980 
15981 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
15982 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15983 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15984 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15985 	{ },
15986 };
15987 
15988 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15989 {
15990 	struct pci_dev *peer;
15991 	unsigned int func, devnr = tp->pdev->devfn & ~7;
15992 
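	/* devfn & ~7 clears the three function-number bits, giving the
	 * devfn of function 0 in this slot; probe all eight functions
	 * for a sibling device.
	 */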
15993 	for (func = 0; func < 8; func++) {
15994 		peer = pci_get_slot(tp->pdev->bus, devnr | func);
15995 		if (peer && peer != tp->pdev)
15996 			break;
15997 		pci_dev_put(peer);
15998 	}
	/* The 5704 can be configured in single-port mode; set the peer
	 * to tp->pdev in that case.
	 */
16002 	if (!peer) {
16003 		peer = tp->pdev;
16004 		return peer;
16005 	}
16006 
	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other.
	 */
16011 	pci_dev_put(peer);
16012 
16013 	return peer;
16014 }
16015 
16016 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16017 {
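	/* The chip revision ID carries the ASIC revision in its upper
	 * bits; tg3_asic_rev() and tg3_chip_rev() extract progressively
	 * coarser revision fields from it.
	 */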
16018 	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16019 	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16020 		u32 reg;
16021 
16022 		/* All devices that use the alternate
16023 		 * ASIC REV location have a CPMU.
16024 		 */
16025 		tg3_flag_set(tp, CPMU_PRESENT);
16026 
16027 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16028 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16029 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16030 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16031 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16032 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16033 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16034 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16035 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16036 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16037 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16038 			reg = TG3PCI_GEN2_PRODID_ASICREV;
16039 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16040 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16041 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16042 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16043 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16044 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16045 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16046 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16047 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16048 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16049 			reg = TG3PCI_GEN15_PRODID_ASICREV;
16050 		else
16051 			reg = TG3PCI_PRODID_ASICREV;
16052 
16053 		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16054 	}
16055 
16056 	/* Wrong chip ID in 5752 A0. This code can be removed later
16057 	 * as A0 is not in production.
16058 	 */
16059 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16060 		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16061 
16062 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16063 		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16064 
16065 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16066 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16067 	    tg3_asic_rev(tp) == ASIC_REV_5720)
16068 		tg3_flag_set(tp, 5717_PLUS);
16069 
16070 	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16071 	    tg3_asic_rev(tp) == ASIC_REV_57766)
16072 		tg3_flag_set(tp, 57765_CLASS);
16073 
16074 	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16075 	     tg3_asic_rev(tp) == ASIC_REV_5762)
16076 		tg3_flag_set(tp, 57765_PLUS);
16077 
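	/* The generation flags below nest: each newer-family check also
	 * feeds the older-family checks, so e.g. a 57765_PLUS device
	 * ends up with 5755_PLUS, 5750_PLUS and 5705_PLUS set as well.
	 */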
16078 	/* Intentionally exclude ASIC_REV_5906 */
16079 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16080 	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
16081 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
16082 	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
16083 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
16084 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
16085 	    tg3_flag(tp, 57765_PLUS))
16086 		tg3_flag_set(tp, 5755_PLUS);
16087 
16088 	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16089 	    tg3_asic_rev(tp) == ASIC_REV_5714)
16090 		tg3_flag_set(tp, 5780_CLASS);
16091 
16092 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16093 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
16094 	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
16095 	    tg3_flag(tp, 5755_PLUS) ||
16096 	    tg3_flag(tp, 5780_CLASS))
16097 		tg3_flag_set(tp, 5750_PLUS);
16098 
16099 	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16100 	    tg3_flag(tp, 5750_PLUS))
16101 		tg3_flag_set(tp, 5705_PLUS);
16102 }
16103 
16104 static bool tg3_10_100_only_device(struct tg3 *tp,
16105 				   const struct pci_device_id *ent)
16106 {
16107 	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16108 
16109 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16110 	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16111 	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
16112 		return true;
16113 
16114 	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16115 		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16116 			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16117 				return true;
16118 		} else {
16119 			return true;
16120 		}
16121 	}
16122 
16123 	return false;
16124 }
16125 
16126 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16127 {
16128 	u32 misc_ctrl_reg;
16129 	u32 pci_state_reg, grc_misc_cfg;
16130 	u32 val;
16131 	u16 pci_cmd;
16132 	int err;
16133 
	/* Force memory write invalidate off.  If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size.  The Broadcom driver has this
	 * workaround but turns MWI off all the time, so it is never
	 * used.  This seems to suggest that the workaround is
	 * insufficient.
	 */
16141 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16142 	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16143 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16144 
16145 	/* Important! -- Make sure register accesses are byteswapped
16146 	 * correctly.  Also, for those chips that require it, make
16147 	 * sure that indirect register accesses are enabled before
16148 	 * the first operation.
16149 	 */
16150 	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16151 			      &misc_ctrl_reg);
16152 	tp->misc_host_ctrl |= (misc_ctrl_reg &
16153 			       MISC_HOST_CTRL_CHIPREV);
16154 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16155 			       tp->misc_host_ctrl);
16156 
16157 	tg3_detect_asic_rev(tp, misc_ctrl_reg);
16158 
	/* If we have a 5702/03 A1 or A2 on certain ICH chipsets,
	 * we need to disable memory-space accesses and use config
	 * cycles only to access all registers. The 5702/03 chips
16162 	 * can mistakenly decode the special cycles from the
16163 	 * ICH chipsets as memory write cycles, causing corruption
16164 	 * of register and memory space. Only certain ICH bridges
16165 	 * will drive special cycles with non-zero data during the
16166 	 * address phase which can fall within the 5703's address
16167 	 * range. This is not an ICH bug as the PCI spec allows
16168 	 * non-zero address during special cycles. However, only
16169 	 * these ICH bridges are known to drive non-zero addresses
16170 	 * during special cycles.
16171 	 *
16172 	 * Since special cycles do not cross PCI bridges, we only
16173 	 * enable this workaround if the 5703 is on the secondary
16174 	 * bus of these ICH bridges.
16175 	 */
16176 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16177 	    (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16178 		static struct tg3_dev_id {
16179 			u32	vendor;
16180 			u32	device;
16181 			u32	rev;
16182 		} ich_chipsets[] = {
16183 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16184 			  PCI_ANY_ID },
16185 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16186 			  PCI_ANY_ID },
16187 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16188 			  0xa },
16189 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16190 			  PCI_ANY_ID },
16191 			{ },
16192 		};
16193 		struct tg3_dev_id *pci_id = &ich_chipsets[0];
16194 		struct pci_dev *bridge = NULL;
16195 
16196 		while (pci_id->vendor != 0) {
16197 			bridge = pci_get_device(pci_id->vendor, pci_id->device,
16198 						bridge);
16199 			if (!bridge) {
16200 				pci_id++;
16201 				continue;
16202 			}
16203 			if (pci_id->rev != PCI_ANY_ID) {
16204 				if (bridge->revision > pci_id->rev)
16205 					continue;
16206 			}
16207 			if (bridge->subordinate &&
16208 			    (bridge->subordinate->number ==
16209 			     tp->pdev->bus->number)) {
16210 				tg3_flag_set(tp, ICH_WORKAROUND);
16211 				pci_dev_put(bridge);
16212 				break;
16213 			}
16214 		}
16215 	}
16216 
16217 	if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16218 		static struct tg3_dev_id {
16219 			u32	vendor;
16220 			u32	device;
16221 		} bridge_chipsets[] = {
16222 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16223 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16224 			{ },
16225 		};
16226 		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16227 		struct pci_dev *bridge = NULL;
16228 
16229 		while (pci_id->vendor != 0) {
16230 			bridge = pci_get_device(pci_id->vendor,
16231 						pci_id->device,
16232 						bridge);
16233 			if (!bridge) {
16234 				pci_id++;
16235 				continue;
16236 			}
16237 			if (bridge->subordinate &&
16238 			    (bridge->subordinate->number <=
16239 			     tp->pdev->bus->number) &&
16240 			    (bridge->subordinate->busn_res.end >=
16241 			     tp->pdev->bus->number)) {
16242 				tg3_flag_set(tp, 5701_DMA_BUG);
16243 				pci_dev_put(bridge);
16244 				break;
16245 			}
16246 		}
16247 	}
16248 
	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
	 * DMA addresses > 40-bit.  In some 4-port NIC designs, for
	 * example, this bridge may have additional 57xx devices behind
	 * it.  Any tg3 device found behind the bridge will also need
	 * the 40-bit DMA workaround.
	 */
16255 	if (tg3_flag(tp, 5780_CLASS)) {
16256 		tg3_flag_set(tp, 40BIT_DMA_BUG);
16257 		tp->msi_cap = tp->pdev->msi_cap;
16258 	} else {
16259 		struct pci_dev *bridge = NULL;
16260 
16261 		do {
16262 			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16263 						PCI_DEVICE_ID_SERVERWORKS_EPB,
16264 						bridge);
16265 			if (bridge && bridge->subordinate &&
16266 			    (bridge->subordinate->number <=
16267 			     tp->pdev->bus->number) &&
16268 			    (bridge->subordinate->busn_res.end >=
16269 			     tp->pdev->bus->number)) {
16270 				tg3_flag_set(tp, 40BIT_DMA_BUG);
16271 				pci_dev_put(bridge);
16272 				break;
16273 			}
16274 		} while (bridge);
16275 	}
16276 
16277 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16278 	    tg3_asic_rev(tp) == ASIC_REV_5714)
16279 		tp->pdev_peer = tg3_find_peer(tp);
16280 
16281 	/* Determine TSO capabilities */
16282 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16283 		; /* Do nothing. HW bug. */
16284 	else if (tg3_flag(tp, 57765_PLUS))
16285 		tg3_flag_set(tp, HW_TSO_3);
16286 	else if (tg3_flag(tp, 5755_PLUS) ||
16287 		 tg3_asic_rev(tp) == ASIC_REV_5906)
16288 		tg3_flag_set(tp, HW_TSO_2);
16289 	else if (tg3_flag(tp, 5750_PLUS)) {
16290 		tg3_flag_set(tp, HW_TSO_1);
16291 		tg3_flag_set(tp, TSO_BUG);
16292 		if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16293 		    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16294 			tg3_flag_clear(tp, TSO_BUG);
16295 	} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16296 		   tg3_asic_rev(tp) != ASIC_REV_5701 &&
16297 		   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16298 		tg3_flag_set(tp, FW_TSO);
16299 		tg3_flag_set(tp, TSO_BUG);
16300 		if (tg3_asic_rev(tp) == ASIC_REV_5705)
16301 			tp->fw_needed = FIRMWARE_TG3TSO5;
16302 		else
16303 			tp->fw_needed = FIRMWARE_TG3TSO;
16304 	}
16305 
16306 	/* Selectively allow TSO based on operating conditions */
16307 	if (tg3_flag(tp, HW_TSO_1) ||
16308 	    tg3_flag(tp, HW_TSO_2) ||
16309 	    tg3_flag(tp, HW_TSO_3) ||
16310 	    tg3_flag(tp, FW_TSO)) {
16311 		/* For firmware TSO, assume ASF is disabled.
16312 		 * We'll disable TSO later if we discover ASF
16313 		 * is enabled in tg3_get_eeprom_hw_cfg().
16314 		 */
16315 		tg3_flag_set(tp, TSO_CAPABLE);
16316 	} else {
16317 		tg3_flag_clear(tp, TSO_CAPABLE);
16318 		tg3_flag_clear(tp, TSO_BUG);
16319 		tp->fw_needed = NULL;
16320 	}
16321 
16322 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16323 		tp->fw_needed = FIRMWARE_TG3;
16324 
16325 	if (tg3_asic_rev(tp) == ASIC_REV_57766)
16326 		tp->fw_needed = FIRMWARE_TG357766;
16327 
16328 	tp->irq_max = 1;
16329 
16330 	if (tg3_flag(tp, 5750_PLUS)) {
16331 		tg3_flag_set(tp, SUPPORT_MSI);
16332 		if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16333 		    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16334 		    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16335 		     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16336 		     tp->pdev_peer == tp->pdev))
16337 			tg3_flag_clear(tp, SUPPORT_MSI);
16338 
16339 		if (tg3_flag(tp, 5755_PLUS) ||
16340 		    tg3_asic_rev(tp) == ASIC_REV_5906) {
16341 			tg3_flag_set(tp, 1SHOT_MSI);
16342 		}
16343 
16344 		if (tg3_flag(tp, 57765_PLUS)) {
16345 			tg3_flag_set(tp, SUPPORT_MSIX);
16346 			tp->irq_max = TG3_IRQ_MAX_VECS;
16347 		}
16348 	}
16349 
16350 	tp->txq_max = 1;
16351 	tp->rxq_max = 1;
16352 	if (tp->irq_max > 1) {
16353 		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16354 		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16355 
16356 		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16357 		    tg3_asic_rev(tp) == ASIC_REV_5720)
16358 			tp->txq_max = tp->irq_max - 1;
16359 	}
16360 
16361 	if (tg3_flag(tp, 5755_PLUS) ||
16362 	    tg3_asic_rev(tp) == ASIC_REV_5906)
16363 		tg3_flag_set(tp, SHORT_DMA_BUG);
16364 
16365 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
16366 		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16367 
16368 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16369 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16370 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
16371 	    tg3_asic_rev(tp) == ASIC_REV_5762)
16372 		tg3_flag_set(tp, LRG_PROD_RING_CAP);
16373 
16374 	if (tg3_flag(tp, 57765_PLUS) &&
16375 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16376 		tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16377 
16378 	if (!tg3_flag(tp, 5705_PLUS) ||
16379 	    tg3_flag(tp, 5780_CLASS) ||
16380 	    tg3_flag(tp, USE_JUMBO_BDFLAG))
16381 		tg3_flag_set(tp, JUMBO_CAPABLE);
16382 
16383 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16384 			      &pci_state_reg);
16385 
16386 	if (pci_is_pcie(tp->pdev)) {
16387 		u16 lnkctl;
16388 
16389 		tg3_flag_set(tp, PCI_EXPRESS);
16390 
16391 		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16392 		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16393 			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16394 				tg3_flag_clear(tp, HW_TSO_2);
16395 				tg3_flag_clear(tp, TSO_CAPABLE);
16396 			}
16397 			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16398 			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
16399 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16400 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16401 				tg3_flag_set(tp, CLKREQ_BUG);
16402 		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16403 			tg3_flag_set(tp, L1PLLPD_EN);
16404 		}
16405 	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16406 		/* BCM5785 devices are effectively PCIe devices, and should
16407 		 * follow PCIe codepaths, but do not have a PCIe capabilities
16408 		 * section.
16409 		 */
16410 		tg3_flag_set(tp, PCI_EXPRESS);
16411 	} else if (!tg3_flag(tp, 5705_PLUS) ||
16412 		   tg3_flag(tp, 5780_CLASS)) {
16413 		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16414 		if (!tp->pcix_cap) {
16415 			dev_err(&tp->pdev->dev,
16416 				"Cannot find PCI-X capability, aborting\n");
16417 			return -EIO;
16418 		}
16419 
16420 		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16421 			tg3_flag_set(tp, PCIX_MODE);
16422 	}
16423 
	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major problems.  We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
16430 	if (pci_dev_present(tg3_write_reorder_chipsets) &&
16431 	    !tg3_flag(tp, PCI_EXPRESS))
16432 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
16433 
16434 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16435 			     &tp->pci_cacheline_sz);
16436 	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16437 			     &tp->pci_lat_timer);
16438 	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16439 	    tp->pci_lat_timer < 64) {
16440 		tp->pci_lat_timer = 64;
16441 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16442 				      tp->pci_lat_timer);
16443 	}
16444 
16445 	/* Important! -- It is critical that the PCI-X hw workaround
16446 	 * situation is decided before the first MMIO register access.
16447 	 */
16448 	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to work around a bug.
		 */
16452 		tg3_flag_set(tp, TXD_MBOX_HWBUG);
16453 
		/* If we are in PCI-X mode, enable the register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes except those to mailbox registers.
		 */
16459 		if (tg3_flag(tp, PCIX_MODE)) {
16460 			u32 pm_reg;
16461 
16462 			tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16463 
			/* The chip can have its power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
16468 			pci_read_config_dword(tp->pdev,
16469 					      tp->pdev->pm_cap + PCI_PM_CTRL,
16470 					      &pm_reg);
16471 			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16472 			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16473 			pci_write_config_dword(tp->pdev,
16474 					       tp->pdev->pm_cap + PCI_PM_CTRL,
16475 					       pm_reg);
16476 
16477 			/* Also, force SERR#/PERR# in PCI command. */
16478 			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16479 			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16480 			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16481 		}
16482 	}
16483 
16484 	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16485 		tg3_flag_set(tp, PCI_HIGH_SPEED);
16486 	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16487 		tg3_flag_set(tp, PCI_32BIT);
16488 
16489 	/* Chip-specific fixup from Broadcom driver */
16490 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16491 	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16492 		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16493 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16494 	}
16495 
16496 	/* Default fast path register access methods */
16497 	tp->read32 = tg3_read32;
16498 	tp->write32 = tg3_write32;
16499 	tp->read32_mbox = tg3_read32;
16500 	tp->write32_mbox = tg3_write32;
16501 	tp->write32_tx_mbox = tg3_write32;
16502 	tp->write32_rx_mbox = tg3_write32;
16503 
16504 	/* Various workaround register access methods */
16505 	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16506 		tp->write32 = tg3_write_indirect_reg32;
16507 	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16508 		 (tg3_flag(tp, PCI_EXPRESS) &&
16509 		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16510 		/*
		 * Back-to-back register writes can cause problems on these
		 * chips; the workaround is to read back all reg writes
16513 		 * except those to mailbox regs.
16514 		 *
16515 		 * See tg3_write_indirect_reg32().
16516 		 */
16517 		tp->write32 = tg3_write_flush_reg32;
16518 	}
16519 
16520 	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16521 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
16522 		if (tg3_flag(tp, MBOX_WRITE_REORDER))
16523 			tp->write32_rx_mbox = tg3_write_flush_reg32;
16524 	}
16525 
16526 	if (tg3_flag(tp, ICH_WORKAROUND)) {
16527 		tp->read32 = tg3_read_indirect_reg32;
16528 		tp->write32 = tg3_write_indirect_reg32;
16529 		tp->read32_mbox = tg3_read_indirect_mbox;
16530 		tp->write32_mbox = tg3_write_indirect_mbox;
16531 		tp->write32_tx_mbox = tg3_write_indirect_mbox;
16532 		tp->write32_rx_mbox = tg3_write_indirect_mbox;
16533 
16534 		iounmap(tp->regs);
16535 		tp->regs = NULL;
16536 
16537 		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16538 		pci_cmd &= ~PCI_COMMAND_MEMORY;
16539 		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16540 	}
16541 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16542 		tp->read32_mbox = tg3_read32_mbox_5906;
16543 		tp->write32_mbox = tg3_write32_mbox_5906;
16544 		tp->write32_tx_mbox = tg3_write32_mbox_5906;
16545 		tp->write32_rx_mbox = tg3_write32_mbox_5906;
16546 	}
16547 
16548 	if (tp->write32 == tg3_write_indirect_reg32 ||
16549 	    (tg3_flag(tp, PCIX_MODE) &&
16550 	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16551 	      tg3_asic_rev(tp) == ASIC_REV_5701)))
16552 		tg3_flag_set(tp, SRAM_USE_CONFIG);
16553 
16554 	/* The memory arbiter has to be enabled in order for SRAM accesses
16555 	 * to succeed.  Normally on powerup the tg3 chip firmware will make
16556 	 * sure it is enabled, but other entities such as system netboot
16557 	 * code might disable it.
16558 	 */
16559 	val = tr32(MEMARB_MODE);
16560 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16561 
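	/* Start with the function number from the PCI devfn; some
	 * multi-function chips report the true function number through
	 * chip registers instead, handled below.
	 */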
16562 	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16563 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16564 	    tg3_flag(tp, 5780_CLASS)) {
16565 		if (tg3_flag(tp, PCIX_MODE)) {
16566 			pci_read_config_dword(tp->pdev,
16567 					      tp->pcix_cap + PCI_X_STATUS,
16568 					      &val);
16569 			tp->pci_fn = val & 0x7;
16570 		}
16571 	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16572 		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
16573 		   tg3_asic_rev(tp) == ASIC_REV_5720) {
16574 		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16575 		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16576 			val = tr32(TG3_CPMU_STATUS);
16577 
16578 		if (tg3_asic_rev(tp) == ASIC_REV_5717)
16579 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16580 		else
16581 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16582 				     TG3_CPMU_STATUS_FSHFT_5719;
16583 	}
16584 
16585 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16586 		tp->write32_tx_mbox = tg3_write_flush_reg32;
16587 		tp->write32_rx_mbox = tg3_write_flush_reg32;
16588 	}
16589 
16590 	/* Get eeprom hw config before calling tg3_set_power_state().
16591 	 * In particular, the TG3_FLAG_IS_NIC flag must be
16592 	 * determined before calling tg3_set_power_state() so that
16593 	 * we know whether or not to switch out of Vaux power.
16594 	 * When the flag is set, it means that GPIO1 is used for eeprom
16595 	 * write protect and also implies that it is a LOM where GPIOs
16596 	 * are not used to switch power.
16597 	 */
16598 	tg3_get_eeprom_hw_cfg(tp);
16599 
16600 	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16601 		tg3_flag_clear(tp, TSO_CAPABLE);
16602 		tg3_flag_clear(tp, TSO_BUG);
16603 		tp->fw_needed = NULL;
16604 	}
16605 
16606 	if (tg3_flag(tp, ENABLE_APE)) {
16607 		/* Allow reads and writes to the
16608 		 * APE register and memory space.
16609 		 */
16610 		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16611 				 PCISTATE_ALLOW_APE_SHMEM_WR |
16612 				 PCISTATE_ALLOW_APE_PSPACE_WR;
16613 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16614 				       pci_state_reg);
16615 
16616 		tg3_ape_lock_init(tp);
16617 		tp->ape_hb_interval =
16618 			msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16619 	}
16620 
16621 	/* Set up tp->grc_local_ctrl before calling
16622 	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16623 	 * will bring 5700's external PHY out of reset.
16624 	 * It is also used as eeprom write protect on LOMs.
16625 	 */
16626 	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16627 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16628 	    tg3_flag(tp, EEPROM_WRITE_PROT))
16629 		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16630 				       GRC_LCLCTRL_GPIO_OUTPUT1);
16631 	/* Unused GPIO3 must be driven as output on 5752 because there
16632 	 * are no pull-up resistors on unused GPIO pins.
16633 	 */
16634 	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16635 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16636 
16637 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16638 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
16639 	    tg3_flag(tp, 57765_CLASS))
16640 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16641 
16642 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16643 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16644 		/* Turn off the debug UART. */
16645 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16646 		if (tg3_flag(tp, IS_NIC))
16647 			/* Keep VMain power. */
16648 			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16649 					      GRC_LCLCTRL_GPIO_OUTPUT0;
16650 	}
16651 
16652 	if (tg3_asic_rev(tp) == ASIC_REV_5762)
16653 		tp->grc_local_ctrl |=
16654 			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16655 
16656 	/* Switch out of Vaux if it is a NIC */
16657 	tg3_pwrsrc_switch_to_vmain(tp);
16658 
16659 	/* Derive initial jumbo mode from MTU assigned in
16660 	 * ether_setup() via the alloc_etherdev() call
16661 	 */
16662 	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16663 		tg3_flag_set(tp, JUMBO_RING_ENABLE);
16664 
16665 	/* Determine WakeOnLan speed to use. */
16666 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16667 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16668 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16669 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16670 		tg3_flag_clear(tp, WOL_SPEED_100MB);
16671 	} else {
16672 		tg3_flag_set(tp, WOL_SPEED_100MB);
16673 	}
16674 
16675 	if (tg3_asic_rev(tp) == ASIC_REV_5906)
16676 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
16677 
	/* A few boards don't want the Ethernet@WireSpeed PHY feature */
16679 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16680 	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16681 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16682 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16683 	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16684 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16685 		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16686 
16687 	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16688 	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
16689 		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16690 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16691 		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16692 
16693 	if (tg3_flag(tp, 5705_PLUS) &&
16694 	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16695 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
16696 	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
16697 	    !tg3_flag(tp, 57765_PLUS)) {
16698 		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16699 		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
16700 		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
16701 		    tg3_asic_rev(tp) == ASIC_REV_5761) {
16702 			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16703 			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16704 				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16705 			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16706 				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16707 		} else
16708 			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16709 	}
16710 
16711 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16712 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16713 		tp->phy_otp = tg3_read_otp_phycfg(tp);
16714 		if (tp->phy_otp == 0)
16715 			tp->phy_otp = TG3_OTP_DEFAULT;
16716 	}
16717 
16718 	if (tg3_flag(tp, CPMU_PRESENT))
16719 		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16720 	else
16721 		tp->mi_mode = MAC_MI_MODE_BASE;
16722 
16723 	tp->coalesce_mode = 0;
16724 	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16725 	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
16726 		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16727 
16728 	/* Set these bits to enable statistics workaround. */
16729 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16730 	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
16731 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16732 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16733 		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16734 		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16735 	}
16736 
16737 	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16738 	    tg3_asic_rev(tp) == ASIC_REV_57780)
16739 		tg3_flag_set(tp, USE_PHYLIB);
16740 
16741 	err = tg3_mdio_init(tp);
16742 	if (err)
16743 		return err;
16744 
16745 	/* Initialize data/descriptor byte/word swapping. */
16746 	val = tr32(GRC_MODE);
16747 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16748 	    tg3_asic_rev(tp) == ASIC_REV_5762)
16749 		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16750 			GRC_MODE_WORD_SWAP_B2HRX_DATA |
16751 			GRC_MODE_B2HRX_ENABLE |
16752 			GRC_MODE_HTX2B_ENABLE |
16753 			GRC_MODE_HOST_STACKUP);
16754 	else
16755 		val &= GRC_MODE_HOST_STACKUP;
16756 
16757 	tw32(GRC_MODE, val | tp->grc_mode);
16758 
16759 	tg3_switch_clocks(tp);
16760 
16761 	/* Clear this out for sanity. */
16762 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16763 
16764 	/* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16765 	tw32(TG3PCI_REG_BASE_ADDR, 0);
16766 
16767 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16768 			      &pci_state_reg);
16769 	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16770 	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16771 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16772 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16773 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16774 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16775 			void __iomem *sram_base;
16776 
			/* Write some dummy words into the SRAM status block
			 * area and see if they read back correctly.  If the
			 * read-back value is bad, force-enable the PCIX
			 * workaround.
			 */
16781 			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16782 
16783 			writel(0x00000000, sram_base);
16784 			writel(0x00000000, sram_base + 4);
16785 			writel(0xffffffff, sram_base + 4);
16786 			if (readl(sram_base) != 0x00000000)
16787 				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16788 		}
16789 	}
16790 
16791 	udelay(50);
16792 	tg3_nvram_init(tp);
16793 
16794 	/* If the device has an NVRAM, no need to load patch firmware */
16795 	if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16796 	    !tg3_flag(tp, NO_NVRAM))
16797 		tp->fw_needed = NULL;
16798 
16799 	grc_misc_cfg = tr32(GRC_MISC_CFG);
16800 	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16801 
16802 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16803 	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16804 	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16805 		tg3_flag_set(tp, IS_5788);
16806 
16807 	if (!tg3_flag(tp, IS_5788) &&
16808 	    tg3_asic_rev(tp) != ASIC_REV_5700)
16809 		tg3_flag_set(tp, TAGGED_STATUS);
16810 	if (tg3_flag(tp, TAGGED_STATUS)) {
16811 		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16812 				      HOSTCC_MODE_CLRTICK_TXBD);
16813 
16814 		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16815 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16816 				       tp->misc_host_ctrl);
16817 	}
16818 
16819 	/* Preserve the APE MAC_MODE bits */
16820 	if (tg3_flag(tp, ENABLE_APE))
16821 		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16822 	else
16823 		tp->mac_mode = 0;
16824 
16825 	if (tg3_10_100_only_device(tp, ent))
16826 		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16827 
16828 	err = tg3_phy_probe(tp);
16829 	if (err) {
16830 		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16831 		/* ... but do not return immediately ... */
16832 		tg3_mdio_fini(tp);
16833 	}
16834 
16835 	tg3_read_vpd(tp);
16836 	tg3_read_fw_ver(tp);
16837 
16838 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16839 		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16840 	} else {
16841 		if (tg3_asic_rev(tp) == ASIC_REV_5700)
16842 			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16843 		else
16844 			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16845 	}
16846 
16847 	/* 5700 {AX,BX} chips have a broken status block link
16848 	 * change bit implementation, so we must use the
16849 	 * status register in those cases.
16850 	 */
16851 	if (tg3_asic_rev(tp) == ASIC_REV_5700)
16852 		tg3_flag_set(tp, USE_LINKCHG_REG);
16853 	else
16854 		tg3_flag_clear(tp, USE_LINKCHG_REG);
16855 
	/* The led_ctrl is set during tg3_phy_probe; here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
16860 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16861 	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
16862 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16863 		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16864 		tg3_flag_set(tp, USE_LINKCHG_REG);
16865 	}
16866 
16867 	/* For all SERDES we poll the MAC status register. */
16868 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16869 		tg3_flag_set(tp, POLL_SERDES);
16870 	else
16871 		tg3_flag_clear(tp, POLL_SERDES);
16872 
16873 	if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16874 		tg3_flag_set(tp, POLL_CPMU_LINK);
16875 
16876 	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16877 	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
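	/* In PCI-X mode the 5701 cannot DMA into 2-byte-aligned buffers,
	 * so the IP alignment pad is dropped; without efficient unaligned
	 * access, every received packet is copied out instead.
	 */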
16878 	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16879 	    tg3_flag(tp, PCIX_MODE)) {
16880 		tp->rx_offset = NET_SKB_PAD;
16881 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16882 		tp->rx_copy_thresh = ~(u16)0;
16883 #endif
16884 	}
16885 
16886 	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16887 	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16888 	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16889 
16890 	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16891 
	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to work around hw errata.
	 */
16895 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16896 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
16897 	    tg3_asic_rev(tp) == ASIC_REV_5755)
16898 		tp->rx_std_max_post = 8;
16899 
16900 	if (tg3_flag(tp, ASPM_WORKAROUND))
16901 		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16902 				     PCIE_PWR_MGMT_L1_THRESH_MSK;
16903 
16904 	return err;
16905 }
16906 
16907 static int tg3_get_device_address(struct tg3 *tp, u8 *addr)
16908 {
16909 	u32 hi, lo, mac_offset;
16910 	int addr_ok = 0;
16911 	int err;
16912 
16913 	if (!eth_platform_get_mac_address(&tp->pdev->dev, addr))
16914 		return 0;
16915 
16916 	if (tg3_flag(tp, IS_SSB_CORE)) {
16917 		err = ssb_gige_get_macaddr(tp->pdev, addr);
16918 		if (!err && is_valid_ether_addr(addr))
16919 			return 0;
16920 	}
16921 
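	/* 0x7c is the default NVRAM offset of the MAC address; dual-MAC
	 * and multi-function parts store it elsewhere, handled below.
	 */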
16922 	mac_offset = 0x7c;
16923 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16924 	    tg3_flag(tp, 5780_CLASS)) {
16925 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16926 			mac_offset = 0xcc;
16927 		if (tg3_nvram_lock(tp))
16928 			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16929 		else
16930 			tg3_nvram_unlock(tp);
16931 	} else if (tg3_flag(tp, 5717_PLUS)) {
16932 		if (tp->pci_fn & 1)
16933 			mac_offset = 0xcc;
16934 		if (tp->pci_fn > 1)
16935 			mac_offset += 0x18c;
16936 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16937 		mac_offset = 0x10;
16938 
16939 	/* First try to get it from MAC address mailbox. */
16940 	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
16941 	if ((hi >> 16) == 0x484b) {
16942 		addr[0] = (hi >>  8) & 0xff;
16943 		addr[1] = (hi >>  0) & 0xff;
16944 
16945 		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16946 		addr[2] = (lo >> 24) & 0xff;
16947 		addr[3] = (lo >> 16) & 0xff;
16948 		addr[4] = (lo >>  8) & 0xff;
16949 		addr[5] = (lo >>  0) & 0xff;
16950 
16951 		/* Some old bootcode may report a 0 MAC address in SRAM */
16952 		addr_ok = is_valid_ether_addr(addr);
16953 	}
16954 	if (!addr_ok) {
16955 		/* Next, try NVRAM. */
16956 		if (!tg3_flag(tp, NO_NVRAM) &&
16957 		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16958 		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16959 			memcpy(&addr[0], ((char *)&hi) + 2, 2);
16960 			memcpy(&addr[2], (char *)&lo, sizeof(lo));
16961 		}
16962 		/* Finally just fetch it out of the MAC control regs. */
16963 		else {
16964 			hi = tr32(MAC_ADDR_0_HIGH);
16965 			lo = tr32(MAC_ADDR_0_LOW);
16966 
16967 			addr[5] = lo & 0xff;
16968 			addr[4] = (lo >> 8) & 0xff;
16969 			addr[3] = (lo >> 16) & 0xff;
16970 			addr[2] = (lo >> 24) & 0xff;
16971 			addr[1] = hi & 0xff;
16972 			addr[0] = (hi >> 8) & 0xff;
16973 		}
16974 	}
16975 
16976 	if (!is_valid_ether_addr(addr))
16977 		return -EINVAL;
16978 	return 0;
16979 }
16980 
16981 #define BOUNDARY_SINGLE_CACHELINE	1
16982 #define BOUNDARY_MULTI_CACHELINE	2
16983 
16984 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16985 {
16986 	int cacheline_size;
16987 	u8 byte;
16988 	int goal;
16989 
16990 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
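	/* A cache line size register value of zero is treated as
	 * unspecified; assume a 1024-byte cache line.
	 */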
16991 	if (byte == 0)
16992 		cacheline_size = 1024;
16993 	else
16994 		cacheline_size = (int) byte * 4;
16995 
	/* On 5703 and later chips, the boundary bits have no
	 * effect, except on PCI Express where limited write-side
	 * control remains (see below).
	 */
16999 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17000 	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
17001 	    !tg3_flag(tp, PCI_EXPRESS))
17002 		goto out;
17003 
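	/* Pick a boundary goal suited to the behavior of the host
	 * architecture's PCI bridges (see the comment on RISC systems
	 * below).
	 */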
17004 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17005 	goal = BOUNDARY_MULTI_CACHELINE;
17006 #else
17007 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17008 	goal = BOUNDARY_SINGLE_CACHELINE;
17009 #else
17010 	goal = 0;
17011 #endif
17012 #endif
17013 
17014 	if (tg3_flag(tp, 57765_PLUS)) {
17015 		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17016 		goto out;
17017 	}
17018 
17019 	if (!goal)
17020 		goto out;
17021 
17022 	/* PCI controllers on most RISC systems tend to disconnect
17023 	 * when a device tries to burst across a cache-line boundary.
17024 	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
17025 	 *
17026 	 * Unfortunately, for PCI-E there are only limited
17027 	 * write-side controls for this, and thus for reads
17028 	 * we will still get the disconnects.  We'll also waste
17029 	 * these PCI cycles for both read and write for chips
17030 	 * other than 5700 and 5701 which do not implement the
17031 	 * boundary bits.
17032 	 */
17033 	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17034 		switch (cacheline_size) {
17035 		case 16:
17036 		case 32:
17037 		case 64:
17038 		case 128:
17039 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17040 				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17041 					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17042 			} else {
17043 				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17044 					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17045 			}
17046 			break;
17047 
17048 		case 256:
17049 			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17050 				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17051 			break;
17052 
17053 		default:
17054 			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17055 				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17056 			break;
17057 		}
17058 	} else if (tg3_flag(tp, PCI_EXPRESS)) {
17059 		switch (cacheline_size) {
17060 		case 16:
17061 		case 32:
17062 		case 64:
17063 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17064 				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17065 				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17066 				break;
17067 			}
17068 			fallthrough;
17069 		case 128:
17070 		default:
17071 			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17072 			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17073 			break;
17074 		}
17075 	} else {
17076 		switch (cacheline_size) {
17077 		case 16:
17078 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17079 				val |= (DMA_RWCTRL_READ_BNDRY_16 |
17080 					DMA_RWCTRL_WRITE_BNDRY_16);
17081 				break;
17082 			}
17083 			fallthrough;
17084 		case 32:
17085 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17086 				val |= (DMA_RWCTRL_READ_BNDRY_32 |
17087 					DMA_RWCTRL_WRITE_BNDRY_32);
17088 				break;
17089 			}
17090 			fallthrough;
17091 		case 64:
17092 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17093 				val |= (DMA_RWCTRL_READ_BNDRY_64 |
17094 					DMA_RWCTRL_WRITE_BNDRY_64);
17095 				break;
17096 			}
17097 			fallthrough;
17098 		case 128:
17099 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17100 				val |= (DMA_RWCTRL_READ_BNDRY_128 |
17101 					DMA_RWCTRL_WRITE_BNDRY_128);
17102 				break;
17103 			}
17104 			fallthrough;
17105 		case 256:
17106 			val |= (DMA_RWCTRL_READ_BNDRY_256 |
17107 				DMA_RWCTRL_WRITE_BNDRY_256);
17108 			break;
17109 		case 512:
17110 			val |= (DMA_RWCTRL_READ_BNDRY_512 |
17111 				DMA_RWCTRL_WRITE_BNDRY_512);
17112 			break;
17113 		case 1024:
17114 		default:
17115 			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17116 				DMA_RWCTRL_WRITE_BNDRY_1024);
17117 			break;
17118 		}
17119 	}
17120 
17121 out:
17122 	return val;
17123 }
17124 
17125 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17126 			   int size, bool to_device)
17127 {
17128 	struct tg3_internal_buffer_desc test_desc;
17129 	u32 sram_dma_descs;
17130 	int i, ret;
17131 
17132 	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17133 
17134 	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17135 	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17136 	tw32(RDMAC_STATUS, 0);
17137 	tw32(WDMAC_STATUS, 0);
17138 
17139 	tw32(BUFMGR_MODE, 0);
17140 	tw32(FTQ_RESET, 0);
17141 
17142 	test_desc.addr_hi = ((u64) buf_dma) >> 32;
17143 	test_desc.addr_lo = buf_dma & 0xffffffff;
17144 	test_desc.nic_mbuf = 0x00002100;
17145 	test_desc.len = size;
17146 
17147 	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
	 * the *second* time the tg3 driver was loaded after an
17150 	 * initial scan.
17151 	 *
17152 	 * Broadcom tells me:
17153 	 *   ...the DMA engine is connected to the GRC block and a DMA
17154 	 *   reset may affect the GRC block in some unpredictable way...
17155 	 *   The behavior of resets to individual blocks has not been tested.
17156 	 *
17157 	 * Broadcom noted the GRC reset will also reset all sub-components.
17158 	 */
17159 	if (to_device) {
17160 		test_desc.cqid_sqid = (13 << 8) | 2;
17161 
17162 		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17163 		udelay(40);
17164 	} else {
17165 		test_desc.cqid_sqid = (16 << 8) | 7;
17166 
17167 		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17168 		udelay(40);
17169 	}
17170 	test_desc.flags = 0x00000005;
17171 
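	/* Copy the test descriptor into NIC SRAM one word at a time
	 * through the PCI memory window.
	 */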
17172 	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17173 		u32 val;
17174 
17175 		val = *(((u32 *)&test_desc) + i);
17176 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17177 				       sram_dma_descs + (i * sizeof(u32)));
17178 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17179 	}
17180 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17181 
17182 	if (to_device)
17183 		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17184 	else
17185 		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17186 
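	/* Poll the completion FIFO for up to ~4 ms (40 polls, 100 us
	 * apart) for the descriptor to come back.
	 */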
17187 	ret = -ENODEV;
17188 	for (i = 0; i < 40; i++) {
17189 		u32 val;
17190 
17191 		if (to_device)
17192 			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17193 		else
17194 			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17195 		if ((val & 0xffff) == sram_dma_descs) {
17196 			ret = 0;
17197 			break;
17198 		}
17199 
17200 		udelay(100);
17201 	}
17202 
17203 	return ret;
17204 }
17205 
17206 #define TEST_BUFFER_SIZE	0x2000
17207 
17208 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17209 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17210 	{ },
17211 };
17212 
17213 static int tg3_test_dma(struct tg3 *tp)
17214 {
17215 	dma_addr_t buf_dma;
17216 	u32 *buf, saved_dma_rwctrl;
17217 	int ret = 0;
17218 
17219 	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17220 				 &buf_dma, GFP_KERNEL);
17221 	if (!buf) {
17222 		ret = -ENOMEM;
17223 		goto out_nofree;
17224 	}
17225 
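	/* Seed the PCI DMA read/write command codes; the boundary and
	 * watermark bits are filled in below.
	 */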
17226 	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17227 			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17228 
17229 	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17230 
17231 	if (tg3_flag(tp, 57765_PLUS))
17232 		goto out;
17233 
17234 	if (tg3_flag(tp, PCI_EXPRESS)) {
17235 		/* DMA read watermark not used on PCIE */
17236 		tp->dma_rwctrl |= 0x00180000;
17237 	} else if (!tg3_flag(tp, PCIX_MODE)) {
17238 		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17239 		    tg3_asic_rev(tp) == ASIC_REV_5750)
17240 			tp->dma_rwctrl |= 0x003f0000;
17241 		else
17242 			tp->dma_rwctrl |= 0x003f000f;
17243 	} else {
17244 		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17245 		    tg3_asic_rev(tp) == ASIC_REV_5704) {
17246 			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17247 			u32 read_water = 0x7;
17248 
17249 			/* If the 5704 is behind the EPB bridge, we can
17250 			 * do the less restrictive ONE_DMA workaround for
17251 			 * better performance.
17252 			 */
17253 			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17254 			    tg3_asic_rev(tp) == ASIC_REV_5704)
17255 				tp->dma_rwctrl |= 0x8000;
17256 			else if (ccval == 0x6 || ccval == 0x7)
17257 				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17258 
17259 			if (tg3_asic_rev(tp) == ASIC_REV_5703)
17260 				read_water = 4;
17261 			/* Set bit 23 to enable PCIX hw bug fix */
17262 			tp->dma_rwctrl |=
17263 				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17264 				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17265 				(1 << 23);
17266 		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17267 			/* 5780 always in PCIX mode */
17268 			tp->dma_rwctrl |= 0x00144000;
17269 		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17270 			/* 5714 always in PCIX mode */
17271 			tp->dma_rwctrl |= 0x00148000;
17272 		} else {
17273 			tp->dma_rwctrl |= 0x001b000f;
17274 		}
17275 	}
17276 	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17277 		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17278 
17279 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17280 	    tg3_asic_rev(tp) == ASIC_REV_5704)
17281 		tp->dma_rwctrl &= 0xfffffff0;
17282 
17283 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17284 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
17285 		/* Remove this if it causes problems for some boards. */
17286 		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17287 
17288 		/* On 5700/5701 chips, we need to set this bit.
17289 		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory without all of the byte
		 * enables turned on.  This is an error on several
17292 		 * RISC PCI controllers, in particular sparc64.
17293 		 *
17294 		 * On 5703/5704 chips, this bit has been reassigned
17295 		 * a different meaning.  In particular, it is used
17296 		 * on those chips to enable a PCI-X workaround.
17297 		 */
17298 		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17299 	}
17300 
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

17304 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17305 	    tg3_asic_rev(tp) != ASIC_REV_5701)
17306 		goto out;
17307 
	/* It is best to perform the DMA test with the maximum write
	 * burst size to expose the 5700/5701 write DMA bug.
	 */
17311 	saved_dma_rwctrl = tp->dma_rwctrl;
17312 	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17313 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17314 
17315 	while (1) {
17316 		u32 *p = buf, i;
17317 
17318 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17319 			p[i] = i;
17320 
17321 		/* Send the buffer to the chip. */
17322 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17323 		if (ret) {
17324 			dev_err(&tp->pdev->dev,
17325 				"%s: Buffer write failed. err = %d\n",
17326 				__func__, ret);
17327 			break;
17328 		}
17329 
17330 		/* Now read it back. */
17331 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17332 		if (ret) {
17333 			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17334 				"err = %d\n", __func__, ret);
17335 			break;
17336 		}
17337 
17338 		/* Verify it. */
17339 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17340 			if (p[i] == i)
17341 				continue;
17342 
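			/* On a mismatch, retry once with the most
			 * conservative 16-byte write boundary before
			 * giving up.
			 */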
17343 			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17344 			    DMA_RWCTRL_WRITE_BNDRY_16) {
17345 				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17346 				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17347 				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17348 				break;
17349 			} else {
17350 				dev_err(&tp->pdev->dev,
17351 					"%s: Buffer corrupted on read back! "
17352 					"(%d != %d)\n", __func__, p[i], i);
17353 				ret = -ENODEV;
17354 				goto out;
17355 			}
17356 		}
17357 
17358 		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17359 			/* Success. */
17360 			ret = 0;
17361 			break;
17362 		}
17363 	}
17364 	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17365 	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* The DMA test passed without adjusting the DMA boundary;
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
17370 		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17371 			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17372 			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17373 		} else {
17374 			/* Safe to use the calculated DMA boundary. */
17375 			tp->dma_rwctrl = saved_dma_rwctrl;
17376 		}
17377 
17378 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17379 	}
17380 
17381 out:
17382 	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17383 out_nofree:
17384 	return ret;
17385 }
17386 
17387 static void tg3_init_bufmgr_config(struct tg3 *tp)
17388 {
17389 	if (tg3_flag(tp, 57765_PLUS)) {
17390 		tp->bufmgr_config.mbuf_read_dma_low_water =
17391 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17392 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17393 			DEFAULT_MB_MACRX_LOW_WATER_57765;
17394 		tp->bufmgr_config.mbuf_high_water =
17395 			DEFAULT_MB_HIGH_WATER_57765;
17396 
17397 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17398 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17399 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17400 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17401 		tp->bufmgr_config.mbuf_high_water_jumbo =
17402 			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17403 	} else if (tg3_flag(tp, 5705_PLUS)) {
17404 		tp->bufmgr_config.mbuf_read_dma_low_water =
17405 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17406 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17407 			DEFAULT_MB_MACRX_LOW_WATER_5705;
17408 		tp->bufmgr_config.mbuf_high_water =
17409 			DEFAULT_MB_HIGH_WATER_5705;
17410 		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17411 			tp->bufmgr_config.mbuf_mac_rx_low_water =
17412 				DEFAULT_MB_MACRX_LOW_WATER_5906;
17413 			tp->bufmgr_config.mbuf_high_water =
17414 				DEFAULT_MB_HIGH_WATER_5906;
17415 		}
17416 
17417 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17418 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17419 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17420 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17421 		tp->bufmgr_config.mbuf_high_water_jumbo =
17422 			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17423 	} else {
17424 		tp->bufmgr_config.mbuf_read_dma_low_water =
17425 			DEFAULT_MB_RDMA_LOW_WATER;
17426 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17427 			DEFAULT_MB_MACRX_LOW_WATER;
17428 		tp->bufmgr_config.mbuf_high_water =
17429 			DEFAULT_MB_HIGH_WATER;
17430 
17431 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17432 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17433 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17434 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17435 		tp->bufmgr_config.mbuf_high_water_jumbo =
17436 			DEFAULT_MB_HIGH_WATER_JUMBO;
17437 	}
17438 
17439 	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17440 	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17441 }
17442 
17443 static char *tg3_phy_string(struct tg3 *tp)
17444 {
17445 	switch (tp->phy_id & TG3_PHY_ID_MASK) {
17446 	case TG3_PHY_ID_BCM5400:	return "5400";
17447 	case TG3_PHY_ID_BCM5401:	return "5401";
17448 	case TG3_PHY_ID_BCM5411:	return "5411";
17449 	case TG3_PHY_ID_BCM5701:	return "5701";
17450 	case TG3_PHY_ID_BCM5703:	return "5703";
17451 	case TG3_PHY_ID_BCM5704:	return "5704";
17452 	case TG3_PHY_ID_BCM5705:	return "5705";
17453 	case TG3_PHY_ID_BCM5750:	return "5750";
17454 	case TG3_PHY_ID_BCM5752:	return "5752";
17455 	case TG3_PHY_ID_BCM5714:	return "5714";
17456 	case TG3_PHY_ID_BCM5780:	return "5780";
17457 	case TG3_PHY_ID_BCM5755:	return "5755";
17458 	case TG3_PHY_ID_BCM5787:	return "5787";
17459 	case TG3_PHY_ID_BCM5784:	return "5784";
17460 	case TG3_PHY_ID_BCM5756:	return "5722/5756";
17461 	case TG3_PHY_ID_BCM5906:	return "5906";
17462 	case TG3_PHY_ID_BCM5761:	return "5761";
17463 	case TG3_PHY_ID_BCM5718C:	return "5718C";
17464 	case TG3_PHY_ID_BCM5718S:	return "5718S";
17465 	case TG3_PHY_ID_BCM57765:	return "57765";
17466 	case TG3_PHY_ID_BCM5719C:	return "5719C";
17467 	case TG3_PHY_ID_BCM5720C:	return "5720C";
17468 	case TG3_PHY_ID_BCM5762:	return "5762C";
17469 	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
17470 	case 0:			return "serdes";
17471 	default:		return "unknown";
17472 	}
17473 }
17474 
17475 static char *tg3_bus_string(struct tg3 *tp, char *str)
17476 {
17477 	if (tg3_flag(tp, PCI_EXPRESS)) {
17478 		strcpy(str, "PCI Express");
17479 		return str;
17480 	} else if (tg3_flag(tp, PCIX_MODE)) {
17481 		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17482 
17483 		strcpy(str, "PCIX:");
17484 
17485 		if ((clock_ctrl == 7) ||
17486 		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17487 		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17488 			strcat(str, "133MHz");
17489 		else if (clock_ctrl == 0)
17490 			strcat(str, "33MHz");
17491 		else if (clock_ctrl == 2)
17492 			strcat(str, "50MHz");
17493 		else if (clock_ctrl == 4)
17494 			strcat(str, "66MHz");
17495 		else if (clock_ctrl == 6)
17496 			strcat(str, "100MHz");
17497 	} else {
17498 		strcpy(str, "PCI:");
17499 		if (tg3_flag(tp, PCI_HIGH_SPEED))
17500 			strcat(str, "66MHz");
17501 		else
17502 			strcat(str, "33MHz");
17503 	}
17504 	if (tg3_flag(tp, PCI_32BIT))
17505 		strcat(str, ":32-bit");
17506 	else
17507 		strcat(str, ":64-bit");
17508 	return str;
17509 }
17510 
17511 static void tg3_init_coal(struct tg3 *tp)
17512 {
17513 	struct ethtool_coalesce *ec = &tp->coal;
17514 
17515 	memset(ec, 0, sizeof(*ec));
17516 	ec->cmd = ETHTOOL_GCOALESCE;
17517 	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17518 	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17519 	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17520 	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17521 	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17522 	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17523 	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17524 	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17525 	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17526 
17527 	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17528 				 HOSTCC_MODE_CLRTICK_TXBD)) {
17529 		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17530 		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17531 		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17532 		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17533 	}
17534 
17535 	if (tg3_flag(tp, 5705_PLUS)) {
17536 		ec->rx_coalesce_usecs_irq = 0;
17537 		ec->tx_coalesce_usecs_irq = 0;
17538 		ec->stats_block_coalesce_usecs = 0;
17539 	}
17540 }
17541 
17542 static int tg3_init_one(struct pci_dev *pdev,
17543 				  const struct pci_device_id *ent)
17544 {
17545 	struct net_device *dev;
17546 	struct tg3 *tp;
17547 	int i, err;
17548 	u32 sndmbx, rcvmbx, intmbx;
17549 	char str[40];
17550 	u64 dma_mask, persist_dma_mask;
17551 	netdev_features_t features = 0;
17552 	u8 addr[ETH_ALEN] __aligned(2);
17553 
17554 	err = pci_enable_device(pdev);
17555 	if (err) {
17556 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17557 		return err;
17558 	}
17559 
17560 	err = pci_request_regions(pdev, DRV_MODULE_NAME);
17561 	if (err) {
17562 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17563 		goto err_out_disable_pdev;
17564 	}
17565 
17566 	pci_set_master(pdev);
17567 
17568 	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17569 	if (!dev) {
17570 		err = -ENOMEM;
17571 		goto err_out_free_res;
17572 	}
17573 
17574 	SET_NETDEV_DEV(dev, &pdev->dev);
17575 
17576 	tp = netdev_priv(dev);
17577 	tp->pdev = pdev;
17578 	tp->dev = dev;
17579 	tp->rx_mode = TG3_DEF_RX_MODE;
17580 	tp->tx_mode = TG3_DEF_TX_MODE;
17581 	tp->irq_sync = 1;
17582 	tp->pcierr_recovery = false;
17583 
17584 	if (tg3_debug > 0)
17585 		tp->msg_enable = tg3_debug;
17586 	else
17587 		tp->msg_enable = TG3_DEF_MSG_ENABLE;
17588 
17589 	if (pdev_is_ssb_gige_core(pdev)) {
17590 		tg3_flag_set(tp, IS_SSB_CORE);
17591 		if (ssb_gige_must_flush_posted_writes(pdev))
17592 			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17593 		if (ssb_gige_one_dma_at_once(pdev))
17594 			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17595 		if (ssb_gige_have_roboswitch(pdev)) {
17596 			tg3_flag_set(tp, USE_PHYLIB);
17597 			tg3_flag_set(tp, ROBOSWITCH);
17598 		}
17599 		if (ssb_gige_is_rgmii(pdev))
17600 			tg3_flag_set(tp, RGMII_MODE);
17601 	}
17602 
17603 	/* The word/byte swap controls here control register access byte
17604 	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17605 	 * setting below.
17606 	 */
17607 	tp->misc_host_ctrl =
17608 		MISC_HOST_CTRL_MASK_PCI_INT |
17609 		MISC_HOST_CTRL_WORD_SWAP |
17610 		MISC_HOST_CTRL_INDIR_ACCESS |
17611 		MISC_HOST_CTRL_PCISTATE_RW;
17612 
17613 	/* The NONFRM (non-frame) byte/word swap controls take effect
17614 	 * on descriptor entries, anything which isn't packet data.
17615 	 *
17616 	 * The StrongARM chips on the board (one for tx, one for rx)
17617 	 * are running in big-endian mode.
17618 	 */
17619 	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17620 			GRC_MODE_WSWAP_NONFRM_DATA);
17621 #ifdef __BIG_ENDIAN
17622 	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17623 #endif
17624 	spin_lock_init(&tp->lock);
17625 	spin_lock_init(&tp->indirect_lock);
17626 	INIT_WORK(&tp->reset_task, tg3_reset_task);
17627 
17628 	tp->regs = pci_ioremap_bar(pdev, BAR_0);
17629 	if (!tp->regs) {
17630 		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17631 		err = -ENOMEM;
17632 		goto err_out_free_dev;
17633 	}
17634 
17635 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17636 	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17637 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17638 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17639 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17640 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17641 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17642 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17643 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17644 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17645 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17646 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17647 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17648 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17649 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17650 		tg3_flag_set(tp, ENABLE_APE);
17651 		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17652 		if (!tp->aperegs) {
17653 			dev_err(&pdev->dev,
17654 				"Cannot map APE registers, aborting\n");
17655 			err = -ENOMEM;
17656 			goto err_out_iounmap;
17657 		}
17658 	}
17659 
17660 	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17661 	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17662 
17663 	dev->ethtool_ops = &tg3_ethtool_ops;
17664 	dev->watchdog_timeo = TG3_TX_TIMEOUT;
17665 	dev->netdev_ops = &tg3_netdev_ops;
17666 	dev->irq = pdev->irq;
17667 
17668 	err = tg3_get_invariants(tp, ent);
17669 	if (err) {
17670 		dev_err(&pdev->dev,
17671 			"Problem fetching invariants of chip, aborting\n");
17672 		goto err_out_apeunmap;
17673 	}
17674 
17675 	/* The EPB bridge inside 5714, 5715, and 5780 and any
17676 	 * device behind the EPB cannot support DMA addresses > 40-bit.
17677 	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17678 	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17679 	 * do DMA address check in tg3_start_xmit().
17680 	 */
17681 	if (tg3_flag(tp, IS_5788))
17682 		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17683 	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17684 		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17685 #ifdef CONFIG_HIGHMEM
17686 		dma_mask = DMA_BIT_MASK(64);
17687 #endif
17688 	} else
17689 		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17690 
17691 	/* Configure DMA attributes. */
17692 	if (dma_mask > DMA_BIT_MASK(32)) {
17693 		err = dma_set_mask(&pdev->dev, dma_mask);
17694 		if (!err) {
17695 			features |= NETIF_F_HIGHDMA;
17696 			err = dma_set_coherent_mask(&pdev->dev,
17697 						    persist_dma_mask);
17698 			if (err < 0) {
17699 				dev_err(&pdev->dev, "Unable to obtain 64 bit "
17700 					"DMA for consistent allocations\n");
17701 				goto err_out_apeunmap;
17702 			}
17703 		}
17704 	}
17705 	if (err || dma_mask == DMA_BIT_MASK(32)) {
17706 		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
17707 		if (err) {
17708 			dev_err(&pdev->dev,
17709 				"No usable DMA configuration, aborting\n");
17710 			goto err_out_apeunmap;
17711 		}
17712 	}
17713 
17714 	tg3_init_bufmgr_config(tp);
17715 
17716 	/* 5700 B0 chips do not support checksumming correctly due
17717 	 * to hardware bugs.
17718 	 */
17719 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17720 		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17721 
17722 		if (tg3_flag(tp, 5755_PLUS))
17723 			features |= NETIF_F_IPV6_CSUM;
17724 	}
17725 
17726 	/* TSO is on by default on chips that support hardware TSO.
17727 	 * Firmware TSO on older chips gives lower performance, so it
17728 	 * is off by default, but can be enabled using ethtool.
17729 	 */
17730 	if ((tg3_flag(tp, HW_TSO_1) ||
17731 	     tg3_flag(tp, HW_TSO_2) ||
17732 	     tg3_flag(tp, HW_TSO_3)) &&
17733 	    (features & NETIF_F_IP_CSUM))
17734 		features |= NETIF_F_TSO;
17735 	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17736 		if (features & NETIF_F_IPV6_CSUM)
17737 			features |= NETIF_F_TSO6;
17738 		if (tg3_flag(tp, HW_TSO_3) ||
17739 		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
17740 		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17741 		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17742 		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
17743 		    tg3_asic_rev(tp) == ASIC_REV_57780)
17744 			features |= NETIF_F_TSO_ECN;
17745 	}
17746 
17747 	dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17748 			 NETIF_F_HW_VLAN_CTAG_RX;
17749 	dev->vlan_features |= features;
17750 
17751 	/*
17752 	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17754 	 * loopback for the remaining devices.
17755 	 */
17756 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17757 	    !tg3_flag(tp, CPMU_PRESENT))
17758 		/* Add the loopback capability */
17759 		features |= NETIF_F_LOOPBACK;
17760 
17761 	dev->hw_features |= features;
17762 	dev->priv_flags |= IFF_UNICAST_FLT;
17763 
17764 	/* MTU range: 60 - 9000 or 1500, depending on hardware */
17765 	dev->min_mtu = TG3_MIN_MTU;
17766 	dev->max_mtu = TG3_MAX_MTU(tp);
17767 
17768 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17769 	    !tg3_flag(tp, TSO_CAPABLE) &&
17770 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17771 		tg3_flag_set(tp, MAX_RXPEND_64);
17772 		tp->rx_pending = 63;
17773 	}
17774 
17775 	err = tg3_get_device_address(tp, addr);
17776 	if (err) {
17777 		dev_err(&pdev->dev,
17778 			"Could not obtain valid ethernet address, aborting\n");
17779 		goto err_out_apeunmap;
17780 	}
17781 	eth_hw_addr_set(dev, addr);
17782 
17783 	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17784 	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17785 	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17786 	for (i = 0; i < tp->irq_max; i++) {
17787 		struct tg3_napi *tnapi = &tp->napi[i];
17788 
17789 		tnapi->tp = tp;
17790 		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17791 
17792 		tnapi->int_mbox = intmbx;
17793 		if (i <= 4)
17794 			intmbx += 0x8;
17795 		else
17796 			intmbx += 0x4;
17797 
17798 		tnapi->consmbox = rcvmbx;
17799 		tnapi->prodmbox = sndmbx;
17800 
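		/* Vector 0 uses the default coalesce-now bit; each
		 * additional MSI-X vector has its own per-vector bit.
		 */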
17801 		if (i)
17802 			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17803 		else
17804 			tnapi->coal_now = HOSTCC_MODE_NOW;
17805 
17806 		if (!tg3_flag(tp, SUPPORT_MSIX))
17807 			break;
17808 
17809 		/*
17810 		 * If we support MSIX, we'll be using RSS.  If we're using
17811 		 * RSS, the first vector only handles link interrupts and the
17812 		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we set up
		 * above are still useful for the single-vector mode.
17815 		 */
17816 		if (!i)
17817 			continue;
17818 
17819 		rcvmbx += 0x8;
17820 
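		/* The send producer mailboxes are not contiguous: the
		 * alternating -0x4/+0xc steps walk the two 32-bit halves
		 * of successive 64-bit mailbox slots in turn.
		 */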
17821 		if (sndmbx & 0x4)
17822 			sndmbx -= 0x4;
17823 		else
17824 			sndmbx += 0xc;
17825 	}
17826 
17827 	/*
	 * Reset the chip in case the UNDI or EFI driver did not shut it
	 * down.  Otherwise the DMA self test will enable the WDMAC and
	 * we'll see (spurious) pending DMA on the PCI bus at that point.
17831 	 */
17832 	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17833 	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17834 		tg3_full_lock(tp, 0);
17835 		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17836 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17837 		tg3_full_unlock(tp);
17838 	}
17839 
17840 	err = tg3_test_dma(tp);
17841 	if (err) {
17842 		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17843 		goto err_out_apeunmap;
17844 	}
17845 
17846 	tg3_init_coal(tp);
17847 
17848 	pci_set_drvdata(pdev, dev);
17849 
17850 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17851 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
17852 	    tg3_asic_rev(tp) == ASIC_REV_5762)
17853 		tg3_flag_set(tp, PTP_CAPABLE);
17854 
17855 	tg3_timer_init(tp);
17856 
17857 	tg3_carrier_off(tp);
17858 
17859 	err = register_netdev(dev);
17860 	if (err) {
17861 		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17862 		goto err_out_apeunmap;
17863 	}
17864 
17865 	if (tg3_flag(tp, PTP_CAPABLE)) {
17866 		tg3_ptp_init(tp);
17867 		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17868 						   &tp->pdev->dev);
17869 		if (IS_ERR(tp->ptp_clock))
17870 			tp->ptp_clock = NULL;
17871 	}
17872 
17873 	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17874 		    tp->board_part_number,
17875 		    tg3_chip_rev_id(tp),
17876 		    tg3_bus_string(tp, str),
17877 		    dev->dev_addr);
17878 
17879 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17880 		char *ethtype;
17881 
17882 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17883 			ethtype = "10/100Base-TX";
17884 		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17885 			ethtype = "1000Base-SX";
17886 		else
17887 			ethtype = "10/100/1000Base-T";
17888 
17889 		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17890 			    "(WireSpeed[%d], EEE[%d])\n",
17891 			    tg3_phy_string(tp), ethtype,
17892 			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17893 			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17894 	}
17895 
17896 	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17897 		    (dev->features & NETIF_F_RXCSUM) != 0,
17898 		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
17899 		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17900 		    tg3_flag(tp, ENABLE_ASF) != 0,
17901 		    tg3_flag(tp, TSO_CAPABLE) != 0);
17902 	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17903 		    tp->dma_rwctrl,
17904 		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17905 		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17906 
17907 	pci_save_state(pdev);
17908 
17909 	return 0;
17910 
17911 err_out_apeunmap:
17912 	if (tp->aperegs) {
17913 		iounmap(tp->aperegs);
17914 		tp->aperegs = NULL;
17915 	}
17916 
17917 err_out_iounmap:
17918 	if (tp->regs) {
17919 		iounmap(tp->regs);
17920 		tp->regs = NULL;
17921 	}
17922 
17923 err_out_free_dev:
17924 	free_netdev(dev);
17925 
17926 err_out_free_res:
17927 	pci_release_regions(pdev);
17928 
17929 err_out_disable_pdev:
17930 	if (pci_is_enabled(pdev))
17931 		pci_disable_device(pdev);
17932 	return err;
17933 }
17934 
17935 static void tg3_remove_one(struct pci_dev *pdev)
17936 {
17937 	struct net_device *dev = pci_get_drvdata(pdev);
17938 
17939 	if (dev) {
17940 		struct tg3 *tp = netdev_priv(dev);
17941 
17942 		tg3_ptp_fini(tp);
17943 
17944 		release_firmware(tp->fw);
17945 
17946 		tg3_reset_task_cancel(tp);
17947 
17948 		if (tg3_flag(tp, USE_PHYLIB)) {
17949 			tg3_phy_fini(tp);
17950 			tg3_mdio_fini(tp);
17951 		}
17952 
17953 		unregister_netdev(dev);
17954 		if (tp->aperegs) {
17955 			iounmap(tp->aperegs);
17956 			tp->aperegs = NULL;
17957 		}
17958 		if (tp->regs) {
17959 			iounmap(tp->regs);
17960 			tp->regs = NULL;
17961 		}
17962 		free_netdev(dev);
17963 		pci_release_regions(pdev);
17964 		pci_disable_device(pdev);
17965 	}
17966 }
17967 
17968 #ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

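	/* If the chip cannot be prepared for power-down, restart it and
	 * re-attach the interface so the failed suspend leaves a working
	 * device behind.
	 */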
	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, true);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

unlock:
	rtnl_unlock();
	return err;
}

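/* Counterpart of tg3_suspend(): re-attach the interface, re-initialize
 * the hardware, and restart the timer and data path.
 */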
static int tg3_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

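	/* Tell the APE management firmware that the driver is
	 * (re-)initializing the device.
	 */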
	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp,
			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

unlock:
	rtnl_unlock();
	return err;
}
#endif /* CONFIG_PM_SLEEP */

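/* SIMPLE_DEV_PM_OPS() ties the handlers above to every system-sleep
 * transition.  Roughly (a sketch, not the literal expansion):
 *
 *	static const struct dev_pm_ops tg3_pm_ops = {
 *		.suspend  = tg3_suspend,  .resume  = tg3_resume,
 *		.freeze   = tg3_suspend,  .thaw    = tg3_resume,
 *		.poweroff = tg3_suspend,  .restore = tg3_resume,
 *	};
 *
 * Without CONFIG_PM_SLEEP the callbacks are compiled out, which is why
 * tg3_suspend()/tg3_resume() are guarded by the same #ifdef.
 */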
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);

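/* Called at system shutdown/reboot: close the interface, power the chip
 * down (arming wake-on-LAN when it is enabled), and disable the PCI
 * device.
 */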
static void tg3_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);

	tg3_reset_task_cancel(tp);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		dev_close(dev);

	tg3_power_down(tp);

	rtnl_unlock();

	pci_disable_device(pdev);
}

/**
 * tg3_io_error_detected - called when a PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI channel state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	/* This may be a repeat call, or the netdev may not exist yet */
	if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
		goto done;

	/* Only a frozen channel is recoverable; a permanent failure is
	 * torn down at the "done" label below.
	 */
	if (state == pci_channel_io_frozen)
		tp->pcierr_recovery = true;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Make sure the reset task cannot run while recovery is in progress */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure) {
		if (netdev) {
			tg3_napi_enable(tp);
			dev_close(netdev);
		}
		err = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_disable_device(pdev);
	}

	rtnl_unlock();

	return err;
}

/**
 * tg3_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by the BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto done;
	}

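	/* Restore the config space captured by pci_save_state() at probe
	 * time, then save it again for any subsequent slot reset.
	 */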
	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netdev || !netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
		tg3_napi_enable(tp);
		dev_close(netdev);
	}
	rtnl_unlock();

	return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netdev || !netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	tg3_phy_start(tp);

done:
	tp->pcierr_recovery = false;
	rtnl_unlock();
}

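/* AER recovery sequence: error_detected() quiesces the device,
 * slot_reset() re-enables it after the bus reset, and resume() restarts
 * the data path once the PCI core says traffic may flow again.
 */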
static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= &tg3_pm_ops,
	.shutdown	= tg3_shutdown,
};

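/* module_pci_driver() generates the module init/exit boilerplate.  It is
 * roughly equivalent to:
 *
 *	static int __init tg3_driver_init(void)
 *	{
 *		return pci_register_driver(&tg3_driver);
 *	}
 *	module_init(tg3_driver_init);
 *
 *	static void __exit tg3_driver_exit(void)
 *	{
 *		pci_unregister_driver(&tg3_driver);
 *	}
 *	module_exit(tg3_driver_exit);
 */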
module_pci_driver(tg3_driver);