xref: /openbmc/linux/drivers/net/ethernet/broadcom/tg3.c (revision 360823a09426347ea8f232b0b0b5156d0aed0302)
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2016 Broadcom Corporation.
8  * Copyright (C) 2016-2017 Broadcom Limited.
9  * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
10  * refers to Broadcom Inc. and/or its subsidiaries.
11  *
12  * Firmware is:
13  *	Derived from proprietary unpublished source code,
14  *	Copyright (C) 2000-2016 Broadcom Corporation.
15  *	Copyright (C) 2016-2017 Broadcom Ltd.
16  *	Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
17  *	refers to Broadcom Inc. and/or its subsidiaries.
18  *
19  *	Permission is hereby granted for the distribution of this firmware
20  *	data in hexadecimal or equivalent format, provided this copyright
21  *	notice is accompanying it.
22  */
23 
24 
25 #include <linux/module.h>
26 #include <linux/moduleparam.h>
27 #include <linux/stringify.h>
28 #include <linux/kernel.h>
29 #include <linux/sched/signal.h>
30 #include <linux/types.h>
31 #include <linux/compiler.h>
32 #include <linux/slab.h>
33 #include <linux/delay.h>
34 #include <linux/in.h>
35 #include <linux/interrupt.h>
36 #include <linux/ioport.h>
37 #include <linux/pci.h>
38 #include <linux/netdevice.h>
39 #include <linux/etherdevice.h>
40 #include <linux/skbuff.h>
41 #include <linux/ethtool.h>
42 #include <linux/mdio.h>
43 #include <linux/mii.h>
44 #include <linux/phy.h>
45 #include <linux/brcmphy.h>
46 #include <linux/if.h>
47 #include <linux/if_vlan.h>
48 #include <linux/ip.h>
49 #include <linux/tcp.h>
50 #include <linux/workqueue.h>
51 #include <linux/prefetch.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/firmware.h>
54 #include <linux/ssb/ssb_driver_gige.h>
55 #include <linux/hwmon.h>
56 #include <linux/hwmon-sysfs.h>
57 #include <linux/crc32poly.h>
58 #include <linux/dmi.h>
59 
60 #include <net/checksum.h>
61 #include <net/gso.h>
62 #include <net/ip.h>
63 
64 #include <linux/io.h>
65 #include <asm/byteorder.h>
66 #include <linux/uaccess.h>
67 
68 #include <uapi/linux/net_tstamp.h>
69 #include <linux/ptp_clock_kernel.h>
70 
71 #define BAR_0	0
72 #define BAR_2	2
73 
74 #include "tg3.h"
75 
76 /* Functions & macros to verify TG3_FLAGS types */
77 
_tg3_flag(enum TG3_FLAGS flag,unsigned long * bits)78 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
79 {
80 	return test_bit(flag, bits);
81 }
82 
_tg3_flag_set(enum TG3_FLAGS flag,unsigned long * bits)83 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
84 {
85 	set_bit(flag, bits);
86 }
87 
_tg3_flag_clear(enum TG3_FLAGS flag,unsigned long * bits)88 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
89 {
90 	clear_bit(flag, bits);
91 }
92 
93 #define tg3_flag(tp, flag)				\
94 	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
95 #define tg3_flag_set(tp, flag)				\
96 	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
97 #define tg3_flag_clear(tp, flag)			\
98 	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
99 
100 #define DRV_MODULE_NAME		"tg3"
101 /* DO NOT UPDATE TG3_*_NUM defines */
102 #define TG3_MAJ_NUM			3
103 #define TG3_MIN_NUM			137
104 
105 #define RESET_KIND_SHUTDOWN	0
106 #define RESET_KIND_INIT		1
107 #define RESET_KIND_SUSPEND	2
108 
109 #define TG3_DEF_RX_MODE		0
110 #define TG3_DEF_TX_MODE		0
111 #define TG3_DEF_MSG_ENABLE	  \
112 	(NETIF_MSG_DRV		| \
113 	 NETIF_MSG_PROBE	| \
114 	 NETIF_MSG_LINK		| \
115 	 NETIF_MSG_TIMER	| \
116 	 NETIF_MSG_IFDOWN	| \
117 	 NETIF_MSG_IFUP		| \
118 	 NETIF_MSG_RX_ERR	| \
119 	 NETIF_MSG_TX_ERR)
120 
121 #define TG3_GRC_LCLCTL_PWRSW_DELAY	100
122 
123 /* length of time before we decide the hardware is borked,
124  * and dev->tx_timeout() should be called to fix the problem
125  */
126 
127 #define TG3_TX_TIMEOUT			(5 * HZ)
128 
129 /* hardware minimum and maximum for a single frame's data payload */
130 #define TG3_MIN_MTU			ETH_ZLEN
131 #define TG3_MAX_MTU(tp)	\
132 	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
133 
134 /* These numbers seem to be hard coded in the NIC firmware somehow.
135  * You can't change the ring sizes, but you can change where you place
136  * them in the NIC onboard memory.
137  */
138 #define TG3_RX_STD_RING_SIZE(tp) \
139 	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
140 	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
141 #define TG3_DEF_RX_RING_PENDING		200
142 #define TG3_RX_JMB_RING_SIZE(tp) \
143 	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
144 	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
145 #define TG3_DEF_RX_JUMBO_RING_PENDING	100
146 
147 /* Do not place this n-ring entries value into the tp struct itself,
148  * we really want to expose these constants to GCC so that modulo et
149  * al.  operations are done with shifts and masks instead of with
150  * hw multiply/modulo instructions.  Another solution would be to
151  * replace things like '% foo' with '& (foo - 1)'.
152  */
153 
154 #define TG3_TX_RING_SIZE		512
155 #define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)
156 
157 #define TG3_RX_STD_RING_BYTES(tp) \
158 	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
159 #define TG3_RX_JMB_RING_BYTES(tp) \
160 	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
161 #define TG3_RX_RCB_RING_BYTES(tp) \
162 	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
163 #define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
164 				 TG3_TX_RING_SIZE)
165 #define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
166 
167 #define TG3_DMA_BYTE_ENAB		64
168 
169 #define TG3_RX_STD_DMA_SZ		1536
170 #define TG3_RX_JMB_DMA_SZ		9046
171 
172 #define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)
173 
174 #define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
175 #define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
176 
177 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
178 	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
179 
180 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
181 	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
182 
183 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
184  * that are at least dword aligned when used in PCIX mode.  The driver
185  * works around this bug by double copying the packet.  This workaround
186  * is built into the normal double copy length check for efficiency.
187  *
188  * However, the double copy is only necessary on those architectures
189  * where unaligned memory accesses are inefficient.  For those architectures
190  * where unaligned memory accesses incur little penalty, we can reintegrate
191  * the 5701 in the normal rx path.  Doing so saves a device structure
192  * dereference by hardcoding the double copy threshold in place.
193  */
194 #define TG3_RX_COPY_THRESHOLD		256
195 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
196 	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
197 #else
198 	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
199 #endif
200 
201 #if (NET_IP_ALIGN != 0)
202 #define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
203 #else
204 #define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
205 #endif
206 
207 /* minimum number of free TX descriptors required to wake up TX process */
208 #define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
209 #define TG3_TX_BD_DMA_MAX_2K		2048
210 #define TG3_TX_BD_DMA_MAX_4K		4096
211 
212 #define TG3_RAW_IP_ALIGN 2
213 
214 #define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
215 #define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)
216 
217 #define TG3_FW_UPDATE_TIMEOUT_SEC	5
218 #define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)
219 
220 #define FIRMWARE_TG3		"tigon/tg3.bin"
221 #define FIRMWARE_TG357766	"tigon/tg357766.bin"
222 #define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
223 #define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
224 
225 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
226 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
227 MODULE_LICENSE("GPL");
228 MODULE_FIRMWARE(FIRMWARE_TG3);
229 MODULE_FIRMWARE(FIRMWARE_TG357766);
230 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
231 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
232 
233 static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
234 module_param(tg3_debug, int, 0);
235 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
236 
237 #define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
238 #define TG3_DRV_DATA_FLAG_5705_10_100	0x0002
239 
240 static const struct pci_device_id tg3_pci_tbl[] = {
241 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
242 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
243 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
244 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
245 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
246 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
247 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
248 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
249 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
250 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
251 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
252 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
253 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
254 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
255 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
256 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
257 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
258 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
259 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
260 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
261 			TG3_DRV_DATA_FLAG_5705_10_100},
262 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
263 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
264 			TG3_DRV_DATA_FLAG_5705_10_100},
265 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
266 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
267 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
268 			TG3_DRV_DATA_FLAG_5705_10_100},
269 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
270 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
271 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
272 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
273 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
274 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
275 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
276 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
277 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
278 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
279 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
280 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
281 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
282 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
283 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
284 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
285 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
286 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
287 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
288 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
289 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
290 			PCI_VENDOR_ID_LENOVO,
291 			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
292 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
293 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
294 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
295 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
296 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
297 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
298 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
299 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
300 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
301 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
302 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
303 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
304 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
305 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
306 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
307 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
308 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
309 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
310 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
311 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
312 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
313 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
314 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
315 			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
316 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
317 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
318 			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
319 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
320 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
321 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
322 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
323 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
324 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
325 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
326 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
327 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
328 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
329 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
330 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
331 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
332 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
333 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
334 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
335 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
336 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
337 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
338 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
339 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
340 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
341 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
342 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
343 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
344 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
345 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
346 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
347 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
348 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
349 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
350 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
351 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
352 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
353 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
354 	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
355 	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
356 	{}
357 };
358 
359 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
360 
361 static const struct {
362 	const char string[ETH_GSTRING_LEN];
363 } ethtool_stats_keys[] = {
364 	{ "rx_octets" },
365 	{ "rx_fragments" },
366 	{ "rx_ucast_packets" },
367 	{ "rx_mcast_packets" },
368 	{ "rx_bcast_packets" },
369 	{ "rx_fcs_errors" },
370 	{ "rx_align_errors" },
371 	{ "rx_xon_pause_rcvd" },
372 	{ "rx_xoff_pause_rcvd" },
373 	{ "rx_mac_ctrl_rcvd" },
374 	{ "rx_xoff_entered" },
375 	{ "rx_frame_too_long_errors" },
376 	{ "rx_jabbers" },
377 	{ "rx_undersize_packets" },
378 	{ "rx_in_length_errors" },
379 	{ "rx_out_length_errors" },
380 	{ "rx_64_or_less_octet_packets" },
381 	{ "rx_65_to_127_octet_packets" },
382 	{ "rx_128_to_255_octet_packets" },
383 	{ "rx_256_to_511_octet_packets" },
384 	{ "rx_512_to_1023_octet_packets" },
385 	{ "rx_1024_to_1522_octet_packets" },
386 	{ "rx_1523_to_2047_octet_packets" },
387 	{ "rx_2048_to_4095_octet_packets" },
388 	{ "rx_4096_to_8191_octet_packets" },
389 	{ "rx_8192_to_9022_octet_packets" },
390 
391 	{ "tx_octets" },
392 	{ "tx_collisions" },
393 
394 	{ "tx_xon_sent" },
395 	{ "tx_xoff_sent" },
396 	{ "tx_flow_control" },
397 	{ "tx_mac_errors" },
398 	{ "tx_single_collisions" },
399 	{ "tx_mult_collisions" },
400 	{ "tx_deferred" },
401 	{ "tx_excessive_collisions" },
402 	{ "tx_late_collisions" },
403 	{ "tx_collide_2times" },
404 	{ "tx_collide_3times" },
405 	{ "tx_collide_4times" },
406 	{ "tx_collide_5times" },
407 	{ "tx_collide_6times" },
408 	{ "tx_collide_7times" },
409 	{ "tx_collide_8times" },
410 	{ "tx_collide_9times" },
411 	{ "tx_collide_10times" },
412 	{ "tx_collide_11times" },
413 	{ "tx_collide_12times" },
414 	{ "tx_collide_13times" },
415 	{ "tx_collide_14times" },
416 	{ "tx_collide_15times" },
417 	{ "tx_ucast_packets" },
418 	{ "tx_mcast_packets" },
419 	{ "tx_bcast_packets" },
420 	{ "tx_carrier_sense_errors" },
421 	{ "tx_discards" },
422 	{ "tx_errors" },
423 
424 	{ "dma_writeq_full" },
425 	{ "dma_write_prioq_full" },
426 	{ "rxbds_empty" },
427 	{ "rx_discards" },
428 	{ "rx_errors" },
429 	{ "rx_threshold_hit" },
430 
431 	{ "dma_readq_full" },
432 	{ "dma_read_prioq_full" },
433 	{ "tx_comp_queue_full" },
434 
435 	{ "ring_set_send_prod_index" },
436 	{ "ring_status_update" },
437 	{ "nic_irqs" },
438 	{ "nic_avoided_irqs" },
439 	{ "nic_tx_threshold_hit" },
440 
441 	{ "mbuf_lwm_thresh_hit" },
442 };
443 
444 #define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
445 #define TG3_NVRAM_TEST		0
446 #define TG3_LINK_TEST		1
447 #define TG3_REGISTER_TEST	2
448 #define TG3_MEMORY_TEST		3
449 #define TG3_MAC_LOOPB_TEST	4
450 #define TG3_PHY_LOOPB_TEST	5
451 #define TG3_EXT_LOOPB_TEST	6
452 #define TG3_INTERRUPT_TEST	7
453 
454 
455 static const struct {
456 	const char string[ETH_GSTRING_LEN];
457 } ethtool_test_keys[] = {
458 	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
459 	[TG3_LINK_TEST]		= { "link test         (online) " },
460 	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
461 	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
462 	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
463 	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
464 	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
465 	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
466 };
467 
468 #define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
469 
470 
tg3_write32(struct tg3 * tp,u32 off,u32 val)471 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
472 {
473 	writel(val, tp->regs + off);
474 }
475 
tg3_read32(struct tg3 * tp,u32 off)476 static u32 tg3_read32(struct tg3 *tp, u32 off)
477 {
478 	return readl(tp->regs + off);
479 }
480 
tg3_ape_write32(struct tg3 * tp,u32 off,u32 val)481 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
482 {
483 	writel(val, tp->aperegs + off);
484 }
485 
tg3_ape_read32(struct tg3 * tp,u32 off)486 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
487 {
488 	return readl(tp->aperegs + off);
489 }
490 
tg3_write_indirect_reg32(struct tg3 * tp,u32 off,u32 val)491 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
492 {
493 	unsigned long flags;
494 
495 	spin_lock_irqsave(&tp->indirect_lock, flags);
496 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
497 	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
498 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
499 }
500 
tg3_write_flush_reg32(struct tg3 * tp,u32 off,u32 val)501 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
502 {
503 	writel(val, tp->regs + off);
504 	readl(tp->regs + off);
505 }
506 
tg3_read_indirect_reg32(struct tg3 * tp,u32 off)507 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
508 {
509 	unsigned long flags;
510 	u32 val;
511 
512 	spin_lock_irqsave(&tp->indirect_lock, flags);
513 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
514 	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
515 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
516 	return val;
517 }
518 
tg3_write_indirect_mbox(struct tg3 * tp,u32 off,u32 val)519 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
520 {
521 	unsigned long flags;
522 
523 	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
524 		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
525 				       TG3_64BIT_REG_LOW, val);
526 		return;
527 	}
528 	if (off == TG3_RX_STD_PROD_IDX_REG) {
529 		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
530 				       TG3_64BIT_REG_LOW, val);
531 		return;
532 	}
533 
534 	spin_lock_irqsave(&tp->indirect_lock, flags);
535 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
536 	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
537 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
538 
539 	/* In indirect mode when disabling interrupts, we also need
540 	 * to clear the interrupt bit in the GRC local ctrl register.
541 	 */
542 	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
543 	    (val == 0x1)) {
544 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
545 				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
546 	}
547 }
548 
tg3_read_indirect_mbox(struct tg3 * tp,u32 off)549 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
550 {
551 	unsigned long flags;
552 	u32 val;
553 
554 	spin_lock_irqsave(&tp->indirect_lock, flags);
555 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
556 	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
557 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
558 	return val;
559 }
560 
561 /* usec_wait specifies the wait time in usec when writing to certain registers
562  * where it is unsafe to read back the register without some delay.
563  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
564  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
565  */
_tw32_flush(struct tg3 * tp,u32 off,u32 val,u32 usec_wait)566 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
567 {
568 	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
569 		/* Non-posted methods */
570 		tp->write32(tp, off, val);
571 	else {
572 		/* Posted method */
573 		tg3_write32(tp, off, val);
574 		if (usec_wait)
575 			udelay(usec_wait);
576 		tp->read32(tp, off);
577 	}
578 	/* Wait again after the read for the posted method to guarantee that
579 	 * the wait time is met.
580 	 */
581 	if (usec_wait)
582 		udelay(usec_wait);
583 }
584 
tw32_mailbox_flush(struct tg3 * tp,u32 off,u32 val)585 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
586 {
587 	tp->write32_mbox(tp, off, val);
588 	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
589 	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
590 	     !tg3_flag(tp, ICH_WORKAROUND)))
591 		tp->read32_mbox(tp, off);
592 }
593 
tg3_write32_tx_mbox(struct tg3 * tp,u32 off,u32 val)594 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
595 {
596 	void __iomem *mbox = tp->regs + off;
597 	writel(val, mbox);
598 	if (tg3_flag(tp, TXD_MBOX_HWBUG))
599 		writel(val, mbox);
600 	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
601 	    tg3_flag(tp, FLUSH_POSTED_WRITES))
602 		readl(mbox);
603 }
604 
tg3_read32_mbox_5906(struct tg3 * tp,u32 off)605 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
606 {
607 	return readl(tp->regs + off + GRCMBOX_BASE);
608 }
609 
tg3_write32_mbox_5906(struct tg3 * tp,u32 off,u32 val)610 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
611 {
612 	writel(val, tp->regs + off + GRCMBOX_BASE);
613 }
614 
615 #define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
616 #define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
617 #define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
618 #define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
619 #define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)
620 
621 #define tw32(reg, val)			tp->write32(tp, reg, val)
622 #define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
623 #define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
624 #define tr32(reg)			tp->read32(tp, reg)
625 
tg3_write_mem(struct tg3 * tp,u32 off,u32 val)626 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
627 {
628 	unsigned long flags;
629 
630 	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
631 	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
632 		return;
633 
634 	spin_lock_irqsave(&tp->indirect_lock, flags);
635 	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
636 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
637 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
638 
639 		/* Always leave this as zero. */
640 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
641 	} else {
642 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
643 		tw32_f(TG3PCI_MEM_WIN_DATA, val);
644 
645 		/* Always leave this as zero. */
646 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
647 	}
648 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
649 }
650 
tg3_read_mem(struct tg3 * tp,u32 off,u32 * val)651 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
652 {
653 	unsigned long flags;
654 
655 	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
656 	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
657 		*val = 0;
658 		return;
659 	}
660 
661 	spin_lock_irqsave(&tp->indirect_lock, flags);
662 	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
663 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
664 		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
665 
666 		/* Always leave this as zero. */
667 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
668 	} else {
669 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
670 		*val = tr32(TG3PCI_MEM_WIN_DATA);
671 
672 		/* Always leave this as zero. */
673 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
674 	}
675 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
676 }
677 
tg3_ape_lock_init(struct tg3 * tp)678 static void tg3_ape_lock_init(struct tg3 *tp)
679 {
680 	int i;
681 	u32 regbase, bit;
682 
683 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
684 		regbase = TG3_APE_LOCK_GRANT;
685 	else
686 		regbase = TG3_APE_PER_LOCK_GRANT;
687 
688 	/* Make sure the driver hasn't any stale locks. */
689 	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
690 		switch (i) {
691 		case TG3_APE_LOCK_PHY0:
692 		case TG3_APE_LOCK_PHY1:
693 		case TG3_APE_LOCK_PHY2:
694 		case TG3_APE_LOCK_PHY3:
695 			bit = APE_LOCK_GRANT_DRIVER;
696 			break;
697 		default:
698 			if (!tp->pci_fn)
699 				bit = APE_LOCK_GRANT_DRIVER;
700 			else
701 				bit = 1 << tp->pci_fn;
702 		}
703 		tg3_ape_write32(tp, regbase + 4 * i, bit);
704 	}
705 
706 }
707 
tg3_ape_lock(struct tg3 * tp,int locknum)708 static int tg3_ape_lock(struct tg3 *tp, int locknum)
709 {
710 	int i, off;
711 	int ret = 0;
712 	u32 status, req, gnt, bit;
713 
714 	if (!tg3_flag(tp, ENABLE_APE))
715 		return 0;
716 
717 	switch (locknum) {
718 	case TG3_APE_LOCK_GPIO:
719 		if (tg3_asic_rev(tp) == ASIC_REV_5761)
720 			return 0;
721 		fallthrough;
722 	case TG3_APE_LOCK_GRC:
723 	case TG3_APE_LOCK_MEM:
724 		if (!tp->pci_fn)
725 			bit = APE_LOCK_REQ_DRIVER;
726 		else
727 			bit = 1 << tp->pci_fn;
728 		break;
729 	case TG3_APE_LOCK_PHY0:
730 	case TG3_APE_LOCK_PHY1:
731 	case TG3_APE_LOCK_PHY2:
732 	case TG3_APE_LOCK_PHY3:
733 		bit = APE_LOCK_REQ_DRIVER;
734 		break;
735 	default:
736 		return -EINVAL;
737 	}
738 
739 	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
740 		req = TG3_APE_LOCK_REQ;
741 		gnt = TG3_APE_LOCK_GRANT;
742 	} else {
743 		req = TG3_APE_PER_LOCK_REQ;
744 		gnt = TG3_APE_PER_LOCK_GRANT;
745 	}
746 
747 	off = 4 * locknum;
748 
749 	tg3_ape_write32(tp, req + off, bit);
750 
751 	/* Wait for up to 1 millisecond to acquire lock. */
752 	for (i = 0; i < 100; i++) {
753 		status = tg3_ape_read32(tp, gnt + off);
754 		if (status == bit)
755 			break;
756 		if (pci_channel_offline(tp->pdev))
757 			break;
758 
759 		udelay(10);
760 	}
761 
762 	if (status != bit) {
763 		/* Revoke the lock request. */
764 		tg3_ape_write32(tp, gnt + off, bit);
765 		ret = -EBUSY;
766 	}
767 
768 	return ret;
769 }
770 
tg3_ape_unlock(struct tg3 * tp,int locknum)771 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
772 {
773 	u32 gnt, bit;
774 
775 	if (!tg3_flag(tp, ENABLE_APE))
776 		return;
777 
778 	switch (locknum) {
779 	case TG3_APE_LOCK_GPIO:
780 		if (tg3_asic_rev(tp) == ASIC_REV_5761)
781 			return;
782 		fallthrough;
783 	case TG3_APE_LOCK_GRC:
784 	case TG3_APE_LOCK_MEM:
785 		if (!tp->pci_fn)
786 			bit = APE_LOCK_GRANT_DRIVER;
787 		else
788 			bit = 1 << tp->pci_fn;
789 		break;
790 	case TG3_APE_LOCK_PHY0:
791 	case TG3_APE_LOCK_PHY1:
792 	case TG3_APE_LOCK_PHY2:
793 	case TG3_APE_LOCK_PHY3:
794 		bit = APE_LOCK_GRANT_DRIVER;
795 		break;
796 	default:
797 		return;
798 	}
799 
800 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
801 		gnt = TG3_APE_LOCK_GRANT;
802 	else
803 		gnt = TG3_APE_PER_LOCK_GRANT;
804 
805 	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
806 }
807 
tg3_ape_event_lock(struct tg3 * tp,u32 timeout_us)808 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
809 {
810 	u32 apedata;
811 
812 	while (timeout_us) {
813 		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
814 			return -EBUSY;
815 
816 		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
817 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
818 			break;
819 
820 		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
821 
822 		udelay(10);
823 		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
824 	}
825 
826 	return timeout_us ? 0 : -EBUSY;
827 }
828 
829 #ifdef CONFIG_TIGON3_HWMON
tg3_ape_wait_for_event(struct tg3 * tp,u32 timeout_us)830 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
831 {
832 	u32 i, apedata;
833 
834 	for (i = 0; i < timeout_us / 10; i++) {
835 		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
836 
837 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
838 			break;
839 
840 		udelay(10);
841 	}
842 
843 	return i == timeout_us / 10;
844 }
845 
tg3_ape_scratchpad_read(struct tg3 * tp,u32 * data,u32 base_off,u32 len)846 static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
847 				   u32 len)
848 {
849 	int err;
850 	u32 i, bufoff, msgoff, maxlen, apedata;
851 
852 	if (!tg3_flag(tp, APE_HAS_NCSI))
853 		return 0;
854 
855 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
856 	if (apedata != APE_SEG_SIG_MAGIC)
857 		return -ENODEV;
858 
859 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
860 	if (!(apedata & APE_FW_STATUS_READY))
861 		return -EAGAIN;
862 
863 	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
864 		 TG3_APE_SHMEM_BASE;
865 	msgoff = bufoff + 2 * sizeof(u32);
866 	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
867 
868 	while (len) {
869 		u32 length;
870 
871 		/* Cap xfer sizes to scratchpad limits. */
872 		length = (len > maxlen) ? maxlen : len;
873 		len -= length;
874 
875 		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
876 		if (!(apedata & APE_FW_STATUS_READY))
877 			return -EAGAIN;
878 
879 		/* Wait for up to 1 msec for APE to service previous event. */
880 		err = tg3_ape_event_lock(tp, 1000);
881 		if (err)
882 			return err;
883 
884 		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
885 			  APE_EVENT_STATUS_SCRTCHPD_READ |
886 			  APE_EVENT_STATUS_EVENT_PENDING;
887 		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
888 
889 		tg3_ape_write32(tp, bufoff, base_off);
890 		tg3_ape_write32(tp, bufoff + sizeof(u32), length);
891 
892 		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
893 		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
894 
895 		base_off += length;
896 
897 		if (tg3_ape_wait_for_event(tp, 30000))
898 			return -EAGAIN;
899 
900 		for (i = 0; length; i += 4, length -= 4) {
901 			u32 val = tg3_ape_read32(tp, msgoff + i);
902 			memcpy(data, &val, sizeof(u32));
903 			data++;
904 		}
905 	}
906 
907 	return 0;
908 }
909 #endif
910 
tg3_ape_send_event(struct tg3 * tp,u32 event)911 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
912 {
913 	int err;
914 	u32 apedata;
915 
916 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
917 	if (apedata != APE_SEG_SIG_MAGIC)
918 		return -EAGAIN;
919 
920 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
921 	if (!(apedata & APE_FW_STATUS_READY))
922 		return -EAGAIN;
923 
924 	/* Wait for up to 20 millisecond for APE to service previous event. */
925 	err = tg3_ape_event_lock(tp, 20000);
926 	if (err)
927 		return err;
928 
929 	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
930 			event | APE_EVENT_STATUS_EVENT_PENDING);
931 
932 	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
933 	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
934 
935 	return 0;
936 }
937 
tg3_ape_driver_state_change(struct tg3 * tp,int kind)938 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
939 {
940 	u32 event;
941 	u32 apedata;
942 
943 	if (!tg3_flag(tp, ENABLE_APE))
944 		return;
945 
946 	switch (kind) {
947 	case RESET_KIND_INIT:
948 		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
949 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
950 				APE_HOST_SEG_SIG_MAGIC);
951 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
952 				APE_HOST_SEG_LEN_MAGIC);
953 		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
954 		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
955 		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
956 			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
957 		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
958 				APE_HOST_BEHAV_NO_PHYLOCK);
959 		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
960 				    TG3_APE_HOST_DRVR_STATE_START);
961 
962 		event = APE_EVENT_STATUS_STATE_START;
963 		break;
964 	case RESET_KIND_SHUTDOWN:
965 		if (device_may_wakeup(&tp->pdev->dev) &&
966 		    tg3_flag(tp, WOL_ENABLE)) {
967 			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
968 					    TG3_APE_HOST_WOL_SPEED_AUTO);
969 			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
970 		} else
971 			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
972 
973 		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
974 
975 		event = APE_EVENT_STATUS_STATE_UNLOAD;
976 		break;
977 	default:
978 		return;
979 	}
980 
981 	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
982 
983 	tg3_ape_send_event(tp, event);
984 }
985 
tg3_send_ape_heartbeat(struct tg3 * tp,unsigned long interval)986 static void tg3_send_ape_heartbeat(struct tg3 *tp,
987 				   unsigned long interval)
988 {
989 	/* Check if hb interval has exceeded */
990 	if (!tg3_flag(tp, ENABLE_APE) ||
991 	    time_before(jiffies, tp->ape_hb_jiffies + interval))
992 		return;
993 
994 	tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
995 	tp->ape_hb_jiffies = jiffies;
996 }
997 
tg3_disable_ints(struct tg3 * tp)998 static void tg3_disable_ints(struct tg3 *tp)
999 {
1000 	int i;
1001 
1002 	tw32(TG3PCI_MISC_HOST_CTRL,
1003 	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
1004 	for (i = 0; i < tp->irq_max; i++)
1005 		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
1006 }
1007 
tg3_enable_ints(struct tg3 * tp)1008 static void tg3_enable_ints(struct tg3 *tp)
1009 {
1010 	int i;
1011 
1012 	tp->irq_sync = 0;
1013 	wmb();
1014 
1015 	tw32(TG3PCI_MISC_HOST_CTRL,
1016 	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
1017 
1018 	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
1019 	for (i = 0; i < tp->irq_cnt; i++) {
1020 		struct tg3_napi *tnapi = &tp->napi[i];
1021 
1022 		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1023 		if (tg3_flag(tp, 1SHOT_MSI))
1024 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1025 
1026 		tp->coal_now |= tnapi->coal_now;
1027 	}
1028 
1029 	/* Force an initial interrupt */
1030 	if (!tg3_flag(tp, TAGGED_STATUS) &&
1031 	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1032 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1033 	else
1034 		tw32(HOSTCC_MODE, tp->coal_now);
1035 
1036 	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1037 }
1038 
tg3_has_work(struct tg3_napi * tnapi)1039 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1040 {
1041 	struct tg3 *tp = tnapi->tp;
1042 	struct tg3_hw_status *sblk = tnapi->hw_status;
1043 	unsigned int work_exists = 0;
1044 
1045 	/* check for phy events */
1046 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1047 		if (sblk->status & SD_STATUS_LINK_CHG)
1048 			work_exists = 1;
1049 	}
1050 
1051 	/* check for TX work to do */
1052 	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1053 		work_exists = 1;
1054 
1055 	/* check for RX work to do */
1056 	if (tnapi->rx_rcb_prod_idx &&
1057 	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1058 		work_exists = 1;
1059 
1060 	return work_exists;
1061 }
1062 
1063 /* tg3_int_reenable
1064  *  similar to tg3_enable_ints, but it accurately determines whether there
1065  *  is new work pending and can return without flushing the PIO write
1066  *  which reenables interrupts
1067  */
tg3_int_reenable(struct tg3_napi * tnapi)1068 static void tg3_int_reenable(struct tg3_napi *tnapi)
1069 {
1070 	struct tg3 *tp = tnapi->tp;
1071 
1072 	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1073 
1074 	/* When doing tagged status, this work check is unnecessary.
1075 	 * The last_tag we write above tells the chip which piece of
1076 	 * work we've completed.
1077 	 */
1078 	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1079 		tw32(HOSTCC_MODE, tp->coalesce_mode |
1080 		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
1081 }
1082 
tg3_switch_clocks(struct tg3 * tp)1083 static void tg3_switch_clocks(struct tg3 *tp)
1084 {
1085 	u32 clock_ctrl;
1086 	u32 orig_clock_ctrl;
1087 
1088 	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1089 		return;
1090 
1091 	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1092 
1093 	orig_clock_ctrl = clock_ctrl;
1094 	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1095 		       CLOCK_CTRL_CLKRUN_OENABLE |
1096 		       0x1f);
1097 	tp->pci_clock_ctrl = clock_ctrl;
1098 
1099 	if (tg3_flag(tp, 5705_PLUS)) {
1100 		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1101 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
1102 				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1103 		}
1104 	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1105 		tw32_wait_f(TG3PCI_CLOCK_CTRL,
1106 			    clock_ctrl |
1107 			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1108 			    40);
1109 		tw32_wait_f(TG3PCI_CLOCK_CTRL,
1110 			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
1111 			    40);
1112 	}
1113 	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1114 }
1115 
1116 #define PHY_BUSY_LOOPS	5000
1117 
__tg3_readphy(struct tg3 * tp,unsigned int phy_addr,int reg,u32 * val)1118 static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
1119 			 u32 *val)
1120 {
1121 	u32 frame_val;
1122 	unsigned int loops;
1123 	int ret;
1124 
1125 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1126 		tw32_f(MAC_MI_MODE,
1127 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1128 		udelay(80);
1129 	}
1130 
1131 	tg3_ape_lock(tp, tp->phy_ape_lock);
1132 
1133 	*val = 0x0;
1134 
1135 	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1136 		      MI_COM_PHY_ADDR_MASK);
1137 	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1138 		      MI_COM_REG_ADDR_MASK);
1139 	frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1140 
1141 	tw32_f(MAC_MI_COM, frame_val);
1142 
1143 	loops = PHY_BUSY_LOOPS;
1144 	while (loops != 0) {
1145 		udelay(10);
1146 		frame_val = tr32(MAC_MI_COM);
1147 
1148 		if ((frame_val & MI_COM_BUSY) == 0) {
1149 			udelay(5);
1150 			frame_val = tr32(MAC_MI_COM);
1151 			break;
1152 		}
1153 		loops -= 1;
1154 	}
1155 
1156 	ret = -EBUSY;
1157 	if (loops != 0) {
1158 		*val = frame_val & MI_COM_DATA_MASK;
1159 		ret = 0;
1160 	}
1161 
1162 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1163 		tw32_f(MAC_MI_MODE, tp->mi_mode);
1164 		udelay(80);
1165 	}
1166 
1167 	tg3_ape_unlock(tp, tp->phy_ape_lock);
1168 
1169 	return ret;
1170 }
1171 
tg3_readphy(struct tg3 * tp,int reg,u32 * val)1172 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1173 {
1174 	return __tg3_readphy(tp, tp->phy_addr, reg, val);
1175 }
1176 
__tg3_writephy(struct tg3 * tp,unsigned int phy_addr,int reg,u32 val)1177 static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
1178 			  u32 val)
1179 {
1180 	u32 frame_val;
1181 	unsigned int loops;
1182 	int ret;
1183 
1184 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1185 	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1186 		return 0;
1187 
1188 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1189 		tw32_f(MAC_MI_MODE,
1190 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1191 		udelay(80);
1192 	}
1193 
1194 	tg3_ape_lock(tp, tp->phy_ape_lock);
1195 
1196 	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1197 		      MI_COM_PHY_ADDR_MASK);
1198 	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1199 		      MI_COM_REG_ADDR_MASK);
1200 	frame_val |= (val & MI_COM_DATA_MASK);
1201 	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1202 
1203 	tw32_f(MAC_MI_COM, frame_val);
1204 
1205 	loops = PHY_BUSY_LOOPS;
1206 	while (loops != 0) {
1207 		udelay(10);
1208 		frame_val = tr32(MAC_MI_COM);
1209 		if ((frame_val & MI_COM_BUSY) == 0) {
1210 			udelay(5);
1211 			frame_val = tr32(MAC_MI_COM);
1212 			break;
1213 		}
1214 		loops -= 1;
1215 	}
1216 
1217 	ret = -EBUSY;
1218 	if (loops != 0)
1219 		ret = 0;
1220 
1221 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1222 		tw32_f(MAC_MI_MODE, tp->mi_mode);
1223 		udelay(80);
1224 	}
1225 
1226 	tg3_ape_unlock(tp, tp->phy_ape_lock);
1227 
1228 	return ret;
1229 }
1230 
tg3_writephy(struct tg3 * tp,int reg,u32 val)1231 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1232 {
1233 	return __tg3_writephy(tp, tp->phy_addr, reg, val);
1234 }
1235 
tg3_phy_cl45_write(struct tg3 * tp,u32 devad,u32 addr,u32 val)1236 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1237 {
1238 	int err;
1239 
1240 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1241 	if (err)
1242 		goto done;
1243 
1244 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1245 	if (err)
1246 		goto done;
1247 
1248 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1249 			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1250 	if (err)
1251 		goto done;
1252 
1253 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1254 
1255 done:
1256 	return err;
1257 }
1258 
tg3_phy_cl45_read(struct tg3 * tp,u32 devad,u32 addr,u32 * val)1259 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1260 {
1261 	int err;
1262 
1263 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1264 	if (err)
1265 		goto done;
1266 
1267 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1268 	if (err)
1269 		goto done;
1270 
1271 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1272 			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1273 	if (err)
1274 		goto done;
1275 
1276 	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1277 
1278 done:
1279 	return err;
1280 }
1281 
tg3_phydsp_read(struct tg3 * tp,u32 reg,u32 * val)1282 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1283 {
1284 	int err;
1285 
1286 	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1287 	if (!err)
1288 		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1289 
1290 	return err;
1291 }
1292 
tg3_phydsp_write(struct tg3 * tp,u32 reg,u32 val)1293 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1294 {
1295 	int err;
1296 
1297 	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1298 	if (!err)
1299 		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1300 
1301 	return err;
1302 }
1303 
tg3_phy_auxctl_read(struct tg3 * tp,int reg,u32 * val)1304 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1305 {
1306 	int err;
1307 
1308 	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1309 			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1310 			   MII_TG3_AUXCTL_SHDWSEL_MISC);
1311 	if (!err)
1312 		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1313 
1314 	return err;
1315 }
1316 
tg3_phy_auxctl_write(struct tg3 * tp,int reg,u32 set)1317 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1318 {
1319 	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1320 		set |= MII_TG3_AUXCTL_MISC_WREN;
1321 
1322 	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1323 }
1324 
tg3_phy_toggle_auxctl_smdsp(struct tg3 * tp,bool enable)1325 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1326 {
1327 	u32 val;
1328 	int err;
1329 
1330 	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1331 
1332 	if (err)
1333 		return err;
1334 
1335 	if (enable)
1336 		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1337 	else
1338 		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1339 
1340 	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1341 				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1342 
1343 	return err;
1344 }
1345 
tg3_phy_shdw_write(struct tg3 * tp,int reg,u32 val)1346 static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
1347 {
1348 	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
1349 			    reg | val | MII_TG3_MISC_SHDW_WREN);
1350 }
1351 
tg3_bmcr_reset(struct tg3 * tp)1352 static int tg3_bmcr_reset(struct tg3 *tp)
1353 {
1354 	u32 phy_control;
1355 	int limit, err;
1356 
1357 	/* OK, reset it, and poll the BMCR_RESET bit until it
1358 	 * clears or we time out.
1359 	 */
1360 	phy_control = BMCR_RESET;
1361 	err = tg3_writephy(tp, MII_BMCR, phy_control);
1362 	if (err != 0)
1363 		return -EBUSY;
1364 
1365 	limit = 5000;
1366 	while (limit--) {
1367 		err = tg3_readphy(tp, MII_BMCR, &phy_control);
1368 		if (err != 0)
1369 			return -EBUSY;
1370 
1371 		if ((phy_control & BMCR_RESET) == 0) {
1372 			udelay(40);
1373 			break;
1374 		}
1375 		udelay(10);
1376 	}
1377 	if (limit < 0)
1378 		return -EBUSY;
1379 
1380 	return 0;
1381 }
1382 
tg3_mdio_read(struct mii_bus * bp,int mii_id,int reg)1383 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1384 {
1385 	struct tg3 *tp = bp->priv;
1386 	u32 val;
1387 
1388 	spin_lock_bh(&tp->lock);
1389 
1390 	if (__tg3_readphy(tp, mii_id, reg, &val))
1391 		val = -EIO;
1392 
1393 	spin_unlock_bh(&tp->lock);
1394 
1395 	return val;
1396 }
1397 
tg3_mdio_write(struct mii_bus * bp,int mii_id,int reg,u16 val)1398 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1399 {
1400 	struct tg3 *tp = bp->priv;
1401 	u32 ret = 0;
1402 
1403 	spin_lock_bh(&tp->lock);
1404 
1405 	if (__tg3_writephy(tp, mii_id, reg, val))
1406 		ret = -EIO;
1407 
1408 	spin_unlock_bh(&tp->lock);
1409 
1410 	return ret;
1411 }
1412 
tg3_mdio_config_5785(struct tg3 * tp)1413 static void tg3_mdio_config_5785(struct tg3 *tp)
1414 {
1415 	u32 val;
1416 	struct phy_device *phydev;
1417 
1418 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1419 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1420 	case PHY_ID_BCM50610:
1421 	case PHY_ID_BCM50610M:
1422 		val = MAC_PHYCFG2_50610_LED_MODES;
1423 		break;
1424 	case PHY_ID_BCMAC131:
1425 		val = MAC_PHYCFG2_AC131_LED_MODES;
1426 		break;
1427 	case PHY_ID_RTL8211C:
1428 		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1429 		break;
1430 	case PHY_ID_RTL8201E:
1431 		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1432 		break;
1433 	default:
1434 		return;
1435 	}
1436 
1437 	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1438 		tw32(MAC_PHYCFG2, val);
1439 
1440 		val = tr32(MAC_PHYCFG1);
1441 		val &= ~(MAC_PHYCFG1_RGMII_INT |
1442 			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1443 		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1444 		tw32(MAC_PHYCFG1, val);
1445 
1446 		return;
1447 	}
1448 
1449 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1450 		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1451 		       MAC_PHYCFG2_FMODE_MASK_MASK |
1452 		       MAC_PHYCFG2_GMODE_MASK_MASK |
1453 		       MAC_PHYCFG2_ACT_MASK_MASK   |
1454 		       MAC_PHYCFG2_QUAL_MASK_MASK |
1455 		       MAC_PHYCFG2_INBAND_ENABLE;
1456 
1457 	tw32(MAC_PHYCFG2, val);
1458 
1459 	val = tr32(MAC_PHYCFG1);
1460 	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1461 		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1462 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1463 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1464 			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1465 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1466 			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1467 	}
1468 	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1469 	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1470 	tw32(MAC_PHYCFG1, val);
1471 
1472 	val = tr32(MAC_EXT_RGMII_MODE);
1473 	val &= ~(MAC_RGMII_MODE_RX_INT_B |
1474 		 MAC_RGMII_MODE_RX_QUALITY |
1475 		 MAC_RGMII_MODE_RX_ACTIVITY |
1476 		 MAC_RGMII_MODE_RX_ENG_DET |
1477 		 MAC_RGMII_MODE_TX_ENABLE |
1478 		 MAC_RGMII_MODE_TX_LOWPWR |
1479 		 MAC_RGMII_MODE_TX_RESET);
1480 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1481 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1482 			val |= MAC_RGMII_MODE_RX_INT_B |
1483 			       MAC_RGMII_MODE_RX_QUALITY |
1484 			       MAC_RGMII_MODE_RX_ACTIVITY |
1485 			       MAC_RGMII_MODE_RX_ENG_DET;
1486 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1487 			val |= MAC_RGMII_MODE_TX_ENABLE |
1488 			       MAC_RGMII_MODE_TX_LOWPWR |
1489 			       MAC_RGMII_MODE_TX_RESET;
1490 	}
1491 	tw32(MAC_EXT_RGMII_MODE, val);
1492 }
1493 
tg3_mdio_start(struct tg3 * tp)1494 static void tg3_mdio_start(struct tg3 *tp)
1495 {
1496 	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1497 	tw32_f(MAC_MI_MODE, tp->mi_mode);
1498 	udelay(80);
1499 
1500 	if (tg3_flag(tp, MDIOBUS_INITED) &&
1501 	    tg3_asic_rev(tp) == ASIC_REV_5785)
1502 		tg3_mdio_config_5785(tp);
1503 }
1504 
tg3_mdio_init(struct tg3 * tp)1505 static int tg3_mdio_init(struct tg3 *tp)
1506 {
1507 	int i;
1508 	u32 reg;
1509 	struct phy_device *phydev;
1510 
1511 	if (tg3_flag(tp, 5717_PLUS)) {
1512 		u32 is_serdes;
1513 
1514 		tp->phy_addr = tp->pci_fn + 1;
1515 
1516 		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1517 			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1518 		else
1519 			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1520 				    TG3_CPMU_PHY_STRAP_IS_SERDES;
1521 		if (is_serdes)
1522 			tp->phy_addr += 7;
1523 	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
1524 		int addr;
1525 
1526 		addr = ssb_gige_get_phyaddr(tp->pdev);
1527 		if (addr < 0)
1528 			return addr;
1529 		tp->phy_addr = addr;
1530 	} else
1531 		tp->phy_addr = TG3_PHY_MII_ADDR;
1532 
1533 	tg3_mdio_start(tp);
1534 
1535 	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1536 		return 0;
1537 
1538 	tp->mdio_bus = mdiobus_alloc();
1539 	if (tp->mdio_bus == NULL)
1540 		return -ENOMEM;
1541 
1542 	tp->mdio_bus->name     = "tg3 mdio bus";
1543 	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x", pci_dev_id(tp->pdev));
1544 	tp->mdio_bus->priv     = tp;
1545 	tp->mdio_bus->parent   = &tp->pdev->dev;
1546 	tp->mdio_bus->read     = &tg3_mdio_read;
1547 	tp->mdio_bus->write    = &tg3_mdio_write;
1548 	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
1549 
1550 	/* The bus registration will look for all the PHYs on the mdio bus.
1551 	 * Unfortunately, it does not ensure the PHY is powered up before
1552 	 * accessing the PHY ID registers.  A chip reset is the
1553 	 * quickest way to bring the device back to an operational state..
1554 	 */
1555 	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1556 		tg3_bmcr_reset(tp);
1557 
1558 	i = mdiobus_register(tp->mdio_bus);
1559 	if (i) {
1560 		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1561 		mdiobus_free(tp->mdio_bus);
1562 		return i;
1563 	}
1564 
1565 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1566 
1567 	if (!phydev || !phydev->drv) {
1568 		dev_warn(&tp->pdev->dev, "No PHY devices\n");
1569 		mdiobus_unregister(tp->mdio_bus);
1570 		mdiobus_free(tp->mdio_bus);
1571 		return -ENODEV;
1572 	}
1573 
1574 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1575 	case PHY_ID_BCM57780:
1576 		phydev->interface = PHY_INTERFACE_MODE_GMII;
1577 		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1578 		break;
1579 	case PHY_ID_BCM50610:
1580 	case PHY_ID_BCM50610M:
1581 		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1582 				     PHY_BRCM_RX_REFCLK_UNUSED |
1583 				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
1584 				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
1585 		fallthrough;
1586 	case PHY_ID_RTL8211C:
1587 		phydev->interface = PHY_INTERFACE_MODE_RGMII;
1588 		break;
1589 	case PHY_ID_RTL8201E:
1590 	case PHY_ID_BCMAC131:
1591 		phydev->interface = PHY_INTERFACE_MODE_MII;
1592 		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1593 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
1594 		break;
1595 	}
1596 
1597 	tg3_flag_set(tp, MDIOBUS_INITED);
1598 
1599 	if (tg3_asic_rev(tp) == ASIC_REV_5785)
1600 		tg3_mdio_config_5785(tp);
1601 
1602 	return 0;
1603 }
1604 
tg3_mdio_fini(struct tg3 * tp)1605 static void tg3_mdio_fini(struct tg3 *tp)
1606 {
1607 	if (tg3_flag(tp, MDIOBUS_INITED)) {
1608 		tg3_flag_clear(tp, MDIOBUS_INITED);
1609 		mdiobus_unregister(tp->mdio_bus);
1610 		mdiobus_free(tp->mdio_bus);
1611 	}
1612 }
1613 
1614 /* tp->lock is held. */
tg3_generate_fw_event(struct tg3 * tp)1615 static inline void tg3_generate_fw_event(struct tg3 *tp)
1616 {
1617 	u32 val;
1618 
1619 	val = tr32(GRC_RX_CPU_EVENT);
1620 	val |= GRC_RX_CPU_DRIVER_EVENT;
1621 	tw32_f(GRC_RX_CPU_EVENT, val);
1622 
1623 	tp->last_event_jiffies = jiffies;
1624 }
1625 
1626 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1627 
1628 /* tp->lock is held. */
tg3_wait_for_event_ack(struct tg3 * tp)1629 static void tg3_wait_for_event_ack(struct tg3 *tp)
1630 {
1631 	int i;
1632 	unsigned int delay_cnt;
1633 	long time_remain;
1634 
1635 	/* If enough time has passed, no wait is necessary. */
1636 	time_remain = (long)(tp->last_event_jiffies + 1 +
1637 		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1638 		      (long)jiffies;
1639 	if (time_remain < 0)
1640 		return;
1641 
1642 	/* Check if we can shorten the wait time. */
1643 	delay_cnt = jiffies_to_usecs(time_remain);
1644 	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1645 		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1646 	delay_cnt = (delay_cnt >> 3) + 1;
1647 
1648 	for (i = 0; i < delay_cnt; i++) {
1649 		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1650 			break;
1651 		if (pci_channel_offline(tp->pdev))
1652 			break;
1653 
1654 		udelay(8);
1655 	}
1656 }
1657 
1658 /* tp->lock is held. */
tg3_phy_gather_ump_data(struct tg3 * tp,u32 * data)1659 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1660 {
1661 	u32 reg, val;
1662 
1663 	val = 0;
1664 	if (!tg3_readphy(tp, MII_BMCR, &reg))
1665 		val = reg << 16;
1666 	if (!tg3_readphy(tp, MII_BMSR, &reg))
1667 		val |= (reg & 0xffff);
1668 	*data++ = val;
1669 
1670 	val = 0;
1671 	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1672 		val = reg << 16;
1673 	if (!tg3_readphy(tp, MII_LPA, &reg))
1674 		val |= (reg & 0xffff);
1675 	*data++ = val;
1676 
1677 	val = 0;
1678 	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1679 		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1680 			val = reg << 16;
1681 		if (!tg3_readphy(tp, MII_STAT1000, &reg))
1682 			val |= (reg & 0xffff);
1683 	}
1684 	*data++ = val;
1685 
1686 	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1687 		val = reg << 16;
1688 	else
1689 		val = 0;
1690 	*data++ = val;
1691 }
1692 
1693 /* tp->lock is held. */
tg3_ump_link_report(struct tg3 * tp)1694 static void tg3_ump_link_report(struct tg3 *tp)
1695 {
1696 	u32 data[4];
1697 
1698 	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1699 		return;
1700 
1701 	tg3_phy_gather_ump_data(tp, data);
1702 
1703 	tg3_wait_for_event_ack(tp);
1704 
1705 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1706 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1707 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1708 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1709 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1710 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1711 
1712 	tg3_generate_fw_event(tp);
1713 }
1714 
1715 /* tp->lock is held. */
tg3_stop_fw(struct tg3 * tp)1716 static void tg3_stop_fw(struct tg3 *tp)
1717 {
1718 	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1719 		/* Wait for RX cpu to ACK the previous event. */
1720 		tg3_wait_for_event_ack(tp);
1721 
1722 		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1723 
1724 		tg3_generate_fw_event(tp);
1725 
1726 		/* Wait for RX cpu to ACK this event. */
1727 		tg3_wait_for_event_ack(tp);
1728 	}
1729 }
1730 
1731 /* tp->lock is held. */
tg3_write_sig_pre_reset(struct tg3 * tp,int kind)1732 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1733 {
1734 	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1735 		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1736 
1737 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1738 		switch (kind) {
1739 		case RESET_KIND_INIT:
1740 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1741 				      DRV_STATE_START);
1742 			break;
1743 
1744 		case RESET_KIND_SHUTDOWN:
1745 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1746 				      DRV_STATE_UNLOAD);
1747 			break;
1748 
1749 		case RESET_KIND_SUSPEND:
1750 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1751 				      DRV_STATE_SUSPEND);
1752 			break;
1753 
1754 		default:
1755 			break;
1756 		}
1757 	}
1758 }
1759 
1760 /* tp->lock is held. */
tg3_write_sig_post_reset(struct tg3 * tp,int kind)1761 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1762 {
1763 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1764 		switch (kind) {
1765 		case RESET_KIND_INIT:
1766 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1767 				      DRV_STATE_START_DONE);
1768 			break;
1769 
1770 		case RESET_KIND_SHUTDOWN:
1771 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1772 				      DRV_STATE_UNLOAD_DONE);
1773 			break;
1774 
1775 		default:
1776 			break;
1777 		}
1778 	}
1779 }
1780 
1781 /* tp->lock is held. */
tg3_write_sig_legacy(struct tg3 * tp,int kind)1782 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1783 {
1784 	if (tg3_flag(tp, ENABLE_ASF)) {
1785 		switch (kind) {
1786 		case RESET_KIND_INIT:
1787 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1788 				      DRV_STATE_START);
1789 			break;
1790 
1791 		case RESET_KIND_SHUTDOWN:
1792 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1793 				      DRV_STATE_UNLOAD);
1794 			break;
1795 
1796 		case RESET_KIND_SUSPEND:
1797 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1798 				      DRV_STATE_SUSPEND);
1799 			break;
1800 
1801 		default:
1802 			break;
1803 		}
1804 	}
1805 }
1806 
tg3_poll_fw(struct tg3 * tp)1807 static int tg3_poll_fw(struct tg3 *tp)
1808 {
1809 	int i;
1810 	u32 val;
1811 
1812 	if (tg3_flag(tp, NO_FWARE_REPORTED))
1813 		return 0;
1814 
1815 	if (tg3_flag(tp, IS_SSB_CORE)) {
1816 		/* We don't use firmware. */
1817 		return 0;
1818 	}
1819 
1820 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1821 		/* Wait up to 20ms for init done. */
1822 		for (i = 0; i < 200; i++) {
1823 			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1824 				return 0;
1825 			if (pci_channel_offline(tp->pdev))
1826 				return -ENODEV;
1827 
1828 			udelay(100);
1829 		}
1830 		return -ENODEV;
1831 	}
1832 
1833 	/* Wait for firmware initialization to complete. */
1834 	for (i = 0; i < 100000; i++) {
1835 		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1836 		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1837 			break;
1838 		if (pci_channel_offline(tp->pdev)) {
1839 			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1840 				tg3_flag_set(tp, NO_FWARE_REPORTED);
1841 				netdev_info(tp->dev, "No firmware running\n");
1842 			}
1843 
1844 			break;
1845 		}
1846 
1847 		udelay(10);
1848 	}
1849 
1850 	/* Chip might not be fitted with firmware.  Some Sun onboard
1851 	 * parts are configured like that.  So don't signal the timeout
1852 	 * of the above loop as an error, but do report the lack of
1853 	 * running firmware once.
1854 	 */
1855 	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1856 		tg3_flag_set(tp, NO_FWARE_REPORTED);
1857 
1858 		netdev_info(tp->dev, "No firmware running\n");
1859 	}
1860 
1861 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1862 		/* The 57765 A0 needs a little more
1863 		 * time to do some important work.
1864 		 */
1865 		mdelay(10);
1866 	}
1867 
1868 	return 0;
1869 }
1870 
tg3_link_report(struct tg3 * tp)1871 static void tg3_link_report(struct tg3 *tp)
1872 {
1873 	if (!netif_carrier_ok(tp->dev)) {
1874 		netif_info(tp, link, tp->dev, "Link is down\n");
1875 		tg3_ump_link_report(tp);
1876 	} else if (netif_msg_link(tp)) {
1877 		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1878 			    (tp->link_config.active_speed == SPEED_1000 ?
1879 			     1000 :
1880 			     (tp->link_config.active_speed == SPEED_100 ?
1881 			      100 : 10)),
1882 			    (tp->link_config.active_duplex == DUPLEX_FULL ?
1883 			     "full" : "half"));
1884 
1885 		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1886 			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1887 			    "on" : "off",
1888 			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1889 			    "on" : "off");
1890 
1891 		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1892 			netdev_info(tp->dev, "EEE is %s\n",
1893 				    tp->setlpicnt ? "enabled" : "disabled");
1894 
1895 		tg3_ump_link_report(tp);
1896 	}
1897 
1898 	tp->link_up = netif_carrier_ok(tp->dev);
1899 }
1900 
tg3_decode_flowctrl_1000T(u32 adv)1901 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1902 {
1903 	u32 flowctrl = 0;
1904 
1905 	if (adv & ADVERTISE_PAUSE_CAP) {
1906 		flowctrl |= FLOW_CTRL_RX;
1907 		if (!(adv & ADVERTISE_PAUSE_ASYM))
1908 			flowctrl |= FLOW_CTRL_TX;
1909 	} else if (adv & ADVERTISE_PAUSE_ASYM)
1910 		flowctrl |= FLOW_CTRL_TX;
1911 
1912 	return flowctrl;
1913 }
1914 
tg3_advert_flowctrl_1000X(u8 flow_ctrl)1915 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1916 {
1917 	u16 miireg;
1918 
1919 	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1920 		miireg = ADVERTISE_1000XPAUSE;
1921 	else if (flow_ctrl & FLOW_CTRL_TX)
1922 		miireg = ADVERTISE_1000XPSE_ASYM;
1923 	else if (flow_ctrl & FLOW_CTRL_RX)
1924 		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1925 	else
1926 		miireg = 0;
1927 
1928 	return miireg;
1929 }
1930 
tg3_decode_flowctrl_1000X(u32 adv)1931 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1932 {
1933 	u32 flowctrl = 0;
1934 
1935 	if (adv & ADVERTISE_1000XPAUSE) {
1936 		flowctrl |= FLOW_CTRL_RX;
1937 		if (!(adv & ADVERTISE_1000XPSE_ASYM))
1938 			flowctrl |= FLOW_CTRL_TX;
1939 	} else if (adv & ADVERTISE_1000XPSE_ASYM)
1940 		flowctrl |= FLOW_CTRL_TX;
1941 
1942 	return flowctrl;
1943 }
1944 
tg3_resolve_flowctrl_1000X(u16 lcladv,u16 rmtadv)1945 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1946 {
1947 	u8 cap = 0;
1948 
1949 	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1950 		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1951 	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1952 		if (lcladv & ADVERTISE_1000XPAUSE)
1953 			cap = FLOW_CTRL_RX;
1954 		if (rmtadv & ADVERTISE_1000XPAUSE)
1955 			cap = FLOW_CTRL_TX;
1956 	}
1957 
1958 	return cap;
1959 }
1960 
tg3_setup_flow_control(struct tg3 * tp,u32 lcladv,u32 rmtadv)1961 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1962 {
1963 	u8 autoneg;
1964 	u8 flowctrl = 0;
1965 	u32 old_rx_mode = tp->rx_mode;
1966 	u32 old_tx_mode = tp->tx_mode;
1967 
1968 	if (tg3_flag(tp, USE_PHYLIB))
1969 		autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1970 	else
1971 		autoneg = tp->link_config.autoneg;
1972 
1973 	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1974 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1975 			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1976 		else
1977 			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1978 	} else
1979 		flowctrl = tp->link_config.flowctrl;
1980 
1981 	tp->link_config.active_flowctrl = flowctrl;
1982 
1983 	if (flowctrl & FLOW_CTRL_RX)
1984 		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1985 	else
1986 		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1987 
1988 	if (old_rx_mode != tp->rx_mode)
1989 		tw32_f(MAC_RX_MODE, tp->rx_mode);
1990 
1991 	if (flowctrl & FLOW_CTRL_TX)
1992 		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1993 	else
1994 		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1995 
1996 	if (old_tx_mode != tp->tx_mode)
1997 		tw32_f(MAC_TX_MODE, tp->tx_mode);
1998 }
1999 
tg3_adjust_link(struct net_device * dev)2000 static void tg3_adjust_link(struct net_device *dev)
2001 {
2002 	u8 oldflowctrl, linkmesg = 0;
2003 	u32 mac_mode, lcl_adv, rmt_adv;
2004 	struct tg3 *tp = netdev_priv(dev);
2005 	struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2006 
2007 	spin_lock_bh(&tp->lock);
2008 
2009 	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2010 				    MAC_MODE_HALF_DUPLEX);
2011 
2012 	oldflowctrl = tp->link_config.active_flowctrl;
2013 
2014 	if (phydev->link) {
2015 		lcl_adv = 0;
2016 		rmt_adv = 0;
2017 
2018 		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2019 			mac_mode |= MAC_MODE_PORT_MODE_MII;
2020 		else if (phydev->speed == SPEED_1000 ||
2021 			 tg3_asic_rev(tp) != ASIC_REV_5785)
2022 			mac_mode |= MAC_MODE_PORT_MODE_GMII;
2023 		else
2024 			mac_mode |= MAC_MODE_PORT_MODE_MII;
2025 
2026 		if (phydev->duplex == DUPLEX_HALF)
2027 			mac_mode |= MAC_MODE_HALF_DUPLEX;
2028 		else {
2029 			lcl_adv = mii_advertise_flowctrl(
2030 				  tp->link_config.flowctrl);
2031 
2032 			if (phydev->pause)
2033 				rmt_adv = LPA_PAUSE_CAP;
2034 			if (phydev->asym_pause)
2035 				rmt_adv |= LPA_PAUSE_ASYM;
2036 		}
2037 
2038 		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2039 	} else
2040 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
2041 
2042 	if (mac_mode != tp->mac_mode) {
2043 		tp->mac_mode = mac_mode;
2044 		tw32_f(MAC_MODE, tp->mac_mode);
2045 		udelay(40);
2046 	}
2047 
2048 	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2049 		if (phydev->speed == SPEED_10)
2050 			tw32(MAC_MI_STAT,
2051 			     MAC_MI_STAT_10MBPS_MODE |
2052 			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2053 		else
2054 			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2055 	}
2056 
2057 	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2058 		tw32(MAC_TX_LENGTHS,
2059 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2060 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2061 		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2062 	else
2063 		tw32(MAC_TX_LENGTHS,
2064 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2065 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2066 		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2067 
2068 	if (phydev->link != tp->old_link ||
2069 	    phydev->speed != tp->link_config.active_speed ||
2070 	    phydev->duplex != tp->link_config.active_duplex ||
2071 	    oldflowctrl != tp->link_config.active_flowctrl)
2072 		linkmesg = 1;
2073 
2074 	tp->old_link = phydev->link;
2075 	tp->link_config.active_speed = phydev->speed;
2076 	tp->link_config.active_duplex = phydev->duplex;
2077 
2078 	spin_unlock_bh(&tp->lock);
2079 
2080 	if (linkmesg)
2081 		tg3_link_report(tp);
2082 }
2083 
tg3_phy_init(struct tg3 * tp)2084 static int tg3_phy_init(struct tg3 *tp)
2085 {
2086 	struct phy_device *phydev;
2087 
2088 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2089 		return 0;
2090 
2091 	/* Bring the PHY back to a known state. */
2092 	tg3_bmcr_reset(tp);
2093 
2094 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2095 
2096 	/* Attach the MAC to the PHY. */
2097 	phydev = phy_connect(tp->dev, phydev_name(phydev),
2098 			     tg3_adjust_link, phydev->interface);
2099 	if (IS_ERR(phydev)) {
2100 		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2101 		return PTR_ERR(phydev);
2102 	}
2103 
2104 	/* Mask with MAC supported features. */
2105 	switch (phydev->interface) {
2106 	case PHY_INTERFACE_MODE_GMII:
2107 	case PHY_INTERFACE_MODE_RGMII:
2108 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2109 			phy_set_max_speed(phydev, SPEED_1000);
2110 			phy_support_asym_pause(phydev);
2111 			break;
2112 		}
2113 		fallthrough;
2114 	case PHY_INTERFACE_MODE_MII:
2115 		phy_set_max_speed(phydev, SPEED_100);
2116 		phy_support_asym_pause(phydev);
2117 		break;
2118 	default:
2119 		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2120 		return -EINVAL;
2121 	}
2122 
2123 	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2124 
2125 	phy_attached_info(phydev);
2126 
2127 	return 0;
2128 }
2129 
tg3_phy_start(struct tg3 * tp)2130 static void tg3_phy_start(struct tg3 *tp)
2131 {
2132 	struct phy_device *phydev;
2133 
2134 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2135 		return;
2136 
2137 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2138 
2139 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2140 		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2141 		phydev->speed = tp->link_config.speed;
2142 		phydev->duplex = tp->link_config.duplex;
2143 		phydev->autoneg = tp->link_config.autoneg;
2144 		ethtool_convert_legacy_u32_to_link_mode(
2145 			phydev->advertising, tp->link_config.advertising);
2146 	}
2147 
2148 	phy_start(phydev);
2149 
2150 	phy_start_aneg(phydev);
2151 }
2152 
tg3_phy_stop(struct tg3 * tp)2153 static void tg3_phy_stop(struct tg3 *tp)
2154 {
2155 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2156 		return;
2157 
2158 	phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2159 }
2160 
tg3_phy_fini(struct tg3 * tp)2161 static void tg3_phy_fini(struct tg3 *tp)
2162 {
2163 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2164 		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2165 		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2166 	}
2167 }
2168 
tg3_phy_set_extloopbk(struct tg3 * tp)2169 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2170 {
2171 	int err;
2172 	u32 val;
2173 
2174 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2175 		return 0;
2176 
2177 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2178 		/* Cannot do read-modify-write on 5401 */
2179 		err = tg3_phy_auxctl_write(tp,
2180 					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2181 					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2182 					   0x4c20);
2183 		goto done;
2184 	}
2185 
2186 	err = tg3_phy_auxctl_read(tp,
2187 				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2188 	if (err)
2189 		return err;
2190 
2191 	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2192 	err = tg3_phy_auxctl_write(tp,
2193 				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2194 
2195 done:
2196 	return err;
2197 }
2198 
tg3_phy_fet_toggle_apd(struct tg3 * tp,bool enable)2199 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2200 {
2201 	u32 phytest;
2202 
2203 	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2204 		u32 phy;
2205 
2206 		tg3_writephy(tp, MII_TG3_FET_TEST,
2207 			     phytest | MII_TG3_FET_SHADOW_EN);
2208 		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2209 			if (enable)
2210 				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2211 			else
2212 				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2213 			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2214 		}
2215 		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2216 	}
2217 }
2218 
tg3_phy_toggle_apd(struct tg3 * tp,bool enable)2219 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2220 {
2221 	u32 reg;
2222 
2223 	if (!tg3_flag(tp, 5705_PLUS) ||
2224 	    (tg3_flag(tp, 5717_PLUS) &&
2225 	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2226 		return;
2227 
2228 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2229 		tg3_phy_fet_toggle_apd(tp, enable);
2230 		return;
2231 	}
2232 
2233 	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2234 	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2235 	      MII_TG3_MISC_SHDW_SCR5_SDTL |
2236 	      MII_TG3_MISC_SHDW_SCR5_C125OE;
2237 	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2238 		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2239 
2240 	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2241 
2242 
2243 	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2244 	if (enable)
2245 		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2246 
2247 	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2248 }
2249 
tg3_phy_toggle_automdix(struct tg3 * tp,bool enable)2250 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2251 {
2252 	u32 phy;
2253 
2254 	if (!tg3_flag(tp, 5705_PLUS) ||
2255 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2256 		return;
2257 
2258 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2259 		u32 ephy;
2260 
2261 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2262 			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2263 
2264 			tg3_writephy(tp, MII_TG3_FET_TEST,
2265 				     ephy | MII_TG3_FET_SHADOW_EN);
2266 			if (!tg3_readphy(tp, reg, &phy)) {
2267 				if (enable)
2268 					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2269 				else
2270 					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2271 				tg3_writephy(tp, reg, phy);
2272 			}
2273 			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2274 		}
2275 	} else {
2276 		int ret;
2277 
2278 		ret = tg3_phy_auxctl_read(tp,
2279 					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2280 		if (!ret) {
2281 			if (enable)
2282 				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2283 			else
2284 				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2285 			tg3_phy_auxctl_write(tp,
2286 					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2287 		}
2288 	}
2289 }
2290 
tg3_phy_set_wirespeed(struct tg3 * tp)2291 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2292 {
2293 	int ret;
2294 	u32 val;
2295 
2296 	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2297 		return;
2298 
2299 	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2300 	if (!ret)
2301 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2302 				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2303 }
2304 
tg3_phy_apply_otp(struct tg3 * tp)2305 static void tg3_phy_apply_otp(struct tg3 *tp)
2306 {
2307 	u32 otp, phy;
2308 
2309 	if (!tp->phy_otp)
2310 		return;
2311 
2312 	otp = tp->phy_otp;
2313 
2314 	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2315 		return;
2316 
2317 	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2318 	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2319 	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2320 
2321 	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2322 	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2323 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2324 
2325 	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2326 	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2327 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2328 
2329 	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2330 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2331 
2332 	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2333 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2334 
2335 	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2336 	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2337 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2338 
2339 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2340 }
2341 
tg3_eee_pull_config(struct tg3 * tp,struct ethtool_eee * eee)2342 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2343 {
2344 	u32 val;
2345 	struct ethtool_eee *dest = &tp->eee;
2346 
2347 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2348 		return;
2349 
2350 	if (eee)
2351 		dest = eee;
2352 
2353 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2354 		return;
2355 
2356 	/* Pull eee_active */
2357 	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2358 	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2359 		dest->eee_active = 1;
2360 	} else
2361 		dest->eee_active = 0;
2362 
2363 	/* Pull lp advertised settings */
2364 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2365 		return;
2366 	dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2367 
2368 	/* Pull advertised and eee_enabled settings */
2369 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2370 		return;
2371 	dest->eee_enabled = !!val;
2372 	dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2373 
2374 	/* Pull tx_lpi_enabled */
2375 	val = tr32(TG3_CPMU_EEE_MODE);
2376 	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2377 
2378 	/* Pull lpi timer value */
2379 	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2380 }
2381 
tg3_phy_eee_adjust(struct tg3 * tp,bool current_link_up)2382 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2383 {
2384 	u32 val;
2385 
2386 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2387 		return;
2388 
2389 	tp->setlpicnt = 0;
2390 
2391 	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2392 	    current_link_up &&
2393 	    tp->link_config.active_duplex == DUPLEX_FULL &&
2394 	    (tp->link_config.active_speed == SPEED_100 ||
2395 	     tp->link_config.active_speed == SPEED_1000)) {
2396 		u32 eeectl;
2397 
2398 		if (tp->link_config.active_speed == SPEED_1000)
2399 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2400 		else
2401 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2402 
2403 		tw32(TG3_CPMU_EEE_CTRL, eeectl);
2404 
2405 		tg3_eee_pull_config(tp, NULL);
2406 		if (tp->eee.eee_active)
2407 			tp->setlpicnt = 2;
2408 	}
2409 
2410 	if (!tp->setlpicnt) {
2411 		if (current_link_up &&
2412 		   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2413 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2414 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2415 		}
2416 
2417 		val = tr32(TG3_CPMU_EEE_MODE);
2418 		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2419 	}
2420 }
2421 
tg3_phy_eee_enable(struct tg3 * tp)2422 static void tg3_phy_eee_enable(struct tg3 *tp)
2423 {
2424 	u32 val;
2425 
2426 	if (tp->link_config.active_speed == SPEED_1000 &&
2427 	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2428 	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
2429 	     tg3_flag(tp, 57765_CLASS)) &&
2430 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2431 		val = MII_TG3_DSP_TAP26_ALNOKO |
2432 		      MII_TG3_DSP_TAP26_RMRXSTO;
2433 		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2434 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2435 	}
2436 
2437 	val = tr32(TG3_CPMU_EEE_MODE);
2438 	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2439 }
2440 
tg3_wait_macro_done(struct tg3 * tp)2441 static int tg3_wait_macro_done(struct tg3 *tp)
2442 {
2443 	int limit = 100;
2444 
2445 	while (limit--) {
2446 		u32 tmp32;
2447 
2448 		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2449 			if ((tmp32 & 0x1000) == 0)
2450 				break;
2451 		}
2452 	}
2453 	if (limit < 0)
2454 		return -EBUSY;
2455 
2456 	return 0;
2457 }
2458 
tg3_phy_write_and_check_testpat(struct tg3 * tp,int * resetp)2459 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2460 {
2461 	static const u32 test_pat[4][6] = {
2462 	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2463 	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2464 	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2465 	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2466 	};
2467 	int chan;
2468 
2469 	for (chan = 0; chan < 4; chan++) {
2470 		int i;
2471 
2472 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2473 			     (chan * 0x2000) | 0x0200);
2474 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2475 
2476 		for (i = 0; i < 6; i++)
2477 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2478 				     test_pat[chan][i]);
2479 
2480 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2481 		if (tg3_wait_macro_done(tp)) {
2482 			*resetp = 1;
2483 			return -EBUSY;
2484 		}
2485 
2486 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2487 			     (chan * 0x2000) | 0x0200);
2488 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2489 		if (tg3_wait_macro_done(tp)) {
2490 			*resetp = 1;
2491 			return -EBUSY;
2492 		}
2493 
2494 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2495 		if (tg3_wait_macro_done(tp)) {
2496 			*resetp = 1;
2497 			return -EBUSY;
2498 		}
2499 
2500 		for (i = 0; i < 6; i += 2) {
2501 			u32 low, high;
2502 
2503 			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2504 			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2505 			    tg3_wait_macro_done(tp)) {
2506 				*resetp = 1;
2507 				return -EBUSY;
2508 			}
2509 			low &= 0x7fff;
2510 			high &= 0x000f;
2511 			if (low != test_pat[chan][i] ||
2512 			    high != test_pat[chan][i+1]) {
2513 				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2514 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2515 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2516 
2517 				return -EBUSY;
2518 			}
2519 		}
2520 	}
2521 
2522 	return 0;
2523 }
2524 
tg3_phy_reset_chanpat(struct tg3 * tp)2525 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2526 {
2527 	int chan;
2528 
2529 	for (chan = 0; chan < 4; chan++) {
2530 		int i;
2531 
2532 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2533 			     (chan * 0x2000) | 0x0200);
2534 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2535 		for (i = 0; i < 6; i++)
2536 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2537 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2538 		if (tg3_wait_macro_done(tp))
2539 			return -EBUSY;
2540 	}
2541 
2542 	return 0;
2543 }
2544 
tg3_phy_reset_5703_4_5(struct tg3 * tp)2545 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2546 {
2547 	u32 reg32, phy9_orig;
2548 	int retries, do_phy_reset, err;
2549 
2550 	retries = 10;
2551 	do_phy_reset = 1;
2552 	do {
2553 		if (do_phy_reset) {
2554 			err = tg3_bmcr_reset(tp);
2555 			if (err)
2556 				return err;
2557 			do_phy_reset = 0;
2558 		}
2559 
2560 		/* Disable transmitter and interrupt.  */
2561 		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2562 			continue;
2563 
2564 		reg32 |= 0x3000;
2565 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2566 
2567 		/* Set full-duplex, 1000 mbps.  */
2568 		tg3_writephy(tp, MII_BMCR,
2569 			     BMCR_FULLDPLX | BMCR_SPEED1000);
2570 
2571 		/* Set to master mode.  */
2572 		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2573 			continue;
2574 
2575 		tg3_writephy(tp, MII_CTRL1000,
2576 			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2577 
2578 		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2579 		if (err)
2580 			return err;
2581 
2582 		/* Block the PHY control access.  */
2583 		tg3_phydsp_write(tp, 0x8005, 0x0800);
2584 
2585 		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2586 		if (!err)
2587 			break;
2588 	} while (--retries);
2589 
2590 	err = tg3_phy_reset_chanpat(tp);
2591 	if (err)
2592 		return err;
2593 
2594 	tg3_phydsp_write(tp, 0x8005, 0x0000);
2595 
2596 	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2597 	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2598 
2599 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2600 
2601 	tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2602 
2603 	err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2604 	if (err)
2605 		return err;
2606 
2607 	reg32 &= ~0x3000;
2608 	tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2609 
2610 	return 0;
2611 }
2612 
tg3_carrier_off(struct tg3 * tp)2613 static void tg3_carrier_off(struct tg3 *tp)
2614 {
2615 	netif_carrier_off(tp->dev);
2616 	tp->link_up = false;
2617 }
2618 
tg3_warn_mgmt_link_flap(struct tg3 * tp)2619 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2620 {
2621 	if (tg3_flag(tp, ENABLE_ASF))
2622 		netdev_warn(tp->dev,
2623 			    "Management side-band traffic will be interrupted during phy settings change\n");
2624 }
2625 
2626 /* This will reset the tigon3 PHY if there is no valid
2627  * link unless the FORCE argument is non-zero.
2628  */
tg3_phy_reset(struct tg3 * tp)2629 static int tg3_phy_reset(struct tg3 *tp)
2630 {
2631 	u32 val, cpmuctrl;
2632 	int err;
2633 
2634 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2635 		val = tr32(GRC_MISC_CFG);
2636 		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2637 		udelay(40);
2638 	}
2639 	err  = tg3_readphy(tp, MII_BMSR, &val);
2640 	err |= tg3_readphy(tp, MII_BMSR, &val);
2641 	if (err != 0)
2642 		return -EBUSY;
2643 
2644 	if (netif_running(tp->dev) && tp->link_up) {
2645 		netif_carrier_off(tp->dev);
2646 		tg3_link_report(tp);
2647 	}
2648 
2649 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2650 	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
2651 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
2652 		err = tg3_phy_reset_5703_4_5(tp);
2653 		if (err)
2654 			return err;
2655 		goto out;
2656 	}
2657 
2658 	cpmuctrl = 0;
2659 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2660 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2661 		cpmuctrl = tr32(TG3_CPMU_CTRL);
2662 		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2663 			tw32(TG3_CPMU_CTRL,
2664 			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2665 	}
2666 
2667 	err = tg3_bmcr_reset(tp);
2668 	if (err)
2669 		return err;
2670 
2671 	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2672 		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2673 		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2674 
2675 		tw32(TG3_CPMU_CTRL, cpmuctrl);
2676 	}
2677 
2678 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2679 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2680 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2681 		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2682 		    CPMU_LSPD_1000MB_MACCLK_12_5) {
2683 			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2684 			udelay(40);
2685 			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2686 		}
2687 	}
2688 
2689 	if (tg3_flag(tp, 5717_PLUS) &&
2690 	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2691 		return 0;
2692 
2693 	tg3_phy_apply_otp(tp);
2694 
2695 	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2696 		tg3_phy_toggle_apd(tp, true);
2697 	else
2698 		tg3_phy_toggle_apd(tp, false);
2699 
2700 out:
2701 	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2702 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2703 		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2704 		tg3_phydsp_write(tp, 0x000a, 0x0323);
2705 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2706 	}
2707 
2708 	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2709 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2710 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2711 	}
2712 
2713 	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2714 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2715 			tg3_phydsp_write(tp, 0x000a, 0x310b);
2716 			tg3_phydsp_write(tp, 0x201f, 0x9506);
2717 			tg3_phydsp_write(tp, 0x401f, 0x14e2);
2718 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2719 		}
2720 	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2721 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2722 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2723 			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2724 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2725 				tg3_writephy(tp, MII_TG3_TEST1,
2726 					     MII_TG3_TEST1_TRIM_EN | 0x4);
2727 			} else
2728 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2729 
2730 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2731 		}
2732 	}
2733 
2734 	/* Set Extended packet length bit (bit 14) on all chips that */
2735 	/* support jumbo frames */
2736 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2737 		/* Cannot do read-modify-write on 5401 */
2738 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2739 	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2740 		/* Set bit 14 with read-modify-write to preserve other bits */
2741 		err = tg3_phy_auxctl_read(tp,
2742 					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2743 		if (!err)
2744 			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2745 					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2746 	}
2747 
2748 	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
2749 	 * jumbo frames transmission.
2750 	 */
2751 	if (tg3_flag(tp, JUMBO_CAPABLE)) {
2752 		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2753 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
2754 				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2755 	}
2756 
2757 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2758 		/* adjust output voltage */
2759 		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2760 	}
2761 
2762 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2763 		tg3_phydsp_write(tp, 0xffb, 0x4000);
2764 
2765 	tg3_phy_toggle_automdix(tp, true);
2766 	tg3_phy_set_wirespeed(tp);
2767 	return 0;
2768 }
2769 
2770 #define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
2771 #define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
2772 #define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
2773 					  TG3_GPIO_MSG_NEED_VAUX)
2774 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2775 	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2776 	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2777 	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2778 	 (TG3_GPIO_MSG_DRVR_PRES << 12))
2779 
2780 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2781 	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2782 	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2783 	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2784 	 (TG3_GPIO_MSG_NEED_VAUX << 12))
2785 
tg3_set_function_status(struct tg3 * tp,u32 newstat)2786 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2787 {
2788 	u32 status, shift;
2789 
2790 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2791 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2792 		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2793 	else
2794 		status = tr32(TG3_CPMU_DRV_STATUS);
2795 
2796 	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2797 	status &= ~(TG3_GPIO_MSG_MASK << shift);
2798 	status |= (newstat << shift);
2799 
2800 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2801 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2802 		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2803 	else
2804 		tw32(TG3_CPMU_DRV_STATUS, status);
2805 
2806 	return status >> TG3_APE_GPIO_MSG_SHIFT;
2807 }
2808 
tg3_pwrsrc_switch_to_vmain(struct tg3 * tp)2809 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2810 {
2811 	if (!tg3_flag(tp, IS_NIC))
2812 		return 0;
2813 
2814 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2815 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2816 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2817 		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2818 			return -EIO;
2819 
2820 		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2821 
2822 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2823 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2824 
2825 		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2826 	} else {
2827 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2828 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2829 	}
2830 
2831 	return 0;
2832 }
2833 
tg3_pwrsrc_die_with_vmain(struct tg3 * tp)2834 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2835 {
2836 	u32 grc_local_ctrl;
2837 
2838 	if (!tg3_flag(tp, IS_NIC) ||
2839 	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
2840 	    tg3_asic_rev(tp) == ASIC_REV_5701)
2841 		return;
2842 
2843 	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2844 
2845 	tw32_wait_f(GRC_LOCAL_CTRL,
2846 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2847 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2848 
2849 	tw32_wait_f(GRC_LOCAL_CTRL,
2850 		    grc_local_ctrl,
2851 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2852 
2853 	tw32_wait_f(GRC_LOCAL_CTRL,
2854 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2855 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2856 }
2857 
tg3_pwrsrc_switch_to_vaux(struct tg3 * tp)2858 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2859 {
2860 	if (!tg3_flag(tp, IS_NIC))
2861 		return;
2862 
2863 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2864 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
2865 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2866 			    (GRC_LCLCTRL_GPIO_OE0 |
2867 			     GRC_LCLCTRL_GPIO_OE1 |
2868 			     GRC_LCLCTRL_GPIO_OE2 |
2869 			     GRC_LCLCTRL_GPIO_OUTPUT0 |
2870 			     GRC_LCLCTRL_GPIO_OUTPUT1),
2871 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2872 	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2873 		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2874 		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2875 		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2876 				     GRC_LCLCTRL_GPIO_OE1 |
2877 				     GRC_LCLCTRL_GPIO_OE2 |
2878 				     GRC_LCLCTRL_GPIO_OUTPUT0 |
2879 				     GRC_LCLCTRL_GPIO_OUTPUT1 |
2880 				     tp->grc_local_ctrl;
2881 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2882 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2883 
2884 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2885 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2886 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2887 
2888 		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2889 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2890 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2891 	} else {
2892 		u32 no_gpio2;
2893 		u32 grc_local_ctrl = 0;
2894 
2895 		/* Workaround to prevent overdrawing Amps. */
2896 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2897 			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2898 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2899 				    grc_local_ctrl,
2900 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2901 		}
2902 
2903 		/* On 5753 and variants, GPIO2 cannot be used. */
2904 		no_gpio2 = tp->nic_sram_data_cfg &
2905 			   NIC_SRAM_DATA_CFG_NO_GPIO2;
2906 
2907 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2908 				  GRC_LCLCTRL_GPIO_OE1 |
2909 				  GRC_LCLCTRL_GPIO_OE2 |
2910 				  GRC_LCLCTRL_GPIO_OUTPUT1 |
2911 				  GRC_LCLCTRL_GPIO_OUTPUT2;
2912 		if (no_gpio2) {
2913 			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2914 					    GRC_LCLCTRL_GPIO_OUTPUT2);
2915 		}
2916 		tw32_wait_f(GRC_LOCAL_CTRL,
2917 			    tp->grc_local_ctrl | grc_local_ctrl,
2918 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2919 
2920 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2921 
2922 		tw32_wait_f(GRC_LOCAL_CTRL,
2923 			    tp->grc_local_ctrl | grc_local_ctrl,
2924 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2925 
2926 		if (!no_gpio2) {
2927 			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2928 			tw32_wait_f(GRC_LOCAL_CTRL,
2929 				    tp->grc_local_ctrl | grc_local_ctrl,
2930 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2931 		}
2932 	}
2933 }
2934 
tg3_frob_aux_power_5717(struct tg3 * tp,bool wol_enable)2935 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2936 {
2937 	u32 msg = 0;
2938 
2939 	/* Serialize power state transitions */
2940 	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2941 		return;
2942 
2943 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2944 		msg = TG3_GPIO_MSG_NEED_VAUX;
2945 
2946 	msg = tg3_set_function_status(tp, msg);
2947 
2948 	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2949 		goto done;
2950 
2951 	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2952 		tg3_pwrsrc_switch_to_vaux(tp);
2953 	else
2954 		tg3_pwrsrc_die_with_vmain(tp);
2955 
2956 done:
2957 	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2958 }
2959 
tg3_frob_aux_power(struct tg3 * tp,bool include_wol)2960 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2961 {
2962 	bool need_vaux = false;
2963 
2964 	/* The GPIOs do something completely different on 57765. */
2965 	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2966 		return;
2967 
2968 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2969 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2970 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2971 		tg3_frob_aux_power_5717(tp, include_wol ?
2972 					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2973 		return;
2974 	}
2975 
2976 	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2977 		struct net_device *dev_peer;
2978 
2979 		dev_peer = pci_get_drvdata(tp->pdev_peer);
2980 
2981 		/* remove_one() may have been run on the peer. */
2982 		if (dev_peer) {
2983 			struct tg3 *tp_peer = netdev_priv(dev_peer);
2984 
2985 			if (tg3_flag(tp_peer, INIT_COMPLETE))
2986 				return;
2987 
2988 			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2989 			    tg3_flag(tp_peer, ENABLE_ASF))
2990 				need_vaux = true;
2991 		}
2992 	}
2993 
2994 	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2995 	    tg3_flag(tp, ENABLE_ASF))
2996 		need_vaux = true;
2997 
2998 	if (need_vaux)
2999 		tg3_pwrsrc_switch_to_vaux(tp);
3000 	else
3001 		tg3_pwrsrc_die_with_vmain(tp);
3002 }
3003 
tg3_5700_link_polarity(struct tg3 * tp,u32 speed)3004 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3005 {
3006 	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3007 		return 1;
3008 	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3009 		if (speed != SPEED_10)
3010 			return 1;
3011 	} else if (speed == SPEED_10)
3012 		return 1;
3013 
3014 	return 0;
3015 }
3016 
tg3_phy_power_bug(struct tg3 * tp)3017 static bool tg3_phy_power_bug(struct tg3 *tp)
3018 {
3019 	switch (tg3_asic_rev(tp)) {
3020 	case ASIC_REV_5700:
3021 	case ASIC_REV_5704:
3022 		return true;
3023 	case ASIC_REV_5780:
3024 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3025 			return true;
3026 		return false;
3027 	case ASIC_REV_5717:
3028 		if (!tp->pci_fn)
3029 			return true;
3030 		return false;
3031 	case ASIC_REV_5719:
3032 	case ASIC_REV_5720:
3033 		if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3034 		    !tp->pci_fn)
3035 			return true;
3036 		return false;
3037 	}
3038 
3039 	return false;
3040 }
3041 
tg3_phy_led_bug(struct tg3 * tp)3042 static bool tg3_phy_led_bug(struct tg3 *tp)
3043 {
3044 	switch (tg3_asic_rev(tp)) {
3045 	case ASIC_REV_5719:
3046 	case ASIC_REV_5720:
3047 		if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3048 		    !tp->pci_fn)
3049 			return true;
3050 		return false;
3051 	}
3052 
3053 	return false;
3054 }
3055 
tg3_power_down_phy(struct tg3 * tp,bool do_low_power)3056 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3057 {
3058 	u32 val;
3059 
3060 	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3061 		return;
3062 
3063 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3064 		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3065 			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3066 			u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3067 
3068 			sg_dig_ctrl |=
3069 				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3070 			tw32(SG_DIG_CTRL, sg_dig_ctrl);
3071 			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3072 		}
3073 		return;
3074 	}
3075 
3076 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3077 		tg3_bmcr_reset(tp);
3078 		val = tr32(GRC_MISC_CFG);
3079 		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3080 		udelay(40);
3081 		return;
3082 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3083 		u32 phytest;
3084 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3085 			u32 phy;
3086 
3087 			tg3_writephy(tp, MII_ADVERTISE, 0);
3088 			tg3_writephy(tp, MII_BMCR,
3089 				     BMCR_ANENABLE | BMCR_ANRESTART);
3090 
3091 			tg3_writephy(tp, MII_TG3_FET_TEST,
3092 				     phytest | MII_TG3_FET_SHADOW_EN);
3093 			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3094 				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3095 				tg3_writephy(tp,
3096 					     MII_TG3_FET_SHDW_AUXMODE4,
3097 					     phy);
3098 			}
3099 			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3100 		}
3101 		return;
3102 	} else if (do_low_power) {
3103 		if (!tg3_phy_led_bug(tp))
3104 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
3105 				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3106 
3107 		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3108 		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3109 		      MII_TG3_AUXCTL_PCTL_VREG_11V;
3110 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3111 	}
3112 
3113 	/* The PHY should not be powered down on some chips because
3114 	 * of bugs.
3115 	 */
3116 	if (tg3_phy_power_bug(tp))
3117 		return;
3118 
3119 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3120 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3121 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3122 		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3123 		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3124 		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3125 	}
3126 
3127 	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3128 }
3129 
3130 /* tp->lock is held. */
tg3_nvram_lock(struct tg3 * tp)3131 static int tg3_nvram_lock(struct tg3 *tp)
3132 {
3133 	if (tg3_flag(tp, NVRAM)) {
3134 		int i;
3135 
3136 		if (tp->nvram_lock_cnt == 0) {
3137 			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3138 			for (i = 0; i < 8000; i++) {
3139 				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3140 					break;
3141 				udelay(20);
3142 			}
3143 			if (i == 8000) {
3144 				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3145 				return -ENODEV;
3146 			}
3147 		}
3148 		tp->nvram_lock_cnt++;
3149 	}
3150 	return 0;
3151 }
3152 
3153 /* tp->lock is held. */
tg3_nvram_unlock(struct tg3 * tp)3154 static void tg3_nvram_unlock(struct tg3 *tp)
3155 {
3156 	if (tg3_flag(tp, NVRAM)) {
3157 		if (tp->nvram_lock_cnt > 0)
3158 			tp->nvram_lock_cnt--;
3159 		if (tp->nvram_lock_cnt == 0)
3160 			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3161 	}
3162 }
3163 
3164 /* tp->lock is held. */
tg3_enable_nvram_access(struct tg3 * tp)3165 static void tg3_enable_nvram_access(struct tg3 *tp)
3166 {
3167 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3168 		u32 nvaccess = tr32(NVRAM_ACCESS);
3169 
3170 		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3171 	}
3172 }
3173 
3174 /* tp->lock is held. */
tg3_disable_nvram_access(struct tg3 * tp)3175 static void tg3_disable_nvram_access(struct tg3 *tp)
3176 {
3177 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3178 		u32 nvaccess = tr32(NVRAM_ACCESS);
3179 
3180 		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3181 	}
3182 }
3183 
tg3_nvram_read_using_eeprom(struct tg3 * tp,u32 offset,u32 * val)3184 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3185 					u32 offset, u32 *val)
3186 {
3187 	u32 tmp;
3188 	int i;
3189 
3190 	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3191 		return -EINVAL;
3192 
3193 	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3194 					EEPROM_ADDR_DEVID_MASK |
3195 					EEPROM_ADDR_READ);
3196 	tw32(GRC_EEPROM_ADDR,
3197 	     tmp |
3198 	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
3199 	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3200 	      EEPROM_ADDR_ADDR_MASK) |
3201 	     EEPROM_ADDR_READ | EEPROM_ADDR_START);
3202 
3203 	for (i = 0; i < 1000; i++) {
3204 		tmp = tr32(GRC_EEPROM_ADDR);
3205 
3206 		if (tmp & EEPROM_ADDR_COMPLETE)
3207 			break;
3208 		msleep(1);
3209 	}
3210 	if (!(tmp & EEPROM_ADDR_COMPLETE))
3211 		return -EBUSY;
3212 
3213 	tmp = tr32(GRC_EEPROM_DATA);
3214 
3215 	/*
3216 	 * The data will always be opposite the native endian
3217 	 * format.  Perform a blind byteswap to compensate.
3218 	 */
3219 	*val = swab32(tmp);
3220 
3221 	return 0;
3222 }
3223 
3224 #define NVRAM_CMD_TIMEOUT 10000
3225 
tg3_nvram_exec_cmd(struct tg3 * tp,u32 nvram_cmd)3226 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3227 {
3228 	int i;
3229 
3230 	tw32(NVRAM_CMD, nvram_cmd);
3231 	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3232 		usleep_range(10, 40);
3233 		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3234 			udelay(10);
3235 			break;
3236 		}
3237 	}
3238 
3239 	if (i == NVRAM_CMD_TIMEOUT)
3240 		return -EBUSY;
3241 
3242 	return 0;
3243 }
3244 
tg3_nvram_phys_addr(struct tg3 * tp,u32 addr)3245 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3246 {
3247 	if (tg3_flag(tp, NVRAM) &&
3248 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3249 	    tg3_flag(tp, FLASH) &&
3250 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3251 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3252 
3253 		addr = ((addr / tp->nvram_pagesize) <<
3254 			ATMEL_AT45DB0X1B_PAGE_POS) +
3255 		       (addr % tp->nvram_pagesize);
3256 
3257 	return addr;
3258 }
3259 
tg3_nvram_logical_addr(struct tg3 * tp,u32 addr)3260 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3261 {
3262 	if (tg3_flag(tp, NVRAM) &&
3263 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3264 	    tg3_flag(tp, FLASH) &&
3265 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3266 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3267 
3268 		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3269 			tp->nvram_pagesize) +
3270 		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3271 
3272 	return addr;
3273 }
3274 
3275 /* NOTE: Data read in from NVRAM is byteswapped according to
3276  * the byteswapping settings for all other register accesses.
3277  * tg3 devices are BE devices, so on a BE machine, the data
3278  * returned will be exactly as it is seen in NVRAM.  On a LE
3279  * machine, the 32-bit value will be byteswapped.
3280  */
tg3_nvram_read(struct tg3 * tp,u32 offset,u32 * val)3281 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3282 {
3283 	int ret;
3284 
3285 	if (!tg3_flag(tp, NVRAM))
3286 		return tg3_nvram_read_using_eeprom(tp, offset, val);
3287 
3288 	offset = tg3_nvram_phys_addr(tp, offset);
3289 
3290 	if (offset > NVRAM_ADDR_MSK)
3291 		return -EINVAL;
3292 
3293 	ret = tg3_nvram_lock(tp);
3294 	if (ret)
3295 		return ret;
3296 
3297 	tg3_enable_nvram_access(tp);
3298 
3299 	tw32(NVRAM_ADDR, offset);
3300 	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3301 		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3302 
3303 	if (ret == 0)
3304 		*val = tr32(NVRAM_RDDATA);
3305 
3306 	tg3_disable_nvram_access(tp);
3307 
3308 	tg3_nvram_unlock(tp);
3309 
3310 	return ret;
3311 }
3312 
3313 /* Ensures NVRAM data is in bytestream format. */
tg3_nvram_read_be32(struct tg3 * tp,u32 offset,__be32 * val)3314 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3315 {
3316 	u32 v;
3317 	int res = tg3_nvram_read(tp, offset, &v);
3318 	if (!res)
3319 		*val = cpu_to_be32(v);
3320 	return res;
3321 }
3322 
tg3_nvram_write_block_using_eeprom(struct tg3 * tp,u32 offset,u32 len,u8 * buf)3323 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3324 				    u32 offset, u32 len, u8 *buf)
3325 {
3326 	int i, j, rc = 0;
3327 	u32 val;
3328 
3329 	for (i = 0; i < len; i += 4) {
3330 		u32 addr;
3331 		__be32 data;
3332 
3333 		addr = offset + i;
3334 
3335 		memcpy(&data, buf + i, 4);
3336 
3337 		/*
3338 		 * The SEEPROM interface expects the data to always be opposite
3339 		 * the native endian format.  We accomplish this by reversing
3340 		 * all the operations that would have been performed on the
3341 		 * data from a call to tg3_nvram_read_be32().
3342 		 */
3343 		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3344 
3345 		val = tr32(GRC_EEPROM_ADDR);
3346 		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3347 
3348 		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3349 			EEPROM_ADDR_READ);
3350 		tw32(GRC_EEPROM_ADDR, val |
3351 			(0 << EEPROM_ADDR_DEVID_SHIFT) |
3352 			(addr & EEPROM_ADDR_ADDR_MASK) |
3353 			EEPROM_ADDR_START |
3354 			EEPROM_ADDR_WRITE);
3355 
3356 		for (j = 0; j < 1000; j++) {
3357 			val = tr32(GRC_EEPROM_ADDR);
3358 
3359 			if (val & EEPROM_ADDR_COMPLETE)
3360 				break;
3361 			msleep(1);
3362 		}
3363 		if (!(val & EEPROM_ADDR_COMPLETE)) {
3364 			rc = -EBUSY;
3365 			break;
3366 		}
3367 	}
3368 
3369 	return rc;
3370 }
3371 
3372 /* offset and length are dword aligned */
tg3_nvram_write_block_unbuffered(struct tg3 * tp,u32 offset,u32 len,u8 * buf)3373 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3374 		u8 *buf)
3375 {
3376 	int ret = 0;
3377 	u32 pagesize = tp->nvram_pagesize;
3378 	u32 pagemask = pagesize - 1;
3379 	u32 nvram_cmd;
3380 	u8 *tmp;
3381 
3382 	tmp = kmalloc(pagesize, GFP_KERNEL);
3383 	if (tmp == NULL)
3384 		return -ENOMEM;
3385 
3386 	while (len) {
3387 		int j;
3388 		u32 phy_addr, page_off, size;
3389 
3390 		phy_addr = offset & ~pagemask;
3391 
3392 		for (j = 0; j < pagesize; j += 4) {
3393 			ret = tg3_nvram_read_be32(tp, phy_addr + j,
3394 						  (__be32 *) (tmp + j));
3395 			if (ret)
3396 				break;
3397 		}
3398 		if (ret)
3399 			break;
3400 
3401 		page_off = offset & pagemask;
3402 		size = pagesize;
3403 		if (len < size)
3404 			size = len;
3405 
3406 		len -= size;
3407 
3408 		memcpy(tmp + page_off, buf, size);
3409 
3410 		offset = offset + (pagesize - page_off);
3411 
3412 		tg3_enable_nvram_access(tp);
3413 
3414 		/*
3415 		 * Before we can erase the flash page, we need
3416 		 * to issue a special "write enable" command.
3417 		 */
3418 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3419 
3420 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3421 			break;
3422 
3423 		/* Erase the target page */
3424 		tw32(NVRAM_ADDR, phy_addr);
3425 
3426 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3427 			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3428 
3429 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3430 			break;
3431 
3432 		/* Issue another write enable to start the write. */
3433 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3434 
3435 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3436 			break;
3437 
3438 		for (j = 0; j < pagesize; j += 4) {
3439 			__be32 data;
3440 
3441 			data = *((__be32 *) (tmp + j));
3442 
3443 			tw32(NVRAM_WRDATA, be32_to_cpu(data));
3444 
3445 			tw32(NVRAM_ADDR, phy_addr + j);
3446 
3447 			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3448 				NVRAM_CMD_WR;
3449 
3450 			if (j == 0)
3451 				nvram_cmd |= NVRAM_CMD_FIRST;
3452 			else if (j == (pagesize - 4))
3453 				nvram_cmd |= NVRAM_CMD_LAST;
3454 
3455 			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3456 			if (ret)
3457 				break;
3458 		}
3459 		if (ret)
3460 			break;
3461 	}
3462 
3463 	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3464 	tg3_nvram_exec_cmd(tp, nvram_cmd);
3465 
3466 	kfree(tmp);
3467 
3468 	return ret;
3469 }
3470 
3471 /* offset and length are dword aligned */
tg3_nvram_write_block_buffered(struct tg3 * tp,u32 offset,u32 len,u8 * buf)3472 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3473 		u8 *buf)
3474 {
3475 	int i, ret = 0;
3476 
3477 	for (i = 0; i < len; i += 4, offset += 4) {
3478 		u32 page_off, phy_addr, nvram_cmd;
3479 		__be32 data;
3480 
3481 		memcpy(&data, buf + i, 4);
3482 		tw32(NVRAM_WRDATA, be32_to_cpu(data));
3483 
3484 		page_off = offset % tp->nvram_pagesize;
3485 
3486 		phy_addr = tg3_nvram_phys_addr(tp, offset);
3487 
3488 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3489 
3490 		if (page_off == 0 || i == 0)
3491 			nvram_cmd |= NVRAM_CMD_FIRST;
3492 		if (page_off == (tp->nvram_pagesize - 4))
3493 			nvram_cmd |= NVRAM_CMD_LAST;
3494 
3495 		if (i == (len - 4))
3496 			nvram_cmd |= NVRAM_CMD_LAST;
3497 
3498 		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3499 		    !tg3_flag(tp, FLASH) ||
3500 		    !tg3_flag(tp, 57765_PLUS))
3501 			tw32(NVRAM_ADDR, phy_addr);
3502 
3503 		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3504 		    !tg3_flag(tp, 5755_PLUS) &&
3505 		    (tp->nvram_jedecnum == JEDEC_ST) &&
3506 		    (nvram_cmd & NVRAM_CMD_FIRST)) {
3507 			u32 cmd;
3508 
3509 			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3510 			ret = tg3_nvram_exec_cmd(tp, cmd);
3511 			if (ret)
3512 				break;
3513 		}
3514 		if (!tg3_flag(tp, FLASH)) {
3515 			/* We always do complete word writes to eeprom. */
3516 			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3517 		}
3518 
3519 		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3520 		if (ret)
3521 			break;
3522 	}
3523 	return ret;
3524 }
3525 
3526 /* offset and length are dword aligned */
tg3_nvram_write_block(struct tg3 * tp,u32 offset,u32 len,u8 * buf)3527 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3528 {
3529 	int ret;
3530 
3531 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3532 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3533 		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
3534 		udelay(40);
3535 	}
3536 
3537 	if (!tg3_flag(tp, NVRAM)) {
3538 		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3539 	} else {
3540 		u32 grc_mode;
3541 
3542 		ret = tg3_nvram_lock(tp);
3543 		if (ret)
3544 			return ret;
3545 
3546 		tg3_enable_nvram_access(tp);
3547 		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3548 			tw32(NVRAM_WRITE1, 0x406);
3549 
3550 		grc_mode = tr32(GRC_MODE);
3551 		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3552 
3553 		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3554 			ret = tg3_nvram_write_block_buffered(tp, offset, len,
3555 				buf);
3556 		} else {
3557 			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3558 				buf);
3559 		}
3560 
3561 		grc_mode = tr32(GRC_MODE);
3562 		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3563 
3564 		tg3_disable_nvram_access(tp);
3565 		tg3_nvram_unlock(tp);
3566 	}
3567 
3568 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3569 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3570 		udelay(40);
3571 	}
3572 
3573 	return ret;
3574 }
3575 
3576 #define RX_CPU_SCRATCH_BASE	0x30000
3577 #define RX_CPU_SCRATCH_SIZE	0x04000
3578 #define TX_CPU_SCRATCH_BASE	0x34000
3579 #define TX_CPU_SCRATCH_SIZE	0x04000
3580 
3581 /* tp->lock is held. */
tg3_pause_cpu(struct tg3 * tp,u32 cpu_base)3582 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3583 {
3584 	int i;
3585 	const int iters = 10000;
3586 
3587 	for (i = 0; i < iters; i++) {
3588 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3589 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3590 		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3591 			break;
3592 		if (pci_channel_offline(tp->pdev))
3593 			return -EBUSY;
3594 	}
3595 
3596 	return (i == iters) ? -EBUSY : 0;
3597 }
3598 
3599 /* tp->lock is held. */
tg3_rxcpu_pause(struct tg3 * tp)3600 static int tg3_rxcpu_pause(struct tg3 *tp)
3601 {
3602 	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3603 
3604 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3605 	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3606 	udelay(10);
3607 
3608 	return rc;
3609 }
3610 
3611 /* tp->lock is held. */
tg3_txcpu_pause(struct tg3 * tp)3612 static int tg3_txcpu_pause(struct tg3 *tp)
3613 {
3614 	return tg3_pause_cpu(tp, TX_CPU_BASE);
3615 }
3616 
3617 /* tp->lock is held. */
tg3_resume_cpu(struct tg3 * tp,u32 cpu_base)3618 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3619 {
3620 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3621 	tw32_f(cpu_base + CPU_MODE,  0x00000000);
3622 }
3623 
3624 /* tp->lock is held. */
tg3_rxcpu_resume(struct tg3 * tp)3625 static void tg3_rxcpu_resume(struct tg3 *tp)
3626 {
3627 	tg3_resume_cpu(tp, RX_CPU_BASE);
3628 }
3629 
3630 /* tp->lock is held. */
tg3_halt_cpu(struct tg3 * tp,u32 cpu_base)3631 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3632 {
3633 	int rc;
3634 
3635 	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3636 
3637 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3638 		u32 val = tr32(GRC_VCPU_EXT_CTRL);
3639 
3640 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3641 		return 0;
3642 	}
3643 	if (cpu_base == RX_CPU_BASE) {
3644 		rc = tg3_rxcpu_pause(tp);
3645 	} else {
3646 		/*
3647 		 * There is only an Rx CPU for the 5750 derivative in the
3648 		 * BCM4785.
3649 		 */
3650 		if (tg3_flag(tp, IS_SSB_CORE))
3651 			return 0;
3652 
3653 		rc = tg3_txcpu_pause(tp);
3654 	}
3655 
3656 	if (rc) {
3657 		netdev_err(tp->dev, "%s timed out, %s CPU\n",
3658 			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3659 		return -ENODEV;
3660 	}
3661 
3662 	/* Clear firmware's nvram arbitration. */
3663 	if (tg3_flag(tp, NVRAM))
3664 		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3665 	return 0;
3666 }
3667 
tg3_fw_data_len(struct tg3 * tp,const struct tg3_firmware_hdr * fw_hdr)3668 static int tg3_fw_data_len(struct tg3 *tp,
3669 			   const struct tg3_firmware_hdr *fw_hdr)
3670 {
3671 	int fw_len;
3672 
3673 	/* Non fragmented firmware have one firmware header followed by a
3674 	 * contiguous chunk of data to be written. The length field in that
3675 	 * header is not the length of data to be written but the complete
3676 	 * length of the bss. The data length is determined based on
3677 	 * tp->fw->size minus headers.
3678 	 *
3679 	 * Fragmented firmware have a main header followed by multiple
3680 	 * fragments. Each fragment is identical to non fragmented firmware
3681 	 * with a firmware header followed by a contiguous chunk of data. In
3682 	 * the main header, the length field is unused and set to 0xffffffff.
3683 	 * In each fragment header the length is the entire size of that
3684 	 * fragment i.e. fragment data + header length. Data length is
3685 	 * therefore length field in the header minus TG3_FW_HDR_LEN.
3686 	 */
3687 	if (tp->fw_len == 0xffffffff)
3688 		fw_len = be32_to_cpu(fw_hdr->len);
3689 	else
3690 		fw_len = tp->fw->size;
3691 
3692 	return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3693 }
3694 
3695 /* tp->lock is held. */
tg3_load_firmware_cpu(struct tg3 * tp,u32 cpu_base,u32 cpu_scratch_base,int cpu_scratch_size,const struct tg3_firmware_hdr * fw_hdr)3696 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3697 				 u32 cpu_scratch_base, int cpu_scratch_size,
3698 				 const struct tg3_firmware_hdr *fw_hdr)
3699 {
3700 	int err, i;
3701 	void (*write_op)(struct tg3 *, u32, u32);
3702 	int total_len = tp->fw->size;
3703 
3704 	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3705 		netdev_err(tp->dev,
3706 			   "%s: Trying to load TX cpu firmware which is 5705\n",
3707 			   __func__);
3708 		return -EINVAL;
3709 	}
3710 
3711 	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3712 		write_op = tg3_write_mem;
3713 	else
3714 		write_op = tg3_write_indirect_reg32;
3715 
3716 	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3717 		/* It is possible that bootcode is still loading at this point.
3718 		 * Get the nvram lock first before halting the cpu.
3719 		 */
3720 		int lock_err = tg3_nvram_lock(tp);
3721 		err = tg3_halt_cpu(tp, cpu_base);
3722 		if (!lock_err)
3723 			tg3_nvram_unlock(tp);
3724 		if (err)
3725 			goto out;
3726 
3727 		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3728 			write_op(tp, cpu_scratch_base + i, 0);
3729 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3730 		tw32(cpu_base + CPU_MODE,
3731 		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3732 	} else {
3733 		/* Subtract additional main header for fragmented firmware and
3734 		 * advance to the first fragment
3735 		 */
3736 		total_len -= TG3_FW_HDR_LEN;
3737 		fw_hdr++;
3738 	}
3739 
3740 	do {
3741 		u32 *fw_data = (u32 *)(fw_hdr + 1);
3742 		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3743 			write_op(tp, cpu_scratch_base +
3744 				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3745 				     (i * sizeof(u32)),
3746 				 be32_to_cpu(fw_data[i]));
3747 
3748 		total_len -= be32_to_cpu(fw_hdr->len);
3749 
3750 		/* Advance to next fragment */
3751 		fw_hdr = (struct tg3_firmware_hdr *)
3752 			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3753 	} while (total_len > 0);
3754 
3755 	err = 0;
3756 
3757 out:
3758 	return err;
3759 }
3760 
3761 /* tp->lock is held. */
tg3_pause_cpu_and_set_pc(struct tg3 * tp,u32 cpu_base,u32 pc)3762 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3763 {
3764 	int i;
3765 	const int iters = 5;
3766 
3767 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3768 	tw32_f(cpu_base + CPU_PC, pc);
3769 
3770 	for (i = 0; i < iters; i++) {
3771 		if (tr32(cpu_base + CPU_PC) == pc)
3772 			break;
3773 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3774 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3775 		tw32_f(cpu_base + CPU_PC, pc);
3776 		udelay(1000);
3777 	}
3778 
3779 	return (i == iters) ? -EBUSY : 0;
3780 }
3781 
3782 /* tp->lock is held. */
tg3_load_5701_a0_firmware_fix(struct tg3 * tp)3783 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3784 {
3785 	const struct tg3_firmware_hdr *fw_hdr;
3786 	int err;
3787 
3788 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3789 
3790 	/* Firmware blob starts with version numbers, followed by
3791 	   start address and length. We are setting complete length.
3792 	   length = end_address_of_bss - start_address_of_text.
3793 	   Remainder is the blob to be loaded contiguously
3794 	   from start address. */
3795 
3796 	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3797 				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3798 				    fw_hdr);
3799 	if (err)
3800 		return err;
3801 
3802 	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3803 				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3804 				    fw_hdr);
3805 	if (err)
3806 		return err;
3807 
3808 	/* Now startup only the RX cpu. */
3809 	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3810 				       be32_to_cpu(fw_hdr->base_addr));
3811 	if (err) {
3812 		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3813 			   "should be %08x\n", __func__,
3814 			   tr32(RX_CPU_BASE + CPU_PC),
3815 				be32_to_cpu(fw_hdr->base_addr));
3816 		return -ENODEV;
3817 	}
3818 
3819 	tg3_rxcpu_resume(tp);
3820 
3821 	return 0;
3822 }
3823 
tg3_validate_rxcpu_state(struct tg3 * tp)3824 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3825 {
3826 	const int iters = 1000;
3827 	int i;
3828 	u32 val;
3829 
3830 	/* Wait for boot code to complete initialization and enter service
3831 	 * loop. It is then safe to download service patches
3832 	 */
3833 	for (i = 0; i < iters; i++) {
3834 		if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3835 			break;
3836 
3837 		udelay(10);
3838 	}
3839 
3840 	if (i == iters) {
3841 		netdev_err(tp->dev, "Boot code not ready for service patches\n");
3842 		return -EBUSY;
3843 	}
3844 
3845 	val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3846 	if (val & 0xff) {
3847 		netdev_warn(tp->dev,
3848 			    "Other patches exist. Not downloading EEE patch\n");
3849 		return -EEXIST;
3850 	}
3851 
3852 	return 0;
3853 }
3854 
3855 /* tp->lock is held. */
tg3_load_57766_firmware(struct tg3 * tp)3856 static void tg3_load_57766_firmware(struct tg3 *tp)
3857 {
3858 	struct tg3_firmware_hdr *fw_hdr;
3859 
3860 	if (!tg3_flag(tp, NO_NVRAM))
3861 		return;
3862 
3863 	if (tg3_validate_rxcpu_state(tp))
3864 		return;
3865 
3866 	if (!tp->fw)
3867 		return;
3868 
3869 	/* This firmware blob has a different format than older firmware
3870 	 * releases as given below. The main difference is we have fragmented
3871 	 * data to be written to non-contiguous locations.
3872 	 *
3873 	 * In the beginning we have a firmware header identical to other
3874 	 * firmware which consists of version, base addr and length. The length
3875 	 * here is unused and set to 0xffffffff.
3876 	 *
3877 	 * This is followed by a series of firmware fragments which are
3878 	 * individually identical to previous firmware. i.e. they have the
3879 	 * firmware header and followed by data for that fragment. The version
3880 	 * field of the individual fragment header is unused.
3881 	 */
3882 
3883 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3884 	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3885 		return;
3886 
3887 	if (tg3_rxcpu_pause(tp))
3888 		return;
3889 
3890 	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
3891 	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3892 
3893 	tg3_rxcpu_resume(tp);
3894 }
3895 
3896 /* tp->lock is held. */
tg3_load_tso_firmware(struct tg3 * tp)3897 static int tg3_load_tso_firmware(struct tg3 *tp)
3898 {
3899 	const struct tg3_firmware_hdr *fw_hdr;
3900 	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3901 	int err;
3902 
3903 	if (!tg3_flag(tp, FW_TSO))
3904 		return 0;
3905 
3906 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3907 
3908 	/* Firmware blob starts with version numbers, followed by
3909 	   start address and length. We are setting complete length.
3910 	   length = end_address_of_bss - start_address_of_text.
3911 	   Remainder is the blob to be loaded contiguously
3912 	   from start address. */
3913 
3914 	cpu_scratch_size = tp->fw_len;
3915 
3916 	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3917 		cpu_base = RX_CPU_BASE;
3918 		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3919 	} else {
3920 		cpu_base = TX_CPU_BASE;
3921 		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3922 		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3923 	}
3924 
3925 	err = tg3_load_firmware_cpu(tp, cpu_base,
3926 				    cpu_scratch_base, cpu_scratch_size,
3927 				    fw_hdr);
3928 	if (err)
3929 		return err;
3930 
3931 	/* Now startup the cpu. */
3932 	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3933 				       be32_to_cpu(fw_hdr->base_addr));
3934 	if (err) {
3935 		netdev_err(tp->dev,
3936 			   "%s fails to set CPU PC, is %08x should be %08x\n",
3937 			   __func__, tr32(cpu_base + CPU_PC),
3938 			   be32_to_cpu(fw_hdr->base_addr));
3939 		return -ENODEV;
3940 	}
3941 
3942 	tg3_resume_cpu(tp, cpu_base);
3943 	return 0;
3944 }
3945 
3946 /* tp->lock is held. */
__tg3_set_one_mac_addr(struct tg3 * tp,const u8 * mac_addr,int index)3947 static void __tg3_set_one_mac_addr(struct tg3 *tp, const u8 *mac_addr,
3948 				   int index)
3949 {
3950 	u32 addr_high, addr_low;
3951 
3952 	addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3953 	addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3954 		    (mac_addr[4] <<  8) | mac_addr[5]);
3955 
3956 	if (index < 4) {
3957 		tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3958 		tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3959 	} else {
3960 		index -= 4;
3961 		tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3962 		tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3963 	}
3964 }
3965 
3966 /* tp->lock is held. */
__tg3_set_mac_addr(struct tg3 * tp,bool skip_mac_1)3967 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3968 {
3969 	u32 addr_high;
3970 	int i;
3971 
3972 	for (i = 0; i < 4; i++) {
3973 		if (i == 1 && skip_mac_1)
3974 			continue;
3975 		__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3976 	}
3977 
3978 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3979 	    tg3_asic_rev(tp) == ASIC_REV_5704) {
3980 		for (i = 4; i < 16; i++)
3981 			__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3982 	}
3983 
3984 	addr_high = (tp->dev->dev_addr[0] +
3985 		     tp->dev->dev_addr[1] +
3986 		     tp->dev->dev_addr[2] +
3987 		     tp->dev->dev_addr[3] +
3988 		     tp->dev->dev_addr[4] +
3989 		     tp->dev->dev_addr[5]) &
3990 		TX_BACKOFF_SEED_MASK;
3991 	tw32(MAC_TX_BACKOFF_SEED, addr_high);
3992 }
3993 
tg3_enable_register_access(struct tg3 * tp)3994 static void tg3_enable_register_access(struct tg3 *tp)
3995 {
3996 	/*
3997 	 * Make sure register accesses (indirect or otherwise) will function
3998 	 * correctly.
3999 	 */
4000 	pci_write_config_dword(tp->pdev,
4001 			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4002 }
4003 
tg3_power_up(struct tg3 * tp)4004 static int tg3_power_up(struct tg3 *tp)
4005 {
4006 	int err;
4007 
4008 	tg3_enable_register_access(tp);
4009 
4010 	err = pci_set_power_state(tp->pdev, PCI_D0);
4011 	if (!err) {
4012 		/* Switch out of Vaux if it is a NIC */
4013 		tg3_pwrsrc_switch_to_vmain(tp);
4014 	} else {
4015 		netdev_err(tp->dev, "Transition to D0 failed\n");
4016 	}
4017 
4018 	return err;
4019 }
4020 
4021 static int tg3_setup_phy(struct tg3 *, bool);
4022 
tg3_power_down_prepare(struct tg3 * tp)4023 static int tg3_power_down_prepare(struct tg3 *tp)
4024 {
4025 	u32 misc_host_ctrl;
4026 	bool device_should_wake, do_low_power;
4027 
4028 	tg3_enable_register_access(tp);
4029 
4030 	/* Restore the CLKREQ setting. */
4031 	if (tg3_flag(tp, CLKREQ_BUG))
4032 		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4033 					 PCI_EXP_LNKCTL_CLKREQ_EN);
4034 
4035 	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4036 	tw32(TG3PCI_MISC_HOST_CTRL,
4037 	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4038 
4039 	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4040 			     tg3_flag(tp, WOL_ENABLE);
4041 
4042 	if (tg3_flag(tp, USE_PHYLIB)) {
4043 		do_low_power = false;
4044 		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4045 		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4046 			__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
4047 			struct phy_device *phydev;
4048 			u32 phyid;
4049 
4050 			phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4051 
4052 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4053 
4054 			tp->link_config.speed = phydev->speed;
4055 			tp->link_config.duplex = phydev->duplex;
4056 			tp->link_config.autoneg = phydev->autoneg;
4057 			ethtool_convert_link_mode_to_legacy_u32(
4058 				&tp->link_config.advertising,
4059 				phydev->advertising);
4060 
4061 			linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
4062 			linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
4063 					 advertising);
4064 			linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
4065 					 advertising);
4066 			linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
4067 					 advertising);
4068 
4069 			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4070 				if (tg3_flag(tp, WOL_SPEED_100MB)) {
4071 					linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
4072 							 advertising);
4073 					linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
4074 							 advertising);
4075 					linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4076 							 advertising);
4077 				} else {
4078 					linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4079 							 advertising);
4080 				}
4081 			}
4082 
4083 			linkmode_copy(phydev->advertising, advertising);
4084 			phy_start_aneg(phydev);
4085 
4086 			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4087 			if (phyid != PHY_ID_BCMAC131) {
4088 				phyid &= PHY_BCM_OUI_MASK;
4089 				if (phyid == PHY_BCM_OUI_1 ||
4090 				    phyid == PHY_BCM_OUI_2 ||
4091 				    phyid == PHY_BCM_OUI_3)
4092 					do_low_power = true;
4093 			}
4094 		}
4095 	} else {
4096 		do_low_power = true;
4097 
4098 		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4099 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4100 
4101 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4102 			tg3_setup_phy(tp, false);
4103 	}
4104 
4105 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4106 		u32 val;
4107 
4108 		val = tr32(GRC_VCPU_EXT_CTRL);
4109 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4110 	} else if (!tg3_flag(tp, ENABLE_ASF)) {
4111 		int i;
4112 		u32 val;
4113 
4114 		for (i = 0; i < 200; i++) {
4115 			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4116 			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4117 				break;
4118 			msleep(1);
4119 		}
4120 	}
4121 	if (tg3_flag(tp, WOL_CAP))
4122 		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4123 						     WOL_DRV_STATE_SHUTDOWN |
4124 						     WOL_DRV_WOL |
4125 						     WOL_SET_MAGIC_PKT);
4126 
4127 	if (device_should_wake) {
4128 		u32 mac_mode;
4129 
4130 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4131 			if (do_low_power &&
4132 			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4133 				tg3_phy_auxctl_write(tp,
4134 					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4135 					       MII_TG3_AUXCTL_PCTL_WOL_EN |
4136 					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4137 					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4138 				udelay(40);
4139 			}
4140 
4141 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4142 				mac_mode = MAC_MODE_PORT_MODE_GMII;
4143 			else if (tp->phy_flags &
4144 				 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4145 				if (tp->link_config.active_speed == SPEED_1000)
4146 					mac_mode = MAC_MODE_PORT_MODE_GMII;
4147 				else
4148 					mac_mode = MAC_MODE_PORT_MODE_MII;
4149 			} else
4150 				mac_mode = MAC_MODE_PORT_MODE_MII;
4151 
4152 			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4153 			if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4154 				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4155 					     SPEED_100 : SPEED_10;
4156 				if (tg3_5700_link_polarity(tp, speed))
4157 					mac_mode |= MAC_MODE_LINK_POLARITY;
4158 				else
4159 					mac_mode &= ~MAC_MODE_LINK_POLARITY;
4160 			}
4161 		} else {
4162 			mac_mode = MAC_MODE_PORT_MODE_TBI;
4163 		}
4164 
4165 		if (!tg3_flag(tp, 5750_PLUS))
4166 			tw32(MAC_LED_CTRL, tp->led_ctrl);
4167 
4168 		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4169 		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4170 		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4171 			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4172 
4173 		if (tg3_flag(tp, ENABLE_APE))
4174 			mac_mode |= MAC_MODE_APE_TX_EN |
4175 				    MAC_MODE_APE_RX_EN |
4176 				    MAC_MODE_TDE_ENABLE;
4177 
4178 		tw32_f(MAC_MODE, mac_mode);
4179 		udelay(100);
4180 
4181 		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4182 		udelay(10);
4183 	}
4184 
4185 	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4186 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4187 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
4188 		u32 base_val;
4189 
4190 		base_val = tp->pci_clock_ctrl;
4191 		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4192 			     CLOCK_CTRL_TXCLK_DISABLE);
4193 
4194 		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4195 			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
4196 	} else if (tg3_flag(tp, 5780_CLASS) ||
4197 		   tg3_flag(tp, CPMU_PRESENT) ||
4198 		   tg3_asic_rev(tp) == ASIC_REV_5906) {
4199 		/* do nothing */
4200 	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4201 		u32 newbits1, newbits2;
4202 
4203 		if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4204 		    tg3_asic_rev(tp) == ASIC_REV_5701) {
4205 			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4206 				    CLOCK_CTRL_TXCLK_DISABLE |
4207 				    CLOCK_CTRL_ALTCLK);
4208 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4209 		} else if (tg3_flag(tp, 5705_PLUS)) {
4210 			newbits1 = CLOCK_CTRL_625_CORE;
4211 			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4212 		} else {
4213 			newbits1 = CLOCK_CTRL_ALTCLK;
4214 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4215 		}
4216 
4217 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4218 			    40);
4219 
4220 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4221 			    40);
4222 
4223 		if (!tg3_flag(tp, 5705_PLUS)) {
4224 			u32 newbits3;
4225 
4226 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4227 			    tg3_asic_rev(tp) == ASIC_REV_5701) {
4228 				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4229 					    CLOCK_CTRL_TXCLK_DISABLE |
4230 					    CLOCK_CTRL_44MHZ_CORE);
4231 			} else {
4232 				newbits3 = CLOCK_CTRL_44MHZ_CORE;
4233 			}
4234 
4235 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
4236 				    tp->pci_clock_ctrl | newbits3, 40);
4237 		}
4238 	}
4239 
4240 	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4241 		tg3_power_down_phy(tp, do_low_power);
4242 
4243 	tg3_frob_aux_power(tp, true);
4244 
4245 	/* Workaround for unstable PLL clock */
4246 	if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4247 	    ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4248 	     (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4249 		u32 val = tr32(0x7d00);
4250 
4251 		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4252 		tw32(0x7d00, val);
4253 		if (!tg3_flag(tp, ENABLE_ASF)) {
4254 			int err;
4255 
4256 			err = tg3_nvram_lock(tp);
4257 			tg3_halt_cpu(tp, RX_CPU_BASE);
4258 			if (!err)
4259 				tg3_nvram_unlock(tp);
4260 		}
4261 	}
4262 
4263 	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4264 
4265 	tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4266 
4267 	return 0;
4268 }
4269 
tg3_power_down(struct tg3 * tp)4270 static void tg3_power_down(struct tg3 *tp)
4271 {
4272 	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4273 	pci_set_power_state(tp->pdev, PCI_D3hot);
4274 }
4275 
tg3_aux_stat_to_speed_duplex(struct tg3 * tp,u32 val,u32 * speed,u8 * duplex)4276 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
4277 {
4278 	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4279 	case MII_TG3_AUX_STAT_10HALF:
4280 		*speed = SPEED_10;
4281 		*duplex = DUPLEX_HALF;
4282 		break;
4283 
4284 	case MII_TG3_AUX_STAT_10FULL:
4285 		*speed = SPEED_10;
4286 		*duplex = DUPLEX_FULL;
4287 		break;
4288 
4289 	case MII_TG3_AUX_STAT_100HALF:
4290 		*speed = SPEED_100;
4291 		*duplex = DUPLEX_HALF;
4292 		break;
4293 
4294 	case MII_TG3_AUX_STAT_100FULL:
4295 		*speed = SPEED_100;
4296 		*duplex = DUPLEX_FULL;
4297 		break;
4298 
4299 	case MII_TG3_AUX_STAT_1000HALF:
4300 		*speed = SPEED_1000;
4301 		*duplex = DUPLEX_HALF;
4302 		break;
4303 
4304 	case MII_TG3_AUX_STAT_1000FULL:
4305 		*speed = SPEED_1000;
4306 		*duplex = DUPLEX_FULL;
4307 		break;
4308 
4309 	default:
4310 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4311 			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4312 				 SPEED_10;
4313 			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4314 				  DUPLEX_HALF;
4315 			break;
4316 		}
4317 		*speed = SPEED_UNKNOWN;
4318 		*duplex = DUPLEX_UNKNOWN;
4319 		break;
4320 	}
4321 }
4322 
tg3_phy_autoneg_cfg(struct tg3 * tp,u32 advertise,u32 flowctrl)4323 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4324 {
4325 	int err = 0;
4326 	u32 val, new_adv;
4327 
4328 	new_adv = ADVERTISE_CSMA;
4329 	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4330 	new_adv |= mii_advertise_flowctrl(flowctrl);
4331 
4332 	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4333 	if (err)
4334 		goto done;
4335 
4336 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4337 		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4338 
4339 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4340 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4341 			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4342 
4343 		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4344 		if (err)
4345 			goto done;
4346 	}
4347 
4348 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4349 		goto done;
4350 
4351 	tw32(TG3_CPMU_EEE_MODE,
4352 	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4353 
4354 	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4355 	if (!err) {
4356 		u32 err2;
4357 
4358 		val = 0;
4359 		/* Advertise 100-BaseTX EEE ability */
4360 		if (advertise & ADVERTISED_100baseT_Full)
4361 			val |= MDIO_AN_EEE_ADV_100TX;
4362 		/* Advertise 1000-BaseT EEE ability */
4363 		if (advertise & ADVERTISED_1000baseT_Full)
4364 			val |= MDIO_AN_EEE_ADV_1000T;
4365 
4366 		if (!tp->eee.eee_enabled) {
4367 			val = 0;
4368 			tp->eee.advertised = 0;
4369 		} else {
4370 			tp->eee.advertised = advertise &
4371 					     (ADVERTISED_100baseT_Full |
4372 					      ADVERTISED_1000baseT_Full);
4373 		}
4374 
4375 		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4376 		if (err)
4377 			val = 0;
4378 
4379 		switch (tg3_asic_rev(tp)) {
4380 		case ASIC_REV_5717:
4381 		case ASIC_REV_57765:
4382 		case ASIC_REV_57766:
4383 		case ASIC_REV_5719:
4384 			/* If we advertised any eee advertisements above... */
4385 			if (val)
4386 				val = MII_TG3_DSP_TAP26_ALNOKO |
4387 				      MII_TG3_DSP_TAP26_RMRXSTO |
4388 				      MII_TG3_DSP_TAP26_OPCSINPT;
4389 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4390 			fallthrough;
4391 		case ASIC_REV_5720:
4392 		case ASIC_REV_5762:
4393 			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4394 				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4395 						 MII_TG3_DSP_CH34TP2_HIBW01);
4396 		}
4397 
4398 		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4399 		if (!err)
4400 			err = err2;
4401 	}
4402 
4403 done:
4404 	return err;
4405 }
4406 
tg3_phy_copper_begin(struct tg3 * tp)4407 static void tg3_phy_copper_begin(struct tg3 *tp)
4408 {
4409 	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4410 	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4411 		u32 adv, fc;
4412 
4413 		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4414 		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4415 			adv = ADVERTISED_10baseT_Half |
4416 			      ADVERTISED_10baseT_Full;
4417 			if (tg3_flag(tp, WOL_SPEED_100MB))
4418 				adv |= ADVERTISED_100baseT_Half |
4419 				       ADVERTISED_100baseT_Full;
4420 			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4421 				if (!(tp->phy_flags &
4422 				      TG3_PHYFLG_DISABLE_1G_HD_ADV))
4423 					adv |= ADVERTISED_1000baseT_Half;
4424 				adv |= ADVERTISED_1000baseT_Full;
4425 			}
4426 
4427 			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4428 		} else {
4429 			adv = tp->link_config.advertising;
4430 			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4431 				adv &= ~(ADVERTISED_1000baseT_Half |
4432 					 ADVERTISED_1000baseT_Full);
4433 
4434 			fc = tp->link_config.flowctrl;
4435 		}
4436 
4437 		tg3_phy_autoneg_cfg(tp, adv, fc);
4438 
4439 		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4440 		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4441 			/* Normally during power down we want to autonegotiate
4442 			 * the lowest possible speed for WOL. However, to avoid
4443 			 * link flap, we leave it untouched.
4444 			 */
4445 			return;
4446 		}
4447 
4448 		tg3_writephy(tp, MII_BMCR,
4449 			     BMCR_ANENABLE | BMCR_ANRESTART);
4450 	} else {
4451 		int i;
4452 		u32 bmcr, orig_bmcr;
4453 
4454 		tp->link_config.active_speed = tp->link_config.speed;
4455 		tp->link_config.active_duplex = tp->link_config.duplex;
4456 
4457 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4458 			/* With autoneg disabled, 5715 only links up when the
4459 			 * advertisement register has the configured speed
4460 			 * enabled.
4461 			 */
4462 			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4463 		}
4464 
4465 		bmcr = 0;
4466 		switch (tp->link_config.speed) {
4467 		default:
4468 		case SPEED_10:
4469 			break;
4470 
4471 		case SPEED_100:
4472 			bmcr |= BMCR_SPEED100;
4473 			break;
4474 
4475 		case SPEED_1000:
4476 			bmcr |= BMCR_SPEED1000;
4477 			break;
4478 		}
4479 
4480 		if (tp->link_config.duplex == DUPLEX_FULL)
4481 			bmcr |= BMCR_FULLDPLX;
4482 
4483 		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4484 		    (bmcr != orig_bmcr)) {
4485 			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4486 			for (i = 0; i < 1500; i++) {
4487 				u32 tmp;
4488 
4489 				udelay(10);
4490 				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4491 				    tg3_readphy(tp, MII_BMSR, &tmp))
4492 					continue;
4493 				if (!(tmp & BMSR_LSTATUS)) {
4494 					udelay(40);
4495 					break;
4496 				}
4497 			}
4498 			tg3_writephy(tp, MII_BMCR, bmcr);
4499 			udelay(40);
4500 		}
4501 	}
4502 }
4503 
tg3_phy_pull_config(struct tg3 * tp)4504 static int tg3_phy_pull_config(struct tg3 *tp)
4505 {
4506 	int err;
4507 	u32 val;
4508 
4509 	err = tg3_readphy(tp, MII_BMCR, &val);
4510 	if (err)
4511 		goto done;
4512 
4513 	if (!(val & BMCR_ANENABLE)) {
4514 		tp->link_config.autoneg = AUTONEG_DISABLE;
4515 		tp->link_config.advertising = 0;
4516 		tg3_flag_clear(tp, PAUSE_AUTONEG);
4517 
4518 		err = -EIO;
4519 
4520 		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4521 		case 0:
4522 			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4523 				goto done;
4524 
4525 			tp->link_config.speed = SPEED_10;
4526 			break;
4527 		case BMCR_SPEED100:
4528 			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4529 				goto done;
4530 
4531 			tp->link_config.speed = SPEED_100;
4532 			break;
4533 		case BMCR_SPEED1000:
4534 			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4535 				tp->link_config.speed = SPEED_1000;
4536 				break;
4537 			}
4538 			fallthrough;
4539 		default:
4540 			goto done;
4541 		}
4542 
4543 		if (val & BMCR_FULLDPLX)
4544 			tp->link_config.duplex = DUPLEX_FULL;
4545 		else
4546 			tp->link_config.duplex = DUPLEX_HALF;
4547 
4548 		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4549 
4550 		err = 0;
4551 		goto done;
4552 	}
4553 
4554 	tp->link_config.autoneg = AUTONEG_ENABLE;
4555 	tp->link_config.advertising = ADVERTISED_Autoneg;
4556 	tg3_flag_set(tp, PAUSE_AUTONEG);
4557 
4558 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4559 		u32 adv;
4560 
4561 		err = tg3_readphy(tp, MII_ADVERTISE, &val);
4562 		if (err)
4563 			goto done;
4564 
4565 		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4566 		tp->link_config.advertising |= adv | ADVERTISED_TP;
4567 
4568 		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4569 	} else {
4570 		tp->link_config.advertising |= ADVERTISED_FIBRE;
4571 	}
4572 
4573 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4574 		u32 adv;
4575 
4576 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4577 			err = tg3_readphy(tp, MII_CTRL1000, &val);
4578 			if (err)
4579 				goto done;
4580 
4581 			adv = mii_ctrl1000_to_ethtool_adv_t(val);
4582 		} else {
4583 			err = tg3_readphy(tp, MII_ADVERTISE, &val);
4584 			if (err)
4585 				goto done;
4586 
4587 			adv = tg3_decode_flowctrl_1000X(val);
4588 			tp->link_config.flowctrl = adv;
4589 
4590 			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4591 			adv = mii_adv_to_ethtool_adv_x(val);
4592 		}
4593 
4594 		tp->link_config.advertising |= adv;
4595 	}
4596 
4597 done:
4598 	return err;
4599 }
4600 
tg3_init_5401phy_dsp(struct tg3 * tp)4601 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4602 {
4603 	int err;
4604 
4605 	/* Turn off tap power management. */
4606 	/* Set Extended packet length bit */
4607 	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4608 
4609 	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4610 	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4611 	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4612 	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4613 	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4614 
4615 	udelay(40);
4616 
4617 	return err;
4618 }
4619 
tg3_phy_eee_config_ok(struct tg3 * tp)4620 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4621 {
4622 	struct ethtool_eee eee;
4623 
4624 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4625 		return true;
4626 
4627 	tg3_eee_pull_config(tp, &eee);
4628 
4629 	if (tp->eee.eee_enabled) {
4630 		if (tp->eee.advertised != eee.advertised ||
4631 		    tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4632 		    tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4633 			return false;
4634 	} else {
4635 		/* EEE is disabled but we're advertising */
4636 		if (eee.advertised)
4637 			return false;
4638 	}
4639 
4640 	return true;
4641 }
4642 
tg3_phy_copper_an_config_ok(struct tg3 * tp,u32 * lcladv)4643 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4644 {
4645 	u32 advmsk, tgtadv, advertising;
4646 
4647 	advertising = tp->link_config.advertising;
4648 	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4649 
4650 	advmsk = ADVERTISE_ALL;
4651 	if (tp->link_config.active_duplex == DUPLEX_FULL) {
4652 		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4653 		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4654 	}
4655 
4656 	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4657 		return false;
4658 
4659 	if ((*lcladv & advmsk) != tgtadv)
4660 		return false;
4661 
4662 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4663 		u32 tg3_ctrl;
4664 
4665 		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4666 
4667 		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4668 			return false;
4669 
4670 		if (tgtadv &&
4671 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4672 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4673 			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4674 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4675 				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4676 		} else {
4677 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4678 		}
4679 
4680 		if (tg3_ctrl != tgtadv)
4681 			return false;
4682 	}
4683 
4684 	return true;
4685 }
4686 
tg3_phy_copper_fetch_rmtadv(struct tg3 * tp,u32 * rmtadv)4687 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4688 {
4689 	u32 lpeth = 0;
4690 
4691 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4692 		u32 val;
4693 
4694 		if (tg3_readphy(tp, MII_STAT1000, &val))
4695 			return false;
4696 
4697 		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4698 	}
4699 
4700 	if (tg3_readphy(tp, MII_LPA, rmtadv))
4701 		return false;
4702 
4703 	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4704 	tp->link_config.rmt_adv = lpeth;
4705 
4706 	return true;
4707 }
4708 
tg3_test_and_report_link_chg(struct tg3 * tp,bool curr_link_up)4709 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4710 {
4711 	if (curr_link_up != tp->link_up) {
4712 		if (curr_link_up) {
4713 			netif_carrier_on(tp->dev);
4714 		} else {
4715 			netif_carrier_off(tp->dev);
4716 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4717 				tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4718 		}
4719 
4720 		tg3_link_report(tp);
4721 		return true;
4722 	}
4723 
4724 	return false;
4725 }
4726 
tg3_clear_mac_status(struct tg3 * tp)4727 static void tg3_clear_mac_status(struct tg3 *tp)
4728 {
4729 	tw32(MAC_EVENT, 0);
4730 
4731 	tw32_f(MAC_STATUS,
4732 	       MAC_STATUS_SYNC_CHANGED |
4733 	       MAC_STATUS_CFG_CHANGED |
4734 	       MAC_STATUS_MI_COMPLETION |
4735 	       MAC_STATUS_LNKSTATE_CHANGED);
4736 	udelay(40);
4737 }
4738 
tg3_setup_eee(struct tg3 * tp)4739 static void tg3_setup_eee(struct tg3 *tp)
4740 {
4741 	u32 val;
4742 
4743 	val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4744 	      TG3_CPMU_EEE_LNKIDL_UART_IDL;
4745 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4746 		val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4747 
4748 	tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4749 
4750 	tw32_f(TG3_CPMU_EEE_CTRL,
4751 	       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4752 
4753 	val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4754 	      (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4755 	      TG3_CPMU_EEEMD_LPI_IN_RX |
4756 	      TG3_CPMU_EEEMD_EEE_ENABLE;
4757 
4758 	if (tg3_asic_rev(tp) != ASIC_REV_5717)
4759 		val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4760 
4761 	if (tg3_flag(tp, ENABLE_APE))
4762 		val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4763 
4764 	tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4765 
4766 	tw32_f(TG3_CPMU_EEE_DBTMR1,
4767 	       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4768 	       (tp->eee.tx_lpi_timer & 0xffff));
4769 
4770 	tw32_f(TG3_CPMU_EEE_DBTMR2,
4771 	       TG3_CPMU_DBTMR2_APE_TX_2047US |
4772 	       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4773 }
4774 
tg3_setup_copper_phy(struct tg3 * tp,bool force_reset)4775 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4776 {
4777 	bool current_link_up;
4778 	u32 bmsr, val;
4779 	u32 lcl_adv, rmt_adv;
4780 	u32 current_speed;
4781 	u8 current_duplex;
4782 	int i, err;
4783 
4784 	tg3_clear_mac_status(tp);
4785 
4786 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4787 		tw32_f(MAC_MI_MODE,
4788 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4789 		udelay(80);
4790 	}
4791 
4792 	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4793 
4794 	/* Some third-party PHYs need to be reset on link going
4795 	 * down.
4796 	 */
4797 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4798 	     tg3_asic_rev(tp) == ASIC_REV_5704 ||
4799 	     tg3_asic_rev(tp) == ASIC_REV_5705) &&
4800 	    tp->link_up) {
4801 		tg3_readphy(tp, MII_BMSR, &bmsr);
4802 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4803 		    !(bmsr & BMSR_LSTATUS))
4804 			force_reset = true;
4805 	}
4806 	if (force_reset)
4807 		tg3_phy_reset(tp);
4808 
4809 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4810 		tg3_readphy(tp, MII_BMSR, &bmsr);
4811 		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4812 		    !tg3_flag(tp, INIT_COMPLETE))
4813 			bmsr = 0;
4814 
4815 		if (!(bmsr & BMSR_LSTATUS)) {
4816 			err = tg3_init_5401phy_dsp(tp);
4817 			if (err)
4818 				return err;
4819 
4820 			tg3_readphy(tp, MII_BMSR, &bmsr);
4821 			for (i = 0; i < 1000; i++) {
4822 				udelay(10);
4823 				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4824 				    (bmsr & BMSR_LSTATUS)) {
4825 					udelay(40);
4826 					break;
4827 				}
4828 			}
4829 
4830 			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4831 			    TG3_PHY_REV_BCM5401_B0 &&
4832 			    !(bmsr & BMSR_LSTATUS) &&
4833 			    tp->link_config.active_speed == SPEED_1000) {
4834 				err = tg3_phy_reset(tp);
4835 				if (!err)
4836 					err = tg3_init_5401phy_dsp(tp);
4837 				if (err)
4838 					return err;
4839 			}
4840 		}
4841 	} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4842 		   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4843 		/* 5701 {A0,B0} CRC bug workaround */
4844 		tg3_writephy(tp, 0x15, 0x0a75);
4845 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4846 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4847 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4848 	}
4849 
4850 	/* Clear pending interrupts... */
4851 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4852 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4853 
4854 	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4855 		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4856 	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4857 		tg3_writephy(tp, MII_TG3_IMASK, ~0);
4858 
4859 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4860 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
4861 		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4862 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
4863 				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4864 		else
4865 			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4866 	}
4867 
4868 	current_link_up = false;
4869 	current_speed = SPEED_UNKNOWN;
4870 	current_duplex = DUPLEX_UNKNOWN;
4871 	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4872 	tp->link_config.rmt_adv = 0;
4873 
4874 	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4875 		err = tg3_phy_auxctl_read(tp,
4876 					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4877 					  &val);
4878 		if (!err && !(val & (1 << 10))) {
4879 			tg3_phy_auxctl_write(tp,
4880 					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4881 					     val | (1 << 10));
4882 			goto relink;
4883 		}
4884 	}
4885 
4886 	bmsr = 0;
4887 	for (i = 0; i < 100; i++) {
4888 		tg3_readphy(tp, MII_BMSR, &bmsr);
4889 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4890 		    (bmsr & BMSR_LSTATUS))
4891 			break;
4892 		udelay(40);
4893 	}
4894 
4895 	if (bmsr & BMSR_LSTATUS) {
4896 		u32 aux_stat, bmcr;
4897 
4898 		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4899 		for (i = 0; i < 2000; i++) {
4900 			udelay(10);
4901 			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4902 			    aux_stat)
4903 				break;
4904 		}
4905 
4906 		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4907 					     &current_speed,
4908 					     &current_duplex);
4909 
4910 		bmcr = 0;
4911 		for (i = 0; i < 200; i++) {
4912 			tg3_readphy(tp, MII_BMCR, &bmcr);
4913 			if (tg3_readphy(tp, MII_BMCR, &bmcr))
4914 				continue;
4915 			if (bmcr && bmcr != 0x7fff)
4916 				break;
4917 			udelay(10);
4918 		}
4919 
4920 		lcl_adv = 0;
4921 		rmt_adv = 0;
4922 
4923 		tp->link_config.active_speed = current_speed;
4924 		tp->link_config.active_duplex = current_duplex;
4925 
4926 		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4927 			bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4928 
4929 			if ((bmcr & BMCR_ANENABLE) &&
4930 			    eee_config_ok &&
4931 			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4932 			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4933 				current_link_up = true;
4934 
4935 			/* EEE settings changes take effect only after a phy
4936 			 * reset.  If we have skipped a reset due to Link Flap
4937 			 * Avoidance being enabled, do it now.
4938 			 */
4939 			if (!eee_config_ok &&
4940 			    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4941 			    !force_reset) {
4942 				tg3_setup_eee(tp);
4943 				tg3_phy_reset(tp);
4944 			}
4945 		} else {
4946 			if (!(bmcr & BMCR_ANENABLE) &&
4947 			    tp->link_config.speed == current_speed &&
4948 			    tp->link_config.duplex == current_duplex) {
4949 				current_link_up = true;
4950 			}
4951 		}
4952 
4953 		if (current_link_up &&
4954 		    tp->link_config.active_duplex == DUPLEX_FULL) {
4955 			u32 reg, bit;
4956 
4957 			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4958 				reg = MII_TG3_FET_GEN_STAT;
4959 				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4960 			} else {
4961 				reg = MII_TG3_EXT_STAT;
4962 				bit = MII_TG3_EXT_STAT_MDIX;
4963 			}
4964 
4965 			if (!tg3_readphy(tp, reg, &val) && (val & bit))
4966 				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4967 
4968 			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4969 		}
4970 	}
4971 
4972 relink:
4973 	if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4974 		tg3_phy_copper_begin(tp);
4975 
4976 		if (tg3_flag(tp, ROBOSWITCH)) {
4977 			current_link_up = true;
4978 			/* FIXME: when BCM5325 switch is used use 100 MBit/s */
4979 			current_speed = SPEED_1000;
4980 			current_duplex = DUPLEX_FULL;
4981 			tp->link_config.active_speed = current_speed;
4982 			tp->link_config.active_duplex = current_duplex;
4983 		}
4984 
4985 		tg3_readphy(tp, MII_BMSR, &bmsr);
4986 		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4987 		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4988 			current_link_up = true;
4989 	}
4990 
4991 	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4992 	if (current_link_up) {
4993 		if (tp->link_config.active_speed == SPEED_100 ||
4994 		    tp->link_config.active_speed == SPEED_10)
4995 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4996 		else
4997 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4998 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4999 		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5000 	else
5001 		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5002 
5003 	/* In order for the 5750 core in BCM4785 chip to work properly
5004 	 * in RGMII mode, the Led Control Register must be set up.
5005 	 */
5006 	if (tg3_flag(tp, RGMII_MODE)) {
5007 		u32 led_ctrl = tr32(MAC_LED_CTRL);
5008 		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5009 
5010 		if (tp->link_config.active_speed == SPEED_10)
5011 			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5012 		else if (tp->link_config.active_speed == SPEED_100)
5013 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5014 				     LED_CTRL_100MBPS_ON);
5015 		else if (tp->link_config.active_speed == SPEED_1000)
5016 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5017 				     LED_CTRL_1000MBPS_ON);
5018 
5019 		tw32(MAC_LED_CTRL, led_ctrl);
5020 		udelay(40);
5021 	}
5022 
5023 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5024 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5025 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5026 
5027 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5028 		if (current_link_up &&
5029 		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5030 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5031 		else
5032 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5033 	}
5034 
5035 	/* ??? Without this setting Netgear GA302T PHY does not
5036 	 * ??? send/receive packets...
5037 	 */
5038 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5039 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5040 		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5041 		tw32_f(MAC_MI_MODE, tp->mi_mode);
5042 		udelay(80);
5043 	}
5044 
5045 	tw32_f(MAC_MODE, tp->mac_mode);
5046 	udelay(40);
5047 
5048 	tg3_phy_eee_adjust(tp, current_link_up);
5049 
5050 	if (tg3_flag(tp, USE_LINKCHG_REG)) {
5051 		/* Polled via timer. */
5052 		tw32_f(MAC_EVENT, 0);
5053 	} else {
5054 		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5055 	}
5056 	udelay(40);
5057 
5058 	if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5059 	    current_link_up &&
5060 	    tp->link_config.active_speed == SPEED_1000 &&
5061 	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5062 		udelay(120);
5063 		tw32_f(MAC_STATUS,
5064 		     (MAC_STATUS_SYNC_CHANGED |
5065 		      MAC_STATUS_CFG_CHANGED));
5066 		udelay(40);
5067 		tg3_write_mem(tp,
5068 			      NIC_SRAM_FIRMWARE_MBOX,
5069 			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5070 	}
5071 
5072 	/* Prevent send BD corruption. */
5073 	if (tg3_flag(tp, CLKREQ_BUG)) {
5074 		if (tp->link_config.active_speed == SPEED_100 ||
5075 		    tp->link_config.active_speed == SPEED_10)
5076 			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5077 						   PCI_EXP_LNKCTL_CLKREQ_EN);
5078 		else
5079 			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5080 						 PCI_EXP_LNKCTL_CLKREQ_EN);
5081 	}
5082 
5083 	tg3_test_and_report_link_chg(tp, current_link_up);
5084 
5085 	return 0;
5086 }
5087 
5088 struct tg3_fiber_aneginfo {
5089 	int state;
5090 #define ANEG_STATE_UNKNOWN		0
5091 #define ANEG_STATE_AN_ENABLE		1
5092 #define ANEG_STATE_RESTART_INIT		2
5093 #define ANEG_STATE_RESTART		3
5094 #define ANEG_STATE_DISABLE_LINK_OK	4
5095 #define ANEG_STATE_ABILITY_DETECT_INIT	5
5096 #define ANEG_STATE_ABILITY_DETECT	6
5097 #define ANEG_STATE_ACK_DETECT_INIT	7
5098 #define ANEG_STATE_ACK_DETECT		8
5099 #define ANEG_STATE_COMPLETE_ACK_INIT	9
5100 #define ANEG_STATE_COMPLETE_ACK		10
5101 #define ANEG_STATE_IDLE_DETECT_INIT	11
5102 #define ANEG_STATE_IDLE_DETECT		12
5103 #define ANEG_STATE_LINK_OK		13
5104 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
5105 #define ANEG_STATE_NEXT_PAGE_WAIT	15
5106 
5107 	u32 flags;
5108 #define MR_AN_ENABLE		0x00000001
5109 #define MR_RESTART_AN		0x00000002
5110 #define MR_AN_COMPLETE		0x00000004
5111 #define MR_PAGE_RX		0x00000008
5112 #define MR_NP_LOADED		0x00000010
5113 #define MR_TOGGLE_TX		0x00000020
5114 #define MR_LP_ADV_FULL_DUPLEX	0x00000040
5115 #define MR_LP_ADV_HALF_DUPLEX	0x00000080
5116 #define MR_LP_ADV_SYM_PAUSE	0x00000100
5117 #define MR_LP_ADV_ASYM_PAUSE	0x00000200
5118 #define MR_LP_ADV_REMOTE_FAULT1	0x00000400
5119 #define MR_LP_ADV_REMOTE_FAULT2	0x00000800
5120 #define MR_LP_ADV_NEXT_PAGE	0x00001000
5121 #define MR_TOGGLE_RX		0x00002000
5122 #define MR_NP_RX		0x00004000
5123 
5124 #define MR_LINK_OK		0x80000000
5125 
5126 	unsigned long link_time, cur_time;
5127 
5128 	u32 ability_match_cfg;
5129 	int ability_match_count;
5130 
5131 	char ability_match, idle_match, ack_match;
5132 
5133 	u32 txconfig, rxconfig;
5134 #define ANEG_CFG_NP		0x00000080
5135 #define ANEG_CFG_ACK		0x00000040
5136 #define ANEG_CFG_RF2		0x00000020
5137 #define ANEG_CFG_RF1		0x00000010
5138 #define ANEG_CFG_PS2		0x00000001
5139 #define ANEG_CFG_PS1		0x00008000
5140 #define ANEG_CFG_HD		0x00004000
5141 #define ANEG_CFG_FD		0x00002000
5142 #define ANEG_CFG_INVAL		0x00001f06
5143 
5144 };
5145 #define ANEG_OK		0
5146 #define ANEG_DONE	1
5147 #define ANEG_TIMER_ENAB	2
5148 #define ANEG_FAILED	-1
5149 
5150 #define ANEG_STATE_SETTLE_TIME	10000
5151 
tg3_fiber_aneg_smachine(struct tg3 * tp,struct tg3_fiber_aneginfo * ap)5152 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5153 				   struct tg3_fiber_aneginfo *ap)
5154 {
5155 	u16 flowctrl;
5156 	unsigned long delta;
5157 	u32 rx_cfg_reg;
5158 	int ret;
5159 
5160 	if (ap->state == ANEG_STATE_UNKNOWN) {
5161 		ap->rxconfig = 0;
5162 		ap->link_time = 0;
5163 		ap->cur_time = 0;
5164 		ap->ability_match_cfg = 0;
5165 		ap->ability_match_count = 0;
5166 		ap->ability_match = 0;
5167 		ap->idle_match = 0;
5168 		ap->ack_match = 0;
5169 	}
5170 	ap->cur_time++;
5171 
5172 	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5173 		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5174 
5175 		if (rx_cfg_reg != ap->ability_match_cfg) {
5176 			ap->ability_match_cfg = rx_cfg_reg;
5177 			ap->ability_match = 0;
5178 			ap->ability_match_count = 0;
5179 		} else {
5180 			if (++ap->ability_match_count > 1) {
5181 				ap->ability_match = 1;
5182 				ap->ability_match_cfg = rx_cfg_reg;
5183 			}
5184 		}
5185 		if (rx_cfg_reg & ANEG_CFG_ACK)
5186 			ap->ack_match = 1;
5187 		else
5188 			ap->ack_match = 0;
5189 
5190 		ap->idle_match = 0;
5191 	} else {
5192 		ap->idle_match = 1;
5193 		ap->ability_match_cfg = 0;
5194 		ap->ability_match_count = 0;
5195 		ap->ability_match = 0;
5196 		ap->ack_match = 0;
5197 
5198 		rx_cfg_reg = 0;
5199 	}
5200 
5201 	ap->rxconfig = rx_cfg_reg;
5202 	ret = ANEG_OK;
5203 
5204 	switch (ap->state) {
5205 	case ANEG_STATE_UNKNOWN:
5206 		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5207 			ap->state = ANEG_STATE_AN_ENABLE;
5208 
5209 		fallthrough;
5210 	case ANEG_STATE_AN_ENABLE:
5211 		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5212 		if (ap->flags & MR_AN_ENABLE) {
5213 			ap->link_time = 0;
5214 			ap->cur_time = 0;
5215 			ap->ability_match_cfg = 0;
5216 			ap->ability_match_count = 0;
5217 			ap->ability_match = 0;
5218 			ap->idle_match = 0;
5219 			ap->ack_match = 0;
5220 
5221 			ap->state = ANEG_STATE_RESTART_INIT;
5222 		} else {
5223 			ap->state = ANEG_STATE_DISABLE_LINK_OK;
5224 		}
5225 		break;
5226 
5227 	case ANEG_STATE_RESTART_INIT:
5228 		ap->link_time = ap->cur_time;
5229 		ap->flags &= ~(MR_NP_LOADED);
5230 		ap->txconfig = 0;
5231 		tw32(MAC_TX_AUTO_NEG, 0);
5232 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5233 		tw32_f(MAC_MODE, tp->mac_mode);
5234 		udelay(40);
5235 
5236 		ret = ANEG_TIMER_ENAB;
5237 		ap->state = ANEG_STATE_RESTART;
5238 
5239 		fallthrough;
5240 	case ANEG_STATE_RESTART:
5241 		delta = ap->cur_time - ap->link_time;
5242 		if (delta > ANEG_STATE_SETTLE_TIME)
5243 			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5244 		else
5245 			ret = ANEG_TIMER_ENAB;
5246 		break;
5247 
5248 	case ANEG_STATE_DISABLE_LINK_OK:
5249 		ret = ANEG_DONE;
5250 		break;
5251 
5252 	case ANEG_STATE_ABILITY_DETECT_INIT:
5253 		ap->flags &= ~(MR_TOGGLE_TX);
5254 		ap->txconfig = ANEG_CFG_FD;
5255 		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5256 		if (flowctrl & ADVERTISE_1000XPAUSE)
5257 			ap->txconfig |= ANEG_CFG_PS1;
5258 		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5259 			ap->txconfig |= ANEG_CFG_PS2;
5260 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5261 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5262 		tw32_f(MAC_MODE, tp->mac_mode);
5263 		udelay(40);
5264 
5265 		ap->state = ANEG_STATE_ABILITY_DETECT;
5266 		break;
5267 
5268 	case ANEG_STATE_ABILITY_DETECT:
5269 		if (ap->ability_match != 0 && ap->rxconfig != 0)
5270 			ap->state = ANEG_STATE_ACK_DETECT_INIT;
5271 		break;
5272 
5273 	case ANEG_STATE_ACK_DETECT_INIT:
5274 		ap->txconfig |= ANEG_CFG_ACK;
5275 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5276 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5277 		tw32_f(MAC_MODE, tp->mac_mode);
5278 		udelay(40);
5279 
5280 		ap->state = ANEG_STATE_ACK_DETECT;
5281 
5282 		fallthrough;
5283 	case ANEG_STATE_ACK_DETECT:
5284 		if (ap->ack_match != 0) {
5285 			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5286 			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5287 				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5288 			} else {
5289 				ap->state = ANEG_STATE_AN_ENABLE;
5290 			}
5291 		} else if (ap->ability_match != 0 &&
5292 			   ap->rxconfig == 0) {
5293 			ap->state = ANEG_STATE_AN_ENABLE;
5294 		}
5295 		break;
5296 
5297 	case ANEG_STATE_COMPLETE_ACK_INIT:
5298 		if (ap->rxconfig & ANEG_CFG_INVAL) {
5299 			ret = ANEG_FAILED;
5300 			break;
5301 		}
5302 		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5303 			       MR_LP_ADV_HALF_DUPLEX |
5304 			       MR_LP_ADV_SYM_PAUSE |
5305 			       MR_LP_ADV_ASYM_PAUSE |
5306 			       MR_LP_ADV_REMOTE_FAULT1 |
5307 			       MR_LP_ADV_REMOTE_FAULT2 |
5308 			       MR_LP_ADV_NEXT_PAGE |
5309 			       MR_TOGGLE_RX |
5310 			       MR_NP_RX);
5311 		if (ap->rxconfig & ANEG_CFG_FD)
5312 			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5313 		if (ap->rxconfig & ANEG_CFG_HD)
5314 			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5315 		if (ap->rxconfig & ANEG_CFG_PS1)
5316 			ap->flags |= MR_LP_ADV_SYM_PAUSE;
5317 		if (ap->rxconfig & ANEG_CFG_PS2)
5318 			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5319 		if (ap->rxconfig & ANEG_CFG_RF1)
5320 			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5321 		if (ap->rxconfig & ANEG_CFG_RF2)
5322 			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5323 		if (ap->rxconfig & ANEG_CFG_NP)
5324 			ap->flags |= MR_LP_ADV_NEXT_PAGE;
5325 
5326 		ap->link_time = ap->cur_time;
5327 
5328 		ap->flags ^= (MR_TOGGLE_TX);
5329 		if (ap->rxconfig & 0x0008)
5330 			ap->flags |= MR_TOGGLE_RX;
5331 		if (ap->rxconfig & ANEG_CFG_NP)
5332 			ap->flags |= MR_NP_RX;
5333 		ap->flags |= MR_PAGE_RX;
5334 
5335 		ap->state = ANEG_STATE_COMPLETE_ACK;
5336 		ret = ANEG_TIMER_ENAB;
5337 		break;
5338 
5339 	case ANEG_STATE_COMPLETE_ACK:
5340 		if (ap->ability_match != 0 &&
5341 		    ap->rxconfig == 0) {
5342 			ap->state = ANEG_STATE_AN_ENABLE;
5343 			break;
5344 		}
5345 		delta = ap->cur_time - ap->link_time;
5346 		if (delta > ANEG_STATE_SETTLE_TIME) {
5347 			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5348 				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5349 			} else {
5350 				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5351 				    !(ap->flags & MR_NP_RX)) {
5352 					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5353 				} else {
5354 					ret = ANEG_FAILED;
5355 				}
5356 			}
5357 		}
5358 		break;
5359 
5360 	case ANEG_STATE_IDLE_DETECT_INIT:
5361 		ap->link_time = ap->cur_time;
5362 		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5363 		tw32_f(MAC_MODE, tp->mac_mode);
5364 		udelay(40);
5365 
5366 		ap->state = ANEG_STATE_IDLE_DETECT;
5367 		ret = ANEG_TIMER_ENAB;
5368 		break;
5369 
5370 	case ANEG_STATE_IDLE_DETECT:
5371 		if (ap->ability_match != 0 &&
5372 		    ap->rxconfig == 0) {
5373 			ap->state = ANEG_STATE_AN_ENABLE;
5374 			break;
5375 		}
5376 		delta = ap->cur_time - ap->link_time;
5377 		if (delta > ANEG_STATE_SETTLE_TIME) {
5378 			/* XXX another gem from the Broadcom driver :( */
5379 			ap->state = ANEG_STATE_LINK_OK;
5380 		}
5381 		break;
5382 
5383 	case ANEG_STATE_LINK_OK:
5384 		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5385 		ret = ANEG_DONE;
5386 		break;
5387 
5388 	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5389 		/* ??? unimplemented */
5390 		break;
5391 
5392 	case ANEG_STATE_NEXT_PAGE_WAIT:
5393 		/* ??? unimplemented */
5394 		break;
5395 
5396 	default:
5397 		ret = ANEG_FAILED;
5398 		break;
5399 	}
5400 
5401 	return ret;
5402 }
5403 
fiber_autoneg(struct tg3 * tp,u32 * txflags,u32 * rxflags)5404 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5405 {
5406 	int res = 0;
5407 	struct tg3_fiber_aneginfo aninfo;
5408 	int status = ANEG_FAILED;
5409 	unsigned int tick;
5410 	u32 tmp;
5411 
5412 	tw32_f(MAC_TX_AUTO_NEG, 0);
5413 
5414 	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5415 	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5416 	udelay(40);
5417 
5418 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5419 	udelay(40);
5420 
5421 	memset(&aninfo, 0, sizeof(aninfo));
5422 	aninfo.flags |= MR_AN_ENABLE;
5423 	aninfo.state = ANEG_STATE_UNKNOWN;
5424 	aninfo.cur_time = 0;
5425 	tick = 0;
5426 	while (++tick < 195000) {
5427 		status = tg3_fiber_aneg_smachine(tp, &aninfo);
5428 		if (status == ANEG_DONE || status == ANEG_FAILED)
5429 			break;
5430 
5431 		udelay(1);
5432 	}
5433 
5434 	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5435 	tw32_f(MAC_MODE, tp->mac_mode);
5436 	udelay(40);
5437 
5438 	*txflags = aninfo.txconfig;
5439 	*rxflags = aninfo.flags;
5440 
5441 	if (status == ANEG_DONE &&
5442 	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5443 			     MR_LP_ADV_FULL_DUPLEX)))
5444 		res = 1;
5445 
5446 	return res;
5447 }
5448 
tg3_init_bcm8002(struct tg3 * tp)5449 static void tg3_init_bcm8002(struct tg3 *tp)
5450 {
5451 	u32 mac_status = tr32(MAC_STATUS);
5452 	int i;
5453 
5454 	/* Reset when initting first time or we have a link. */
5455 	if (tg3_flag(tp, INIT_COMPLETE) &&
5456 	    !(mac_status & MAC_STATUS_PCS_SYNCED))
5457 		return;
5458 
5459 	/* Set PLL lock range. */
5460 	tg3_writephy(tp, 0x16, 0x8007);
5461 
5462 	/* SW reset */
5463 	tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5464 
5465 	/* Wait for reset to complete. */
5466 	/* XXX schedule_timeout() ... */
5467 	for (i = 0; i < 500; i++)
5468 		udelay(10);
5469 
5470 	/* Config mode; select PMA/Ch 1 regs. */
5471 	tg3_writephy(tp, 0x10, 0x8411);
5472 
5473 	/* Enable auto-lock and comdet, select txclk for tx. */
5474 	tg3_writephy(tp, 0x11, 0x0a10);
5475 
5476 	tg3_writephy(tp, 0x18, 0x00a0);
5477 	tg3_writephy(tp, 0x16, 0x41ff);
5478 
5479 	/* Assert and deassert POR. */
5480 	tg3_writephy(tp, 0x13, 0x0400);
5481 	udelay(40);
5482 	tg3_writephy(tp, 0x13, 0x0000);
5483 
5484 	tg3_writephy(tp, 0x11, 0x0a50);
5485 	udelay(40);
5486 	tg3_writephy(tp, 0x11, 0x0a10);
5487 
5488 	/* Wait for signal to stabilize */
5489 	/* XXX schedule_timeout() ... */
5490 	for (i = 0; i < 15000; i++)
5491 		udelay(10);
5492 
5493 	/* Deselect the channel register so we can read the PHYID
5494 	 * later.
5495 	 */
5496 	tg3_writephy(tp, 0x10, 0x8011);
5497 }
5498 
tg3_setup_fiber_hw_autoneg(struct tg3 * tp,u32 mac_status)5499 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5500 {
5501 	u16 flowctrl;
5502 	bool current_link_up;
5503 	u32 sg_dig_ctrl, sg_dig_status;
5504 	u32 serdes_cfg, expected_sg_dig_ctrl;
5505 	int workaround, port_a;
5506 
5507 	serdes_cfg = 0;
5508 	workaround = 0;
5509 	port_a = 1;
5510 	current_link_up = false;
5511 
5512 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5513 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5514 		workaround = 1;
5515 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5516 			port_a = 0;
5517 
5518 		/* preserve bits 0-11,13,14 for signal pre-emphasis */
5519 		/* preserve bits 20-23 for voltage regulator */
5520 		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5521 	}
5522 
5523 	sg_dig_ctrl = tr32(SG_DIG_CTRL);
5524 
5525 	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5526 		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5527 			if (workaround) {
5528 				u32 val = serdes_cfg;
5529 
5530 				if (port_a)
5531 					val |= 0xc010000;
5532 				else
5533 					val |= 0x4010000;
5534 				tw32_f(MAC_SERDES_CFG, val);
5535 			}
5536 
5537 			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5538 		}
5539 		if (mac_status & MAC_STATUS_PCS_SYNCED) {
5540 			tg3_setup_flow_control(tp, 0, 0);
5541 			current_link_up = true;
5542 		}
5543 		goto out;
5544 	}
5545 
5546 	/* Want auto-negotiation.  */
5547 	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5548 
5549 	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5550 	if (flowctrl & ADVERTISE_1000XPAUSE)
5551 		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5552 	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5553 		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5554 
5555 	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5556 		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5557 		    tp->serdes_counter &&
5558 		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
5559 				    MAC_STATUS_RCVD_CFG)) ==
5560 		     MAC_STATUS_PCS_SYNCED)) {
5561 			tp->serdes_counter--;
5562 			current_link_up = true;
5563 			goto out;
5564 		}
5565 restart_autoneg:
5566 		if (workaround)
5567 			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5568 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5569 		udelay(5);
5570 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5571 
5572 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5573 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5574 	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5575 				 MAC_STATUS_SIGNAL_DET)) {
5576 		sg_dig_status = tr32(SG_DIG_STATUS);
5577 		mac_status = tr32(MAC_STATUS);
5578 
5579 		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5580 		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
5581 			u32 local_adv = 0, remote_adv = 0;
5582 
5583 			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5584 				local_adv |= ADVERTISE_1000XPAUSE;
5585 			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5586 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5587 
5588 			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5589 				remote_adv |= LPA_1000XPAUSE;
5590 			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5591 				remote_adv |= LPA_1000XPAUSE_ASYM;
5592 
5593 			tp->link_config.rmt_adv =
5594 					   mii_adv_to_ethtool_adv_x(remote_adv);
5595 
5596 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5597 			current_link_up = true;
5598 			tp->serdes_counter = 0;
5599 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5600 		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5601 			if (tp->serdes_counter)
5602 				tp->serdes_counter--;
5603 			else {
5604 				if (workaround) {
5605 					u32 val = serdes_cfg;
5606 
5607 					if (port_a)
5608 						val |= 0xc010000;
5609 					else
5610 						val |= 0x4010000;
5611 
5612 					tw32_f(MAC_SERDES_CFG, val);
5613 				}
5614 
5615 				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5616 				udelay(40);
5617 
5618 				/* Link parallel detection - link is up */
5619 				/* only if we have PCS_SYNC and not */
5620 				/* receiving config code words */
5621 				mac_status = tr32(MAC_STATUS);
5622 				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5623 				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
5624 					tg3_setup_flow_control(tp, 0, 0);
5625 					current_link_up = true;
5626 					tp->phy_flags |=
5627 						TG3_PHYFLG_PARALLEL_DETECT;
5628 					tp->serdes_counter =
5629 						SERDES_PARALLEL_DET_TIMEOUT;
5630 				} else
5631 					goto restart_autoneg;
5632 			}
5633 		}
5634 	} else {
5635 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5636 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5637 	}
5638 
5639 out:
5640 	return current_link_up;
5641 }
5642 
tg3_setup_fiber_by_hand(struct tg3 * tp,u32 mac_status)5643 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5644 {
5645 	bool current_link_up = false;
5646 
5647 	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5648 		goto out;
5649 
5650 	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5651 		u32 txflags, rxflags;
5652 		int i;
5653 
5654 		if (fiber_autoneg(tp, &txflags, &rxflags)) {
5655 			u32 local_adv = 0, remote_adv = 0;
5656 
5657 			if (txflags & ANEG_CFG_PS1)
5658 				local_adv |= ADVERTISE_1000XPAUSE;
5659 			if (txflags & ANEG_CFG_PS2)
5660 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5661 
5662 			if (rxflags & MR_LP_ADV_SYM_PAUSE)
5663 				remote_adv |= LPA_1000XPAUSE;
5664 			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5665 				remote_adv |= LPA_1000XPAUSE_ASYM;
5666 
5667 			tp->link_config.rmt_adv =
5668 					   mii_adv_to_ethtool_adv_x(remote_adv);
5669 
5670 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5671 
5672 			current_link_up = true;
5673 		}
5674 		for (i = 0; i < 30; i++) {
5675 			udelay(20);
5676 			tw32_f(MAC_STATUS,
5677 			       (MAC_STATUS_SYNC_CHANGED |
5678 				MAC_STATUS_CFG_CHANGED));
5679 			udelay(40);
5680 			if ((tr32(MAC_STATUS) &
5681 			     (MAC_STATUS_SYNC_CHANGED |
5682 			      MAC_STATUS_CFG_CHANGED)) == 0)
5683 				break;
5684 		}
5685 
5686 		mac_status = tr32(MAC_STATUS);
5687 		if (!current_link_up &&
5688 		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
5689 		    !(mac_status & MAC_STATUS_RCVD_CFG))
5690 			current_link_up = true;
5691 	} else {
5692 		tg3_setup_flow_control(tp, 0, 0);
5693 
5694 		/* Forcing 1000FD link up. */
5695 		current_link_up = true;
5696 
5697 		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5698 		udelay(40);
5699 
5700 		tw32_f(MAC_MODE, tp->mac_mode);
5701 		udelay(40);
5702 	}
5703 
5704 out:
5705 	return current_link_up;
5706 }
5707 
tg3_setup_fiber_phy(struct tg3 * tp,bool force_reset)5708 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5709 {
5710 	u32 orig_pause_cfg;
5711 	u32 orig_active_speed;
5712 	u8 orig_active_duplex;
5713 	u32 mac_status;
5714 	bool current_link_up;
5715 	int i;
5716 
5717 	orig_pause_cfg = tp->link_config.active_flowctrl;
5718 	orig_active_speed = tp->link_config.active_speed;
5719 	orig_active_duplex = tp->link_config.active_duplex;
5720 
5721 	if (!tg3_flag(tp, HW_AUTONEG) &&
5722 	    tp->link_up &&
5723 	    tg3_flag(tp, INIT_COMPLETE)) {
5724 		mac_status = tr32(MAC_STATUS);
5725 		mac_status &= (MAC_STATUS_PCS_SYNCED |
5726 			       MAC_STATUS_SIGNAL_DET |
5727 			       MAC_STATUS_CFG_CHANGED |
5728 			       MAC_STATUS_RCVD_CFG);
5729 		if (mac_status == (MAC_STATUS_PCS_SYNCED |
5730 				   MAC_STATUS_SIGNAL_DET)) {
5731 			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5732 					    MAC_STATUS_CFG_CHANGED));
5733 			return 0;
5734 		}
5735 	}
5736 
5737 	tw32_f(MAC_TX_AUTO_NEG, 0);
5738 
5739 	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5740 	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5741 	tw32_f(MAC_MODE, tp->mac_mode);
5742 	udelay(40);
5743 
5744 	if (tp->phy_id == TG3_PHY_ID_BCM8002)
5745 		tg3_init_bcm8002(tp);
5746 
5747 	/* Enable link change event even when serdes polling.  */
5748 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5749 	udelay(40);
5750 
5751 	tp->link_config.rmt_adv = 0;
5752 	mac_status = tr32(MAC_STATUS);
5753 
5754 	if (tg3_flag(tp, HW_AUTONEG))
5755 		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5756 	else
5757 		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5758 
5759 	tp->napi[0].hw_status->status =
5760 		(SD_STATUS_UPDATED |
5761 		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5762 
5763 	for (i = 0; i < 100; i++) {
5764 		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5765 				    MAC_STATUS_CFG_CHANGED));
5766 		udelay(5);
5767 		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5768 					 MAC_STATUS_CFG_CHANGED |
5769 					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5770 			break;
5771 	}
5772 
5773 	mac_status = tr32(MAC_STATUS);
5774 	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5775 		current_link_up = false;
5776 		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5777 		    tp->serdes_counter == 0) {
5778 			tw32_f(MAC_MODE, (tp->mac_mode |
5779 					  MAC_MODE_SEND_CONFIGS));
5780 			udelay(1);
5781 			tw32_f(MAC_MODE, tp->mac_mode);
5782 		}
5783 	}
5784 
5785 	if (current_link_up) {
5786 		tp->link_config.active_speed = SPEED_1000;
5787 		tp->link_config.active_duplex = DUPLEX_FULL;
5788 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5789 				    LED_CTRL_LNKLED_OVERRIDE |
5790 				    LED_CTRL_1000MBPS_ON));
5791 	} else {
5792 		tp->link_config.active_speed = SPEED_UNKNOWN;
5793 		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5794 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5795 				    LED_CTRL_LNKLED_OVERRIDE |
5796 				    LED_CTRL_TRAFFIC_OVERRIDE));
5797 	}
5798 
5799 	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5800 		u32 now_pause_cfg = tp->link_config.active_flowctrl;
5801 		if (orig_pause_cfg != now_pause_cfg ||
5802 		    orig_active_speed != tp->link_config.active_speed ||
5803 		    orig_active_duplex != tp->link_config.active_duplex)
5804 			tg3_link_report(tp);
5805 	}
5806 
5807 	return 0;
5808 }
5809 
tg3_setup_fiber_mii_phy(struct tg3 * tp,bool force_reset)5810 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5811 {
5812 	int err = 0;
5813 	u32 bmsr, bmcr;
5814 	u32 current_speed = SPEED_UNKNOWN;
5815 	u8 current_duplex = DUPLEX_UNKNOWN;
5816 	bool current_link_up = false;
5817 	u32 local_adv, remote_adv, sgsr;
5818 
5819 	if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5820 	     tg3_asic_rev(tp) == ASIC_REV_5720) &&
5821 	     !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5822 	     (sgsr & SERDES_TG3_SGMII_MODE)) {
5823 
5824 		if (force_reset)
5825 			tg3_phy_reset(tp);
5826 
5827 		tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5828 
5829 		if (!(sgsr & SERDES_TG3_LINK_UP)) {
5830 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5831 		} else {
5832 			current_link_up = true;
5833 			if (sgsr & SERDES_TG3_SPEED_1000) {
5834 				current_speed = SPEED_1000;
5835 				tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5836 			} else if (sgsr & SERDES_TG3_SPEED_100) {
5837 				current_speed = SPEED_100;
5838 				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5839 			} else {
5840 				current_speed = SPEED_10;
5841 				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5842 			}
5843 
5844 			if (sgsr & SERDES_TG3_FULL_DUPLEX)
5845 				current_duplex = DUPLEX_FULL;
5846 			else
5847 				current_duplex = DUPLEX_HALF;
5848 		}
5849 
5850 		tw32_f(MAC_MODE, tp->mac_mode);
5851 		udelay(40);
5852 
5853 		tg3_clear_mac_status(tp);
5854 
5855 		goto fiber_setup_done;
5856 	}
5857 
5858 	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5859 	tw32_f(MAC_MODE, tp->mac_mode);
5860 	udelay(40);
5861 
5862 	tg3_clear_mac_status(tp);
5863 
5864 	if (force_reset)
5865 		tg3_phy_reset(tp);
5866 
5867 	tp->link_config.rmt_adv = 0;
5868 
5869 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5870 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5871 	if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5872 		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5873 			bmsr |= BMSR_LSTATUS;
5874 		else
5875 			bmsr &= ~BMSR_LSTATUS;
5876 	}
5877 
5878 	err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5879 
5880 	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5881 	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5882 		/* do nothing, just check for link up at the end */
5883 	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5884 		u32 adv, newadv;
5885 
5886 		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5887 		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5888 				 ADVERTISE_1000XPAUSE |
5889 				 ADVERTISE_1000XPSE_ASYM |
5890 				 ADVERTISE_SLCT);
5891 
5892 		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5893 		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5894 
5895 		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5896 			tg3_writephy(tp, MII_ADVERTISE, newadv);
5897 			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5898 			tg3_writephy(tp, MII_BMCR, bmcr);
5899 
5900 			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5901 			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5902 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5903 
5904 			return err;
5905 		}
5906 	} else {
5907 		u32 new_bmcr;
5908 
5909 		bmcr &= ~BMCR_SPEED1000;
5910 		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5911 
5912 		if (tp->link_config.duplex == DUPLEX_FULL)
5913 			new_bmcr |= BMCR_FULLDPLX;
5914 
5915 		if (new_bmcr != bmcr) {
5916 			/* BMCR_SPEED1000 is a reserved bit that needs
5917 			 * to be set on write.
5918 			 */
5919 			new_bmcr |= BMCR_SPEED1000;
5920 
5921 			/* Force a linkdown */
5922 			if (tp->link_up) {
5923 				u32 adv;
5924 
5925 				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5926 				adv &= ~(ADVERTISE_1000XFULL |
5927 					 ADVERTISE_1000XHALF |
5928 					 ADVERTISE_SLCT);
5929 				tg3_writephy(tp, MII_ADVERTISE, adv);
5930 				tg3_writephy(tp, MII_BMCR, bmcr |
5931 							   BMCR_ANRESTART |
5932 							   BMCR_ANENABLE);
5933 				udelay(10);
5934 				tg3_carrier_off(tp);
5935 			}
5936 			tg3_writephy(tp, MII_BMCR, new_bmcr);
5937 			bmcr = new_bmcr;
5938 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5939 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5940 			if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5941 				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5942 					bmsr |= BMSR_LSTATUS;
5943 				else
5944 					bmsr &= ~BMSR_LSTATUS;
5945 			}
5946 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5947 		}
5948 	}
5949 
5950 	if (bmsr & BMSR_LSTATUS) {
5951 		current_speed = SPEED_1000;
5952 		current_link_up = true;
5953 		if (bmcr & BMCR_FULLDPLX)
5954 			current_duplex = DUPLEX_FULL;
5955 		else
5956 			current_duplex = DUPLEX_HALF;
5957 
5958 		local_adv = 0;
5959 		remote_adv = 0;
5960 
5961 		if (bmcr & BMCR_ANENABLE) {
5962 			u32 common;
5963 
5964 			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5965 			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5966 			common = local_adv & remote_adv;
5967 			if (common & (ADVERTISE_1000XHALF |
5968 				      ADVERTISE_1000XFULL)) {
5969 				if (common & ADVERTISE_1000XFULL)
5970 					current_duplex = DUPLEX_FULL;
5971 				else
5972 					current_duplex = DUPLEX_HALF;
5973 
5974 				tp->link_config.rmt_adv =
5975 					   mii_adv_to_ethtool_adv_x(remote_adv);
5976 			} else if (!tg3_flag(tp, 5780_CLASS)) {
5977 				/* Link is up via parallel detect */
5978 			} else {
5979 				current_link_up = false;
5980 			}
5981 		}
5982 	}
5983 
5984 fiber_setup_done:
5985 	if (current_link_up && current_duplex == DUPLEX_FULL)
5986 		tg3_setup_flow_control(tp, local_adv, remote_adv);
5987 
5988 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5989 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5990 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5991 
5992 	tw32_f(MAC_MODE, tp->mac_mode);
5993 	udelay(40);
5994 
5995 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5996 
5997 	tp->link_config.active_speed = current_speed;
5998 	tp->link_config.active_duplex = current_duplex;
5999 
6000 	tg3_test_and_report_link_chg(tp, current_link_up);
6001 	return err;
6002 }
6003 
tg3_serdes_parallel_detect(struct tg3 * tp)6004 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6005 {
6006 	if (tp->serdes_counter) {
6007 		/* Give autoneg time to complete. */
6008 		tp->serdes_counter--;
6009 		return;
6010 	}
6011 
6012 	if (!tp->link_up &&
6013 	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6014 		u32 bmcr;
6015 
6016 		tg3_readphy(tp, MII_BMCR, &bmcr);
6017 		if (bmcr & BMCR_ANENABLE) {
6018 			u32 phy1, phy2;
6019 
6020 			/* Select shadow register 0x1f */
6021 			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6022 			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6023 
6024 			/* Select expansion interrupt status register */
6025 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6026 					 MII_TG3_DSP_EXP1_INT_STAT);
6027 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6028 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6029 
6030 			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6031 				/* We have signal detect and not receiving
6032 				 * config code words, link is up by parallel
6033 				 * detection.
6034 				 */
6035 
6036 				bmcr &= ~BMCR_ANENABLE;
6037 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6038 				tg3_writephy(tp, MII_BMCR, bmcr);
6039 				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6040 			}
6041 		}
6042 	} else if (tp->link_up &&
6043 		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6044 		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6045 		u32 phy2;
6046 
6047 		/* Select expansion interrupt status register */
6048 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6049 				 MII_TG3_DSP_EXP1_INT_STAT);
6050 		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6051 		if (phy2 & 0x20) {
6052 			u32 bmcr;
6053 
6054 			/* Config code words received, turn on autoneg. */
6055 			tg3_readphy(tp, MII_BMCR, &bmcr);
6056 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6057 
6058 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6059 
6060 		}
6061 	}
6062 }
6063 
tg3_setup_phy(struct tg3 * tp,bool force_reset)6064 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6065 {
6066 	u32 val;
6067 	int err;
6068 
6069 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6070 		err = tg3_setup_fiber_phy(tp, force_reset);
6071 	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6072 		err = tg3_setup_fiber_mii_phy(tp, force_reset);
6073 	else
6074 		err = tg3_setup_copper_phy(tp, force_reset);
6075 
6076 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6077 		u32 scale;
6078 
6079 		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6080 		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6081 			scale = 65;
6082 		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6083 			scale = 6;
6084 		else
6085 			scale = 12;
6086 
6087 		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6088 		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6089 		tw32(GRC_MISC_CFG, val);
6090 	}
6091 
6092 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6093 	      (6 << TX_LENGTHS_IPG_SHIFT);
6094 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6095 	    tg3_asic_rev(tp) == ASIC_REV_5762)
6096 		val |= tr32(MAC_TX_LENGTHS) &
6097 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
6098 			TX_LENGTHS_CNT_DWN_VAL_MSK);
6099 
6100 	if (tp->link_config.active_speed == SPEED_1000 &&
6101 	    tp->link_config.active_duplex == DUPLEX_HALF)
6102 		tw32(MAC_TX_LENGTHS, val |
6103 		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6104 	else
6105 		tw32(MAC_TX_LENGTHS, val |
6106 		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6107 
6108 	if (!tg3_flag(tp, 5705_PLUS)) {
6109 		if (tp->link_up) {
6110 			tw32(HOSTCC_STAT_COAL_TICKS,
6111 			     tp->coal.stats_block_coalesce_usecs);
6112 		} else {
6113 			tw32(HOSTCC_STAT_COAL_TICKS, 0);
6114 		}
6115 	}
6116 
6117 	if (tg3_flag(tp, ASPM_WORKAROUND)) {
6118 		val = tr32(PCIE_PWR_MGMT_THRESH);
6119 		if (!tp->link_up)
6120 			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6121 			      tp->pwrmgmt_thresh;
6122 		else
6123 			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6124 		tw32(PCIE_PWR_MGMT_THRESH, val);
6125 	}
6126 
6127 	return err;
6128 }
6129 
6130 /* tp->lock must be held */
tg3_refclk_read(struct tg3 * tp,struct ptp_system_timestamp * sts)6131 static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
6132 {
6133 	u64 stamp;
6134 
6135 	ptp_read_system_prets(sts);
6136 	stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6137 	ptp_read_system_postts(sts);
6138 	stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6139 
6140 	return stamp;
6141 }
6142 
6143 /* tp->lock must be held */
tg3_refclk_write(struct tg3 * tp,u64 newval)6144 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6145 {
6146 	u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6147 
6148 	tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6149 	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6150 	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6151 	tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6152 }
6153 
6154 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6155 static inline void tg3_full_unlock(struct tg3 *tp);
tg3_get_ts_info(struct net_device * dev,struct ethtool_ts_info * info)6156 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6157 {
6158 	struct tg3 *tp = netdev_priv(dev);
6159 
6160 	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6161 				SOF_TIMESTAMPING_RX_SOFTWARE |
6162 				SOF_TIMESTAMPING_SOFTWARE;
6163 
6164 	if (tg3_flag(tp, PTP_CAPABLE)) {
6165 		info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6166 					SOF_TIMESTAMPING_RX_HARDWARE |
6167 					SOF_TIMESTAMPING_RAW_HARDWARE;
6168 	}
6169 
6170 	if (tp->ptp_clock)
6171 		info->phc_index = ptp_clock_index(tp->ptp_clock);
6172 	else
6173 		info->phc_index = -1;
6174 
6175 	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6176 
6177 	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6178 			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6179 			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6180 			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6181 	return 0;
6182 }
6183 
tg3_ptp_adjfine(struct ptp_clock_info * ptp,long scaled_ppm)6184 static int tg3_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
6185 {
6186 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6187 	u64 correction;
6188 	bool neg_adj;
6189 
6190 	/* Frequency adjustment is performed using hardware with a 24 bit
6191 	 * accumulator and a programmable correction value. On each clk, the
6192 	 * correction value gets added to the accumulator and when it
6193 	 * overflows, the time counter is incremented/decremented.
6194 	 */
6195 	neg_adj = diff_by_scaled_ppm(1 << 24, scaled_ppm, &correction);
6196 
6197 	tg3_full_lock(tp, 0);
6198 
6199 	if (correction)
6200 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6201 		     TG3_EAV_REF_CLK_CORRECT_EN |
6202 		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) |
6203 		     ((u32)correction & TG3_EAV_REF_CLK_CORRECT_MASK));
6204 	else
6205 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6206 
6207 	tg3_full_unlock(tp);
6208 
6209 	return 0;
6210 }
6211 
tg3_ptp_adjtime(struct ptp_clock_info * ptp,s64 delta)6212 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6213 {
6214 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6215 
6216 	tg3_full_lock(tp, 0);
6217 	tp->ptp_adjust += delta;
6218 	tg3_full_unlock(tp);
6219 
6220 	return 0;
6221 }
6222 
tg3_ptp_gettimex(struct ptp_clock_info * ptp,struct timespec64 * ts,struct ptp_system_timestamp * sts)6223 static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
6224 			    struct ptp_system_timestamp *sts)
6225 {
6226 	u64 ns;
6227 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6228 
6229 	tg3_full_lock(tp, 0);
6230 	ns = tg3_refclk_read(tp, sts);
6231 	ns += tp->ptp_adjust;
6232 	tg3_full_unlock(tp);
6233 
6234 	*ts = ns_to_timespec64(ns);
6235 
6236 	return 0;
6237 }
6238 
tg3_ptp_settime(struct ptp_clock_info * ptp,const struct timespec64 * ts)6239 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6240 			   const struct timespec64 *ts)
6241 {
6242 	u64 ns;
6243 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6244 
6245 	ns = timespec64_to_ns(ts);
6246 
6247 	tg3_full_lock(tp, 0);
6248 	tg3_refclk_write(tp, ns);
6249 	tp->ptp_adjust = 0;
6250 	tg3_full_unlock(tp);
6251 
6252 	return 0;
6253 }
6254 
tg3_ptp_enable(struct ptp_clock_info * ptp,struct ptp_clock_request * rq,int on)6255 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6256 			  struct ptp_clock_request *rq, int on)
6257 {
6258 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6259 	u32 clock_ctl;
6260 	int rval = 0;
6261 
6262 	switch (rq->type) {
6263 	case PTP_CLK_REQ_PEROUT:
6264 		/* Reject requests with unsupported flags */
6265 		if (rq->perout.flags)
6266 			return -EOPNOTSUPP;
6267 
6268 		if (rq->perout.index != 0)
6269 			return -EINVAL;
6270 
6271 		tg3_full_lock(tp, 0);
6272 		clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6273 		clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6274 
6275 		if (on) {
6276 			u64 nsec;
6277 
6278 			nsec = rq->perout.start.sec * 1000000000ULL +
6279 			       rq->perout.start.nsec;
6280 
6281 			if (rq->perout.period.sec || rq->perout.period.nsec) {
6282 				netdev_warn(tp->dev,
6283 					    "Device supports only a one-shot timesync output, period must be 0\n");
6284 				rval = -EINVAL;
6285 				goto err_out;
6286 			}
6287 
6288 			if (nsec & (1ULL << 63)) {
6289 				netdev_warn(tp->dev,
6290 					    "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6291 				rval = -EINVAL;
6292 				goto err_out;
6293 			}
6294 
6295 			tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6296 			tw32(TG3_EAV_WATCHDOG0_MSB,
6297 			     TG3_EAV_WATCHDOG0_EN |
6298 			     ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6299 
6300 			tw32(TG3_EAV_REF_CLCK_CTL,
6301 			     clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6302 		} else {
6303 			tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6304 			tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6305 		}
6306 
6307 err_out:
6308 		tg3_full_unlock(tp);
6309 		return rval;
6310 
6311 	default:
6312 		break;
6313 	}
6314 
6315 	return -EOPNOTSUPP;
6316 }
6317 
6318 static const struct ptp_clock_info tg3_ptp_caps = {
6319 	.owner		= THIS_MODULE,
6320 	.name		= "tg3 clock",
6321 	.max_adj	= 250000000,
6322 	.n_alarm	= 0,
6323 	.n_ext_ts	= 0,
6324 	.n_per_out	= 1,
6325 	.n_pins		= 0,
6326 	.pps		= 0,
6327 	.adjfine	= tg3_ptp_adjfine,
6328 	.adjtime	= tg3_ptp_adjtime,
6329 	.gettimex64	= tg3_ptp_gettimex,
6330 	.settime64	= tg3_ptp_settime,
6331 	.enable		= tg3_ptp_enable,
6332 };
6333 
tg3_hwclock_to_timestamp(struct tg3 * tp,u64 hwclock,struct skb_shared_hwtstamps * timestamp)6334 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6335 				     struct skb_shared_hwtstamps *timestamp)
6336 {
6337 	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6338 	timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6339 					   tp->ptp_adjust);
6340 }
6341 
6342 /* tp->lock must be held */
tg3_ptp_init(struct tg3 * tp)6343 static void tg3_ptp_init(struct tg3 *tp)
6344 {
6345 	if (!tg3_flag(tp, PTP_CAPABLE))
6346 		return;
6347 
6348 	/* Initialize the hardware clock to the system time. */
6349 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6350 	tp->ptp_adjust = 0;
6351 	tp->ptp_info = tg3_ptp_caps;
6352 }
6353 
6354 /* tp->lock must be held */
tg3_ptp_resume(struct tg3 * tp)6355 static void tg3_ptp_resume(struct tg3 *tp)
6356 {
6357 	if (!tg3_flag(tp, PTP_CAPABLE))
6358 		return;
6359 
6360 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6361 	tp->ptp_adjust = 0;
6362 }
6363 
tg3_ptp_fini(struct tg3 * tp)6364 static void tg3_ptp_fini(struct tg3 *tp)
6365 {
6366 	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6367 		return;
6368 
6369 	ptp_clock_unregister(tp->ptp_clock);
6370 	tp->ptp_clock = NULL;
6371 	tp->ptp_adjust = 0;
6372 }
6373 
tg3_irq_sync(struct tg3 * tp)6374 static inline int tg3_irq_sync(struct tg3 *tp)
6375 {
6376 	return tp->irq_sync;
6377 }
6378 
tg3_rd32_loop(struct tg3 * tp,u32 * dst,u32 off,u32 len)6379 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6380 {
6381 	int i;
6382 
6383 	dst = (u32 *)((u8 *)dst + off);
6384 	for (i = 0; i < len; i += sizeof(u32))
6385 		*dst++ = tr32(off + i);
6386 }
6387 
tg3_dump_legacy_regs(struct tg3 * tp,u32 * regs)6388 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6389 {
6390 	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6391 	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6392 	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6393 	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6394 	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6395 	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6396 	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6397 	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6398 	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6399 	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6400 	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6401 	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6402 	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6403 	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6404 	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6405 	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6406 	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6407 	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6408 	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6409 
6410 	if (tg3_flag(tp, SUPPORT_MSIX))
6411 		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6412 
6413 	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6414 	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6415 	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6416 	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6417 	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6418 	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6419 	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6420 	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6421 
6422 	if (!tg3_flag(tp, 5705_PLUS)) {
6423 		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6424 		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6425 		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6426 	}
6427 
6428 	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6429 	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6430 	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6431 	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6432 	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6433 
6434 	if (tg3_flag(tp, NVRAM))
6435 		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6436 }
6437 
tg3_dump_state(struct tg3 * tp)6438 static void tg3_dump_state(struct tg3 *tp)
6439 {
6440 	int i;
6441 	u32 *regs;
6442 
6443 	/* If it is a PCI error, all registers will be 0xffff,
6444 	 * we don't dump them out, just report the error and return
6445 	 */
6446 	if (tp->pdev->error_state != pci_channel_io_normal) {
6447 		netdev_err(tp->dev, "PCI channel ERROR!\n");
6448 		return;
6449 	}
6450 
6451 	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6452 	if (!regs)
6453 		return;
6454 
6455 	if (tg3_flag(tp, PCI_EXPRESS)) {
6456 		/* Read up to but not including private PCI registers */
6457 		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6458 			regs[i / sizeof(u32)] = tr32(i);
6459 	} else
6460 		tg3_dump_legacy_regs(tp, regs);
6461 
6462 	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6463 		if (!regs[i + 0] && !regs[i + 1] &&
6464 		    !regs[i + 2] && !regs[i + 3])
6465 			continue;
6466 
6467 		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6468 			   i * 4,
6469 			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6470 	}
6471 
6472 	kfree(regs);
6473 
6474 	for (i = 0; i < tp->irq_cnt; i++) {
6475 		struct tg3_napi *tnapi = &tp->napi[i];
6476 
6477 		/* SW status block */
6478 		netdev_err(tp->dev,
6479 			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6480 			   i,
6481 			   tnapi->hw_status->status,
6482 			   tnapi->hw_status->status_tag,
6483 			   tnapi->hw_status->rx_jumbo_consumer,
6484 			   tnapi->hw_status->rx_consumer,
6485 			   tnapi->hw_status->rx_mini_consumer,
6486 			   tnapi->hw_status->idx[0].rx_producer,
6487 			   tnapi->hw_status->idx[0].tx_consumer);
6488 
6489 		netdev_err(tp->dev,
6490 		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6491 			   i,
6492 			   tnapi->last_tag, tnapi->last_irq_tag,
6493 			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6494 			   tnapi->rx_rcb_ptr,
6495 			   tnapi->prodring.rx_std_prod_idx,
6496 			   tnapi->prodring.rx_std_cons_idx,
6497 			   tnapi->prodring.rx_jmb_prod_idx,
6498 			   tnapi->prodring.rx_jmb_cons_idx);
6499 	}
6500 }
6501 
6502 /* This is called whenever we suspect that the system chipset is re-
6503  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6504  * is bogus tx completions. We try to recover by setting the
6505  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6506  * in the workqueue.
6507  */
tg3_tx_recover(struct tg3 * tp)6508 static void tg3_tx_recover(struct tg3 *tp)
6509 {
6510 	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6511 	       tp->write32_tx_mbox == tg3_write_indirect_mbox);
6512 
6513 	netdev_warn(tp->dev,
6514 		    "The system may be re-ordering memory-mapped I/O "
6515 		    "cycles to the network device, attempting to recover. "
6516 		    "Please report the problem to the driver maintainer "
6517 		    "and include system chipset information.\n");
6518 
6519 	tg3_flag_set(tp, TX_RECOVERY_PENDING);
6520 }
6521 
tg3_tx_avail(struct tg3_napi * tnapi)6522 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6523 {
6524 	/* Tell compiler to fetch tx indices from memory. */
6525 	barrier();
6526 	return tnapi->tx_pending -
6527 	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6528 }
6529 
6530 /* Tigon3 never reports partial packet sends.  So we do not
6531  * need special logic to handle SKBs that have not had all
6532  * of their frags sent yet, like SunGEM does.
6533  */
tg3_tx(struct tg3_napi * tnapi)6534 static void tg3_tx(struct tg3_napi *tnapi)
6535 {
6536 	struct tg3 *tp = tnapi->tp;
6537 	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6538 	u32 sw_idx = tnapi->tx_cons;
6539 	struct netdev_queue *txq;
6540 	int index = tnapi - tp->napi;
6541 	unsigned int pkts_compl = 0, bytes_compl = 0;
6542 
6543 	if (tg3_flag(tp, ENABLE_TSS))
6544 		index--;
6545 
6546 	txq = netdev_get_tx_queue(tp->dev, index);
6547 
6548 	while (sw_idx != hw_idx) {
6549 		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6550 		struct sk_buff *skb = ri->skb;
6551 		int i, tx_bug = 0;
6552 
6553 		if (unlikely(skb == NULL)) {
6554 			tg3_tx_recover(tp);
6555 			return;
6556 		}
6557 
6558 		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6559 			struct skb_shared_hwtstamps timestamp;
6560 			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6561 			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6562 
6563 			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6564 
6565 			skb_tstamp_tx(skb, &timestamp);
6566 		}
6567 
6568 		dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping),
6569 				 skb_headlen(skb), DMA_TO_DEVICE);
6570 
6571 		ri->skb = NULL;
6572 
6573 		while (ri->fragmented) {
6574 			ri->fragmented = false;
6575 			sw_idx = NEXT_TX(sw_idx);
6576 			ri = &tnapi->tx_buffers[sw_idx];
6577 		}
6578 
6579 		sw_idx = NEXT_TX(sw_idx);
6580 
6581 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6582 			ri = &tnapi->tx_buffers[sw_idx];
6583 			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6584 				tx_bug = 1;
6585 
6586 			dma_unmap_page(&tp->pdev->dev,
6587 				       dma_unmap_addr(ri, mapping),
6588 				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
6589 				       DMA_TO_DEVICE);
6590 
6591 			while (ri->fragmented) {
6592 				ri->fragmented = false;
6593 				sw_idx = NEXT_TX(sw_idx);
6594 				ri = &tnapi->tx_buffers[sw_idx];
6595 			}
6596 
6597 			sw_idx = NEXT_TX(sw_idx);
6598 		}
6599 
6600 		pkts_compl++;
6601 		bytes_compl += skb->len;
6602 
6603 		dev_consume_skb_any(skb);
6604 
6605 		if (unlikely(tx_bug)) {
6606 			tg3_tx_recover(tp);
6607 			return;
6608 		}
6609 	}
6610 
6611 	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6612 
6613 	tnapi->tx_cons = sw_idx;
6614 
6615 	/* Need to make the tx_cons update visible to tg3_start_xmit()
6616 	 * before checking for netif_queue_stopped().  Without the
6617 	 * memory barrier, there is a small possibility that tg3_start_xmit()
6618 	 * will miss it and cause the queue to be stopped forever.
6619 	 */
6620 	smp_mb();
6621 
6622 	if (unlikely(netif_tx_queue_stopped(txq) &&
6623 		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6624 		__netif_tx_lock(txq, smp_processor_id());
6625 		if (netif_tx_queue_stopped(txq) &&
6626 		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6627 			netif_tx_wake_queue(txq);
6628 		__netif_tx_unlock(txq);
6629 	}
6630 }
6631 
tg3_frag_free(bool is_frag,void * data)6632 static void tg3_frag_free(bool is_frag, void *data)
6633 {
6634 	if (is_frag)
6635 		skb_free_frag(data);
6636 	else
6637 		kfree(data);
6638 }
6639 
tg3_rx_data_free(struct tg3 * tp,struct ring_info * ri,u32 map_sz)6640 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6641 {
6642 	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6643 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6644 
6645 	if (!ri->data)
6646 		return;
6647 
6648 	dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), map_sz,
6649 			 DMA_FROM_DEVICE);
6650 	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6651 	ri->data = NULL;
6652 }
6653 
6654 
6655 /* Returns size of skb allocated or < 0 on error.
6656  *
6657  * We only need to fill in the address because the other members
6658  * of the RX descriptor are invariant, see tg3_init_rings.
6659  *
6660  * Note the purposeful assymetry of cpu vs. chip accesses.  For
6661  * posting buffers we only dirty the first cache line of the RX
6662  * descriptor (containing the address).  Whereas for the RX status
6663  * buffers the cpu only reads the last cacheline of the RX descriptor
6664  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6665  */
tg3_alloc_rx_data(struct tg3 * tp,struct tg3_rx_prodring_set * tpr,u32 opaque_key,u32 dest_idx_unmasked,unsigned int * frag_size)6666 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6667 			     u32 opaque_key, u32 dest_idx_unmasked,
6668 			     unsigned int *frag_size)
6669 {
6670 	struct tg3_rx_buffer_desc *desc;
6671 	struct ring_info *map;
6672 	u8 *data;
6673 	dma_addr_t mapping;
6674 	int skb_size, data_size, dest_idx;
6675 
6676 	switch (opaque_key) {
6677 	case RXD_OPAQUE_RING_STD:
6678 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6679 		desc = &tpr->rx_std[dest_idx];
6680 		map = &tpr->rx_std_buffers[dest_idx];
6681 		data_size = tp->rx_pkt_map_sz;
6682 		break;
6683 
6684 	case RXD_OPAQUE_RING_JUMBO:
6685 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6686 		desc = &tpr->rx_jmb[dest_idx].std;
6687 		map = &tpr->rx_jmb_buffers[dest_idx];
6688 		data_size = TG3_RX_JMB_MAP_SZ;
6689 		break;
6690 
6691 	default:
6692 		return -EINVAL;
6693 	}
6694 
6695 	/* Do not overwrite any of the map or rp information
6696 	 * until we are sure we can commit to a new buffer.
6697 	 *
6698 	 * Callers depend upon this behavior and assume that
6699 	 * we leave everything unchanged if we fail.
6700 	 */
6701 	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6702 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6703 	if (skb_size <= PAGE_SIZE) {
6704 		data = napi_alloc_frag(skb_size);
6705 		*frag_size = skb_size;
6706 	} else {
6707 		data = kmalloc(skb_size, GFP_ATOMIC);
6708 		*frag_size = 0;
6709 	}
6710 	if (!data)
6711 		return -ENOMEM;
6712 
6713 	mapping = dma_map_single(&tp->pdev->dev, data + TG3_RX_OFFSET(tp),
6714 				 data_size, DMA_FROM_DEVICE);
6715 	if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
6716 		tg3_frag_free(skb_size <= PAGE_SIZE, data);
6717 		return -EIO;
6718 	}
6719 
6720 	map->data = data;
6721 	dma_unmap_addr_set(map, mapping, mapping);
6722 
6723 	desc->addr_hi = ((u64)mapping >> 32);
6724 	desc->addr_lo = ((u64)mapping & 0xffffffff);
6725 
6726 	return data_size;
6727 }
6728 
6729 /* We only need to move over in the address because the other
6730  * members of the RX descriptor are invariant.  See notes above
6731  * tg3_alloc_rx_data for full details.
6732  */
tg3_recycle_rx(struct tg3_napi * tnapi,struct tg3_rx_prodring_set * dpr,u32 opaque_key,int src_idx,u32 dest_idx_unmasked)6733 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6734 			   struct tg3_rx_prodring_set *dpr,
6735 			   u32 opaque_key, int src_idx,
6736 			   u32 dest_idx_unmasked)
6737 {
6738 	struct tg3 *tp = tnapi->tp;
6739 	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6740 	struct ring_info *src_map, *dest_map;
6741 	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6742 	int dest_idx;
6743 
6744 	switch (opaque_key) {
6745 	case RXD_OPAQUE_RING_STD:
6746 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6747 		dest_desc = &dpr->rx_std[dest_idx];
6748 		dest_map = &dpr->rx_std_buffers[dest_idx];
6749 		src_desc = &spr->rx_std[src_idx];
6750 		src_map = &spr->rx_std_buffers[src_idx];
6751 		break;
6752 
6753 	case RXD_OPAQUE_RING_JUMBO:
6754 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6755 		dest_desc = &dpr->rx_jmb[dest_idx].std;
6756 		dest_map = &dpr->rx_jmb_buffers[dest_idx];
6757 		src_desc = &spr->rx_jmb[src_idx].std;
6758 		src_map = &spr->rx_jmb_buffers[src_idx];
6759 		break;
6760 
6761 	default:
6762 		return;
6763 	}
6764 
6765 	dest_map->data = src_map->data;
6766 	dma_unmap_addr_set(dest_map, mapping,
6767 			   dma_unmap_addr(src_map, mapping));
6768 	dest_desc->addr_hi = src_desc->addr_hi;
6769 	dest_desc->addr_lo = src_desc->addr_lo;
6770 
6771 	/* Ensure that the update to the skb happens after the physical
6772 	 * addresses have been transferred to the new BD location.
6773 	 */
6774 	smp_wmb();
6775 
6776 	src_map->data = NULL;
6777 }
6778 
6779 /* The RX ring scheme is composed of multiple rings which post fresh
6780  * buffers to the chip, and one special ring the chip uses to report
6781  * status back to the host.
6782  *
6783  * The special ring reports the status of received packets to the
6784  * host.  The chip does not write into the original descriptor the
6785  * RX buffer was obtained from.  The chip simply takes the original
6786  * descriptor as provided by the host, updates the status and length
6787  * field, then writes this into the next status ring entry.
6788  *
6789  * Each ring the host uses to post buffers to the chip is described
6790  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
6791  * it is first placed into the on-chip ram.  When the packet's length
6792  * is known, it walks down the TG3_BDINFO entries to select the ring.
6793  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6794  * which is within the range of the new packet's length is chosen.
6795  *
6796  * The "separate ring for rx status" scheme may sound queer, but it makes
6797  * sense from a cache coherency perspective.  If only the host writes
6798  * to the buffer post rings, and only the chip writes to the rx status
6799  * rings, then cache lines never move beyond shared-modified state.
6800  * If both the host and chip were to write into the same ring, cache line
6801  * eviction could occur since both entities want it in an exclusive state.
6802  */
tg3_rx(struct tg3_napi * tnapi,int budget)6803 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6804 {
6805 	struct tg3 *tp = tnapi->tp;
6806 	u32 work_mask, rx_std_posted = 0;
6807 	u32 std_prod_idx, jmb_prod_idx;
6808 	u32 sw_idx = tnapi->rx_rcb_ptr;
6809 	u16 hw_idx;
6810 	int received;
6811 	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6812 
6813 	hw_idx = *(tnapi->rx_rcb_prod_idx);
6814 	/*
6815 	 * We need to order the read of hw_idx and the read of
6816 	 * the opaque cookie.
6817 	 */
6818 	rmb();
6819 	work_mask = 0;
6820 	received = 0;
6821 	std_prod_idx = tpr->rx_std_prod_idx;
6822 	jmb_prod_idx = tpr->rx_jmb_prod_idx;
6823 	while (sw_idx != hw_idx && budget > 0) {
6824 		struct ring_info *ri;
6825 		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6826 		unsigned int len;
6827 		struct sk_buff *skb;
6828 		dma_addr_t dma_addr;
6829 		u32 opaque_key, desc_idx, *post_ptr;
6830 		u8 *data;
6831 		u64 tstamp = 0;
6832 
6833 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6834 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6835 		if (opaque_key == RXD_OPAQUE_RING_STD) {
6836 			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6837 			dma_addr = dma_unmap_addr(ri, mapping);
6838 			data = ri->data;
6839 			post_ptr = &std_prod_idx;
6840 			rx_std_posted++;
6841 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6842 			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6843 			dma_addr = dma_unmap_addr(ri, mapping);
6844 			data = ri->data;
6845 			post_ptr = &jmb_prod_idx;
6846 		} else
6847 			goto next_pkt_nopost;
6848 
6849 		work_mask |= opaque_key;
6850 
6851 		if (desc->err_vlan & RXD_ERR_MASK) {
6852 		drop_it:
6853 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6854 				       desc_idx, *post_ptr);
6855 		drop_it_no_recycle:
6856 			/* Other statistics kept track of by card. */
6857 			tnapi->rx_dropped++;
6858 			goto next_pkt;
6859 		}
6860 
6861 		prefetch(data + TG3_RX_OFFSET(tp));
6862 		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6863 		      ETH_FCS_LEN;
6864 
6865 		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6866 		     RXD_FLAG_PTPSTAT_PTPV1 ||
6867 		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6868 		     RXD_FLAG_PTPSTAT_PTPV2) {
6869 			tstamp = tr32(TG3_RX_TSTAMP_LSB);
6870 			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6871 		}
6872 
6873 		if (len > TG3_RX_COPY_THRESH(tp)) {
6874 			int skb_size;
6875 			unsigned int frag_size;
6876 
6877 			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6878 						    *post_ptr, &frag_size);
6879 			if (skb_size < 0)
6880 				goto drop_it;
6881 
6882 			dma_unmap_single(&tp->pdev->dev, dma_addr, skb_size,
6883 					 DMA_FROM_DEVICE);
6884 
6885 			/* Ensure that the update to the data happens
6886 			 * after the usage of the old DMA mapping.
6887 			 */
6888 			smp_wmb();
6889 
6890 			ri->data = NULL;
6891 
6892 			if (frag_size)
6893 				skb = build_skb(data, frag_size);
6894 			else
6895 				skb = slab_build_skb(data);
6896 			if (!skb) {
6897 				tg3_frag_free(frag_size != 0, data);
6898 				goto drop_it_no_recycle;
6899 			}
6900 			skb_reserve(skb, TG3_RX_OFFSET(tp));
6901 		} else {
6902 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6903 				       desc_idx, *post_ptr);
6904 
6905 			skb = netdev_alloc_skb(tp->dev,
6906 					       len + TG3_RAW_IP_ALIGN);
6907 			if (skb == NULL)
6908 				goto drop_it_no_recycle;
6909 
6910 			skb_reserve(skb, TG3_RAW_IP_ALIGN);
6911 			dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr, len,
6912 						DMA_FROM_DEVICE);
6913 			memcpy(skb->data,
6914 			       data + TG3_RX_OFFSET(tp),
6915 			       len);
6916 			dma_sync_single_for_device(&tp->pdev->dev, dma_addr,
6917 						   len, DMA_FROM_DEVICE);
6918 		}
6919 
6920 		skb_put(skb, len);
6921 		if (tstamp)
6922 			tg3_hwclock_to_timestamp(tp, tstamp,
6923 						 skb_hwtstamps(skb));
6924 
6925 		if ((tp->dev->features & NETIF_F_RXCSUM) &&
6926 		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6927 		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6928 		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
6929 			skb->ip_summed = CHECKSUM_UNNECESSARY;
6930 		else
6931 			skb_checksum_none_assert(skb);
6932 
6933 		skb->protocol = eth_type_trans(skb, tp->dev);
6934 
6935 		if (len > (tp->dev->mtu + ETH_HLEN) &&
6936 		    skb->protocol != htons(ETH_P_8021Q) &&
6937 		    skb->protocol != htons(ETH_P_8021AD)) {
6938 			dev_kfree_skb_any(skb);
6939 			goto drop_it_no_recycle;
6940 		}
6941 
6942 		if (desc->type_flags & RXD_FLAG_VLAN &&
6943 		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6944 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6945 					       desc->err_vlan & RXD_VLAN_MASK);
6946 
6947 		napi_gro_receive(&tnapi->napi, skb);
6948 
6949 		received++;
6950 		budget--;
6951 
6952 next_pkt:
6953 		(*post_ptr)++;
6954 
6955 		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6956 			tpr->rx_std_prod_idx = std_prod_idx &
6957 					       tp->rx_std_ring_mask;
6958 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6959 				     tpr->rx_std_prod_idx);
6960 			work_mask &= ~RXD_OPAQUE_RING_STD;
6961 			rx_std_posted = 0;
6962 		}
6963 next_pkt_nopost:
6964 		sw_idx++;
6965 		sw_idx &= tp->rx_ret_ring_mask;
6966 
6967 		/* Refresh hw_idx to see if there is new work */
6968 		if (sw_idx == hw_idx) {
6969 			hw_idx = *(tnapi->rx_rcb_prod_idx);
6970 			rmb();
6971 		}
6972 	}
6973 
6974 	/* ACK the status ring. */
6975 	tnapi->rx_rcb_ptr = sw_idx;
6976 	tw32_rx_mbox(tnapi->consmbox, sw_idx);
6977 
6978 	/* Refill RX ring(s). */
6979 	if (!tg3_flag(tp, ENABLE_RSS)) {
6980 		/* Sync BD data before updating mailbox */
6981 		wmb();
6982 
6983 		if (work_mask & RXD_OPAQUE_RING_STD) {
6984 			tpr->rx_std_prod_idx = std_prod_idx &
6985 					       tp->rx_std_ring_mask;
6986 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6987 				     tpr->rx_std_prod_idx);
6988 		}
6989 		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6990 			tpr->rx_jmb_prod_idx = jmb_prod_idx &
6991 					       tp->rx_jmb_ring_mask;
6992 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6993 				     tpr->rx_jmb_prod_idx);
6994 		}
6995 	} else if (work_mask) {
6996 		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6997 		 * updated before the producer indices can be updated.
6998 		 */
6999 		smp_wmb();
7000 
7001 		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
7002 		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
7003 
7004 		if (tnapi != &tp->napi[1]) {
7005 			tp->rx_refill = true;
7006 			napi_schedule(&tp->napi[1].napi);
7007 		}
7008 	}
7009 
7010 	return received;
7011 }
7012 
tg3_poll_link(struct tg3 * tp)7013 static void tg3_poll_link(struct tg3 *tp)
7014 {
7015 	/* handle link change and other phy events */
7016 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7017 		struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7018 
7019 		if (sblk->status & SD_STATUS_LINK_CHG) {
7020 			sblk->status = SD_STATUS_UPDATED |
7021 				       (sblk->status & ~SD_STATUS_LINK_CHG);
7022 			spin_lock(&tp->lock);
7023 			if (tg3_flag(tp, USE_PHYLIB)) {
7024 				tw32_f(MAC_STATUS,
7025 				     (MAC_STATUS_SYNC_CHANGED |
7026 				      MAC_STATUS_CFG_CHANGED |
7027 				      MAC_STATUS_MI_COMPLETION |
7028 				      MAC_STATUS_LNKSTATE_CHANGED));
7029 				udelay(40);
7030 			} else
7031 				tg3_setup_phy(tp, false);
7032 			spin_unlock(&tp->lock);
7033 		}
7034 	}
7035 }
7036 
tg3_rx_prodring_xfer(struct tg3 * tp,struct tg3_rx_prodring_set * dpr,struct tg3_rx_prodring_set * spr)7037 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7038 				struct tg3_rx_prodring_set *dpr,
7039 				struct tg3_rx_prodring_set *spr)
7040 {
7041 	u32 si, di, cpycnt, src_prod_idx;
7042 	int i, err = 0;
7043 
7044 	while (1) {
7045 		src_prod_idx = spr->rx_std_prod_idx;
7046 
7047 		/* Make sure updates to the rx_std_buffers[] entries and the
7048 		 * standard producer index are seen in the correct order.
7049 		 */
7050 		smp_rmb();
7051 
7052 		if (spr->rx_std_cons_idx == src_prod_idx)
7053 			break;
7054 
7055 		if (spr->rx_std_cons_idx < src_prod_idx)
7056 			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7057 		else
7058 			cpycnt = tp->rx_std_ring_mask + 1 -
7059 				 spr->rx_std_cons_idx;
7060 
7061 		cpycnt = min(cpycnt,
7062 			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7063 
7064 		si = spr->rx_std_cons_idx;
7065 		di = dpr->rx_std_prod_idx;
7066 
7067 		for (i = di; i < di + cpycnt; i++) {
7068 			if (dpr->rx_std_buffers[i].data) {
7069 				cpycnt = i - di;
7070 				err = -ENOSPC;
7071 				break;
7072 			}
7073 		}
7074 
7075 		if (!cpycnt)
7076 			break;
7077 
7078 		/* Ensure that updates to the rx_std_buffers ring and the
7079 		 * shadowed hardware producer ring from tg3_recycle_skb() are
7080 		 * ordered correctly WRT the skb check above.
7081 		 */
7082 		smp_rmb();
7083 
7084 		memcpy(&dpr->rx_std_buffers[di],
7085 		       &spr->rx_std_buffers[si],
7086 		       cpycnt * sizeof(struct ring_info));
7087 
7088 		for (i = 0; i < cpycnt; i++, di++, si++) {
7089 			struct tg3_rx_buffer_desc *sbd, *dbd;
7090 			sbd = &spr->rx_std[si];
7091 			dbd = &dpr->rx_std[di];
7092 			dbd->addr_hi = sbd->addr_hi;
7093 			dbd->addr_lo = sbd->addr_lo;
7094 		}
7095 
7096 		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7097 				       tp->rx_std_ring_mask;
7098 		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7099 				       tp->rx_std_ring_mask;
7100 	}
7101 
7102 	while (1) {
7103 		src_prod_idx = spr->rx_jmb_prod_idx;
7104 
7105 		/* Make sure updates to the rx_jmb_buffers[] entries and
7106 		 * the jumbo producer index are seen in the correct order.
7107 		 */
7108 		smp_rmb();
7109 
7110 		if (spr->rx_jmb_cons_idx == src_prod_idx)
7111 			break;
7112 
7113 		if (spr->rx_jmb_cons_idx < src_prod_idx)
7114 			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7115 		else
7116 			cpycnt = tp->rx_jmb_ring_mask + 1 -
7117 				 spr->rx_jmb_cons_idx;
7118 
7119 		cpycnt = min(cpycnt,
7120 			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7121 
7122 		si = spr->rx_jmb_cons_idx;
7123 		di = dpr->rx_jmb_prod_idx;
7124 
7125 		for (i = di; i < di + cpycnt; i++) {
7126 			if (dpr->rx_jmb_buffers[i].data) {
7127 				cpycnt = i - di;
7128 				err = -ENOSPC;
7129 				break;
7130 			}
7131 		}
7132 
7133 		if (!cpycnt)
7134 			break;
7135 
7136 		/* Ensure that updates to the rx_jmb_buffers ring and the
7137 		 * shadowed hardware producer ring from tg3_recycle_skb() are
7138 		 * ordered correctly WRT the skb check above.
7139 		 */
7140 		smp_rmb();
7141 
7142 		memcpy(&dpr->rx_jmb_buffers[di],
7143 		       &spr->rx_jmb_buffers[si],
7144 		       cpycnt * sizeof(struct ring_info));
7145 
7146 		for (i = 0; i < cpycnt; i++, di++, si++) {
7147 			struct tg3_rx_buffer_desc *sbd, *dbd;
7148 			sbd = &spr->rx_jmb[si].std;
7149 			dbd = &dpr->rx_jmb[di].std;
7150 			dbd->addr_hi = sbd->addr_hi;
7151 			dbd->addr_lo = sbd->addr_lo;
7152 		}
7153 
7154 		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7155 				       tp->rx_jmb_ring_mask;
7156 		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7157 				       tp->rx_jmb_ring_mask;
7158 	}
7159 
7160 	return err;
7161 }
7162 
tg3_poll_work(struct tg3_napi * tnapi,int work_done,int budget)7163 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7164 {
7165 	struct tg3 *tp = tnapi->tp;
7166 
7167 	/* run TX completion thread */
7168 	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7169 		tg3_tx(tnapi);
7170 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7171 			return work_done;
7172 	}
7173 
7174 	if (!tnapi->rx_rcb_prod_idx)
7175 		return work_done;
7176 
7177 	/* run RX thread, within the bounds set by NAPI.
7178 	 * All RX "locking" is done by ensuring outside
7179 	 * code synchronizes with tg3->napi.poll()
7180 	 */
7181 	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7182 		work_done += tg3_rx(tnapi, budget - work_done);
7183 
7184 	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7185 		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7186 		int i, err = 0;
7187 		u32 std_prod_idx = dpr->rx_std_prod_idx;
7188 		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7189 
7190 		tp->rx_refill = false;
7191 		for (i = 1; i <= tp->rxq_cnt; i++)
7192 			err |= tg3_rx_prodring_xfer(tp, dpr,
7193 						    &tp->napi[i].prodring);
7194 
7195 		wmb();
7196 
7197 		if (std_prod_idx != dpr->rx_std_prod_idx)
7198 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7199 				     dpr->rx_std_prod_idx);
7200 
7201 		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7202 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7203 				     dpr->rx_jmb_prod_idx);
7204 
7205 		if (err)
7206 			tw32_f(HOSTCC_MODE, tp->coal_now);
7207 	}
7208 
7209 	return work_done;
7210 }
7211 
tg3_reset_task_schedule(struct tg3 * tp)7212 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7213 {
7214 	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7215 		schedule_work(&tp->reset_task);
7216 }
7217 
tg3_reset_task_cancel(struct tg3 * tp)7218 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7219 {
7220 	if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7221 		cancel_work_sync(&tp->reset_task);
7222 	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7223 }
7224 
tg3_poll_msix(struct napi_struct * napi,int budget)7225 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7226 {
7227 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7228 	struct tg3 *tp = tnapi->tp;
7229 	int work_done = 0;
7230 	struct tg3_hw_status *sblk = tnapi->hw_status;
7231 
7232 	while (1) {
7233 		work_done = tg3_poll_work(tnapi, work_done, budget);
7234 
7235 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7236 			goto tx_recovery;
7237 
7238 		if (unlikely(work_done >= budget))
7239 			break;
7240 
7241 		/* tp->last_tag is used in tg3_int_reenable() below
7242 		 * to tell the hw how much work has been processed,
7243 		 * so we must read it before checking for more work.
7244 		 */
7245 		tnapi->last_tag = sblk->status_tag;
7246 		tnapi->last_irq_tag = tnapi->last_tag;
7247 		rmb();
7248 
7249 		/* check for RX/TX work to do */
7250 		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7251 			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7252 
7253 			/* This test here is not race free, but will reduce
7254 			 * the number of interrupts by looping again.
7255 			 */
7256 			if (tnapi == &tp->napi[1] && tp->rx_refill)
7257 				continue;
7258 
7259 			napi_complete_done(napi, work_done);
7260 			/* Reenable interrupts. */
7261 			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7262 
7263 			/* This test here is synchronized by napi_schedule()
7264 			 * and napi_complete() to close the race condition.
7265 			 */
7266 			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7267 				tw32(HOSTCC_MODE, tp->coalesce_mode |
7268 						  HOSTCC_MODE_ENABLE |
7269 						  tnapi->coal_now);
7270 			}
7271 			break;
7272 		}
7273 	}
7274 
7275 	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7276 	return work_done;
7277 
7278 tx_recovery:
7279 	/* work_done is guaranteed to be less than budget. */
7280 	napi_complete(napi);
7281 	tg3_reset_task_schedule(tp);
7282 	return work_done;
7283 }
7284 
tg3_process_error(struct tg3 * tp)7285 static void tg3_process_error(struct tg3 *tp)
7286 {
7287 	u32 val;
7288 	bool real_error = false;
7289 
7290 	if (tg3_flag(tp, ERROR_PROCESSED))
7291 		return;
7292 
7293 	/* Check Flow Attention register */
7294 	val = tr32(HOSTCC_FLOW_ATTN);
7295 	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7296 		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7297 		real_error = true;
7298 	}
7299 
7300 	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7301 		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7302 		real_error = true;
7303 	}
7304 
7305 	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7306 		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7307 		real_error = true;
7308 	}
7309 
7310 	if (!real_error)
7311 		return;
7312 
7313 	tg3_dump_state(tp);
7314 
7315 	tg3_flag_set(tp, ERROR_PROCESSED);
7316 	tg3_reset_task_schedule(tp);
7317 }
7318 
tg3_poll(struct napi_struct * napi,int budget)7319 static int tg3_poll(struct napi_struct *napi, int budget)
7320 {
7321 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7322 	struct tg3 *tp = tnapi->tp;
7323 	int work_done = 0;
7324 	struct tg3_hw_status *sblk = tnapi->hw_status;
7325 
7326 	while (1) {
7327 		if (sblk->status & SD_STATUS_ERROR)
7328 			tg3_process_error(tp);
7329 
7330 		tg3_poll_link(tp);
7331 
7332 		work_done = tg3_poll_work(tnapi, work_done, budget);
7333 
7334 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7335 			goto tx_recovery;
7336 
7337 		if (unlikely(work_done >= budget))
7338 			break;
7339 
7340 		if (tg3_flag(tp, TAGGED_STATUS)) {
7341 			/* tp->last_tag is used in tg3_int_reenable() below
7342 			 * to tell the hw how much work has been processed,
7343 			 * so we must read it before checking for more work.
7344 			 */
7345 			tnapi->last_tag = sblk->status_tag;
7346 			tnapi->last_irq_tag = tnapi->last_tag;
7347 			rmb();
7348 		} else
7349 			sblk->status &= ~SD_STATUS_UPDATED;
7350 
7351 		if (likely(!tg3_has_work(tnapi))) {
7352 			napi_complete_done(napi, work_done);
7353 			tg3_int_reenable(tnapi);
7354 			break;
7355 		}
7356 	}
7357 
7358 	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7359 	return work_done;
7360 
7361 tx_recovery:
7362 	/* work_done is guaranteed to be less than budget. */
7363 	napi_complete(napi);
7364 	tg3_reset_task_schedule(tp);
7365 	return work_done;
7366 }
7367 
tg3_napi_disable(struct tg3 * tp)7368 static void tg3_napi_disable(struct tg3 *tp)
7369 {
7370 	int i;
7371 
7372 	for (i = tp->irq_cnt - 1; i >= 0; i--)
7373 		napi_disable(&tp->napi[i].napi);
7374 }
7375 
tg3_napi_enable(struct tg3 * tp)7376 static void tg3_napi_enable(struct tg3 *tp)
7377 {
7378 	int i;
7379 
7380 	for (i = 0; i < tp->irq_cnt; i++)
7381 		napi_enable(&tp->napi[i].napi);
7382 }
7383 
tg3_napi_init(struct tg3 * tp)7384 static void tg3_napi_init(struct tg3 *tp)
7385 {
7386 	int i;
7387 
7388 	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll);
7389 	for (i = 1; i < tp->irq_cnt; i++)
7390 		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix);
7391 }
7392 
tg3_napi_fini(struct tg3 * tp)7393 static void tg3_napi_fini(struct tg3 *tp)
7394 {
7395 	int i;
7396 
7397 	for (i = 0; i < tp->irq_cnt; i++)
7398 		netif_napi_del(&tp->napi[i].napi);
7399 }
7400 
tg3_netif_stop(struct tg3 * tp)7401 static inline void tg3_netif_stop(struct tg3 *tp)
7402 {
7403 	netif_trans_update(tp->dev);	/* prevent tx timeout */
7404 	tg3_napi_disable(tp);
7405 	netif_carrier_off(tp->dev);
7406 	netif_tx_disable(tp->dev);
7407 }
7408 
7409 /* tp->lock must be held */
tg3_netif_start(struct tg3 * tp)7410 static inline void tg3_netif_start(struct tg3 *tp)
7411 {
7412 	tg3_ptp_resume(tp);
7413 
7414 	/* NOTE: unconditional netif_tx_wake_all_queues is only
7415 	 * appropriate so long as all callers are assured to
7416 	 * have free tx slots (such as after tg3_init_hw)
7417 	 */
7418 	netif_tx_wake_all_queues(tp->dev);
7419 
7420 	if (tp->link_up)
7421 		netif_carrier_on(tp->dev);
7422 
7423 	tg3_napi_enable(tp);
7424 	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7425 	tg3_enable_ints(tp);
7426 }
7427 
tg3_irq_quiesce(struct tg3 * tp)7428 static void tg3_irq_quiesce(struct tg3 *tp)
7429 	__releases(tp->lock)
7430 	__acquires(tp->lock)
7431 {
7432 	int i;
7433 
7434 	BUG_ON(tp->irq_sync);
7435 
7436 	tp->irq_sync = 1;
7437 	smp_mb();
7438 
7439 	spin_unlock_bh(&tp->lock);
7440 
7441 	for (i = 0; i < tp->irq_cnt; i++)
7442 		synchronize_irq(tp->napi[i].irq_vec);
7443 
7444 	spin_lock_bh(&tp->lock);
7445 }
7446 
7447 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7448  * If irq_sync is non-zero, then the IRQ handler must be synchronized
7449  * with as well.  Most of the time, this is not necessary except when
7450  * shutting down the device.
7451  */
tg3_full_lock(struct tg3 * tp,int irq_sync)7452 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7453 {
7454 	spin_lock_bh(&tp->lock);
7455 	if (irq_sync)
7456 		tg3_irq_quiesce(tp);
7457 }
7458 
tg3_full_unlock(struct tg3 * tp)7459 static inline void tg3_full_unlock(struct tg3 *tp)
7460 {
7461 	spin_unlock_bh(&tp->lock);
7462 }
7463 
7464 /* One-shot MSI handler - Chip automatically disables interrupt
7465  * after sending MSI so driver doesn't have to do it.
7466  */
tg3_msi_1shot(int irq,void * dev_id)7467 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7468 {
7469 	struct tg3_napi *tnapi = dev_id;
7470 	struct tg3 *tp = tnapi->tp;
7471 
7472 	prefetch(tnapi->hw_status);
7473 	if (tnapi->rx_rcb)
7474 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7475 
7476 	if (likely(!tg3_irq_sync(tp)))
7477 		napi_schedule(&tnapi->napi);
7478 
7479 	return IRQ_HANDLED;
7480 }
7481 
7482 /* MSI ISR - No need to check for interrupt sharing and no need to
7483  * flush status block and interrupt mailbox. PCI ordering rules
7484  * guarantee that MSI will arrive after the status block.
7485  */
tg3_msi(int irq,void * dev_id)7486 static irqreturn_t tg3_msi(int irq, void *dev_id)
7487 {
7488 	struct tg3_napi *tnapi = dev_id;
7489 	struct tg3 *tp = tnapi->tp;
7490 
7491 	prefetch(tnapi->hw_status);
7492 	if (tnapi->rx_rcb)
7493 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7494 	/*
7495 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7496 	 * chip-internal interrupt pending events.
7497 	 * Writing non-zero to intr-mbox-0 additional tells the
7498 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7499 	 * event coalescing.
7500 	 */
7501 	tw32_mailbox(tnapi->int_mbox, 0x00000001);
7502 	if (likely(!tg3_irq_sync(tp)))
7503 		napi_schedule(&tnapi->napi);
7504 
7505 	return IRQ_RETVAL(1);
7506 }
7507 
tg3_interrupt(int irq,void * dev_id)7508 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7509 {
7510 	struct tg3_napi *tnapi = dev_id;
7511 	struct tg3 *tp = tnapi->tp;
7512 	struct tg3_hw_status *sblk = tnapi->hw_status;
7513 	unsigned int handled = 1;
7514 
7515 	/* In INTx mode, it is possible for the interrupt to arrive at
7516 	 * the CPU before the status block posted prior to the interrupt.
7517 	 * Reading the PCI State register will confirm whether the
7518 	 * interrupt is ours and will flush the status block.
7519 	 */
7520 	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7521 		if (tg3_flag(tp, CHIP_RESETTING) ||
7522 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7523 			handled = 0;
7524 			goto out;
7525 		}
7526 	}
7527 
7528 	/*
7529 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7530 	 * chip-internal interrupt pending events.
7531 	 * Writing non-zero to intr-mbox-0 additional tells the
7532 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7533 	 * event coalescing.
7534 	 *
7535 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7536 	 * spurious interrupts.  The flush impacts performance but
7537 	 * excessive spurious interrupts can be worse in some cases.
7538 	 */
7539 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7540 	if (tg3_irq_sync(tp))
7541 		goto out;
7542 	sblk->status &= ~SD_STATUS_UPDATED;
7543 	if (likely(tg3_has_work(tnapi))) {
7544 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7545 		napi_schedule(&tnapi->napi);
7546 	} else {
7547 		/* No work, shared interrupt perhaps?  re-enable
7548 		 * interrupts, and flush that PCI write
7549 		 */
7550 		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7551 			       0x00000000);
7552 	}
7553 out:
7554 	return IRQ_RETVAL(handled);
7555 }
7556 
tg3_interrupt_tagged(int irq,void * dev_id)7557 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7558 {
7559 	struct tg3_napi *tnapi = dev_id;
7560 	struct tg3 *tp = tnapi->tp;
7561 	struct tg3_hw_status *sblk = tnapi->hw_status;
7562 	unsigned int handled = 1;
7563 
7564 	/* In INTx mode, it is possible for the interrupt to arrive at
7565 	 * the CPU before the status block posted prior to the interrupt.
7566 	 * Reading the PCI State register will confirm whether the
7567 	 * interrupt is ours and will flush the status block.
7568 	 */
7569 	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7570 		if (tg3_flag(tp, CHIP_RESETTING) ||
7571 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7572 			handled = 0;
7573 			goto out;
7574 		}
7575 	}
7576 
7577 	/*
7578 	 * writing any value to intr-mbox-0 clears PCI INTA# and
7579 	 * chip-internal interrupt pending events.
7580 	 * writing non-zero to intr-mbox-0 additional tells the
7581 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7582 	 * event coalescing.
7583 	 *
7584 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7585 	 * spurious interrupts.  The flush impacts performance but
7586 	 * excessive spurious interrupts can be worse in some cases.
7587 	 */
7588 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7589 
7590 	/*
7591 	 * In a shared interrupt configuration, sometimes other devices'
7592 	 * interrupts will scream.  We record the current status tag here
7593 	 * so that the above check can report that the screaming interrupts
7594 	 * are unhandled.  Eventually they will be silenced.
7595 	 */
7596 	tnapi->last_irq_tag = sblk->status_tag;
7597 
7598 	if (tg3_irq_sync(tp))
7599 		goto out;
7600 
7601 	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7602 
7603 	napi_schedule(&tnapi->napi);
7604 
7605 out:
7606 	return IRQ_RETVAL(handled);
7607 }
7608 
7609 /* ISR for interrupt test */
tg3_test_isr(int irq,void * dev_id)7610 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7611 {
7612 	struct tg3_napi *tnapi = dev_id;
7613 	struct tg3 *tp = tnapi->tp;
7614 	struct tg3_hw_status *sblk = tnapi->hw_status;
7615 
7616 	if ((sblk->status & SD_STATUS_UPDATED) ||
7617 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7618 		tg3_disable_ints(tp);
7619 		return IRQ_RETVAL(1);
7620 	}
7621 	return IRQ_RETVAL(0);
7622 }
7623 
7624 #ifdef CONFIG_NET_POLL_CONTROLLER
tg3_poll_controller(struct net_device * dev)7625 static void tg3_poll_controller(struct net_device *dev)
7626 {
7627 	int i;
7628 	struct tg3 *tp = netdev_priv(dev);
7629 
7630 	if (tg3_irq_sync(tp))
7631 		return;
7632 
7633 	for (i = 0; i < tp->irq_cnt; i++)
7634 		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7635 }
7636 #endif
7637 
tg3_tx_timeout(struct net_device * dev,unsigned int txqueue)7638 static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue)
7639 {
7640 	struct tg3 *tp = netdev_priv(dev);
7641 
7642 	if (netif_msg_tx_err(tp)) {
7643 		netdev_err(dev, "transmit timed out, resetting\n");
7644 		tg3_dump_state(tp);
7645 	}
7646 
7647 	tg3_reset_task_schedule(tp);
7648 }
7649 
7650 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
tg3_4g_overflow_test(dma_addr_t mapping,int len)7651 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7652 {
7653 	u32 base = (u32) mapping & 0xffffffff;
7654 
7655 	return base + len + 8 < base;
7656 }
7657 
7658 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7659  * of any 4GB boundaries: 4G, 8G, etc
7660  */
tg3_4g_tso_overflow_test(struct tg3 * tp,dma_addr_t mapping,u32 len,u32 mss)7661 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7662 					   u32 len, u32 mss)
7663 {
7664 	if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7665 		u32 base = (u32) mapping & 0xffffffff;
7666 
7667 		return ((base + len + (mss & 0x3fff)) < base);
7668 	}
7669 	return 0;
7670 }
7671 
7672 /* Test for DMA addresses > 40-bit */
tg3_40bit_overflow_test(struct tg3 * tp,dma_addr_t mapping,int len)7673 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7674 					  int len)
7675 {
7676 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7677 	if (tg3_flag(tp, 40BIT_DMA_BUG))
7678 		return ((u64) mapping + len) > DMA_BIT_MASK(40);
7679 	return 0;
7680 #else
7681 	return 0;
7682 #endif
7683 }
7684 
tg3_tx_set_bd(struct tg3_tx_buffer_desc * txbd,dma_addr_t mapping,u32 len,u32 flags,u32 mss,u32 vlan)7685 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7686 				 dma_addr_t mapping, u32 len, u32 flags,
7687 				 u32 mss, u32 vlan)
7688 {
7689 	txbd->addr_hi = ((u64) mapping >> 32);
7690 	txbd->addr_lo = ((u64) mapping & 0xffffffff);
7691 	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7692 	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7693 }
7694 
tg3_tx_frag_set(struct tg3_napi * tnapi,u32 * entry,u32 * budget,dma_addr_t map,u32 len,u32 flags,u32 mss,u32 vlan)7695 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7696 			    dma_addr_t map, u32 len, u32 flags,
7697 			    u32 mss, u32 vlan)
7698 {
7699 	struct tg3 *tp = tnapi->tp;
7700 	bool hwbug = false;
7701 
7702 	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7703 		hwbug = true;
7704 
7705 	if (tg3_4g_overflow_test(map, len))
7706 		hwbug = true;
7707 
7708 	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7709 		hwbug = true;
7710 
7711 	if (tg3_40bit_overflow_test(tp, map, len))
7712 		hwbug = true;
7713 
7714 	if (tp->dma_limit) {
7715 		u32 prvidx = *entry;
7716 		u32 tmp_flag = flags & ~TXD_FLAG_END;
7717 		while (len > tp->dma_limit && *budget) {
7718 			u32 frag_len = tp->dma_limit;
7719 			len -= tp->dma_limit;
7720 
7721 			/* Avoid the 8byte DMA problem */
7722 			if (len <= 8) {
7723 				len += tp->dma_limit / 2;
7724 				frag_len = tp->dma_limit / 2;
7725 			}
7726 
7727 			tnapi->tx_buffers[*entry].fragmented = true;
7728 
7729 			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7730 				      frag_len, tmp_flag, mss, vlan);
7731 			*budget -= 1;
7732 			prvidx = *entry;
7733 			*entry = NEXT_TX(*entry);
7734 
7735 			map += frag_len;
7736 		}
7737 
7738 		if (len) {
7739 			if (*budget) {
7740 				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7741 					      len, flags, mss, vlan);
7742 				*budget -= 1;
7743 				*entry = NEXT_TX(*entry);
7744 			} else {
7745 				hwbug = true;
7746 				tnapi->tx_buffers[prvidx].fragmented = false;
7747 			}
7748 		}
7749 	} else {
7750 		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7751 			      len, flags, mss, vlan);
7752 		*entry = NEXT_TX(*entry);
7753 	}
7754 
7755 	return hwbug;
7756 }
7757 
tg3_tx_skb_unmap(struct tg3_napi * tnapi,u32 entry,int last)7758 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7759 {
7760 	int i;
7761 	struct sk_buff *skb;
7762 	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7763 
7764 	skb = txb->skb;
7765 	txb->skb = NULL;
7766 
7767 	dma_unmap_single(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping),
7768 			 skb_headlen(skb), DMA_TO_DEVICE);
7769 
7770 	while (txb->fragmented) {
7771 		txb->fragmented = false;
7772 		entry = NEXT_TX(entry);
7773 		txb = &tnapi->tx_buffers[entry];
7774 	}
7775 
7776 	for (i = 0; i <= last; i++) {
7777 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7778 
7779 		entry = NEXT_TX(entry);
7780 		txb = &tnapi->tx_buffers[entry];
7781 
7782 		dma_unmap_page(&tnapi->tp->pdev->dev,
7783 			       dma_unmap_addr(txb, mapping),
7784 			       skb_frag_size(frag), DMA_TO_DEVICE);
7785 
7786 		while (txb->fragmented) {
7787 			txb->fragmented = false;
7788 			entry = NEXT_TX(entry);
7789 			txb = &tnapi->tx_buffers[entry];
7790 		}
7791 	}
7792 }
7793 
7794 /* Workaround 4GB and 40-bit hardware DMA bugs. */
tigon3_dma_hwbug_workaround(struct tg3_napi * tnapi,struct sk_buff ** pskb,u32 * entry,u32 * budget,u32 base_flags,u32 mss,u32 vlan)7795 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7796 				       struct sk_buff **pskb,
7797 				       u32 *entry, u32 *budget,
7798 				       u32 base_flags, u32 mss, u32 vlan)
7799 {
7800 	struct tg3 *tp = tnapi->tp;
7801 	struct sk_buff *new_skb, *skb = *pskb;
7802 	dma_addr_t new_addr = 0;
7803 	int ret = 0;
7804 
7805 	if (tg3_asic_rev(tp) != ASIC_REV_5701)
7806 		new_skb = skb_copy(skb, GFP_ATOMIC);
7807 	else {
7808 		int more_headroom = 4 - ((unsigned long)skb->data & 3);
7809 
7810 		new_skb = skb_copy_expand(skb,
7811 					  skb_headroom(skb) + more_headroom,
7812 					  skb_tailroom(skb), GFP_ATOMIC);
7813 	}
7814 
7815 	if (!new_skb) {
7816 		ret = -1;
7817 	} else {
7818 		/* New SKB is guaranteed to be linear. */
7819 		new_addr = dma_map_single(&tp->pdev->dev, new_skb->data,
7820 					  new_skb->len, DMA_TO_DEVICE);
7821 		/* Make sure the mapping succeeded */
7822 		if (dma_mapping_error(&tp->pdev->dev, new_addr)) {
7823 			dev_kfree_skb_any(new_skb);
7824 			ret = -1;
7825 		} else {
7826 			u32 save_entry = *entry;
7827 
7828 			base_flags |= TXD_FLAG_END;
7829 
7830 			tnapi->tx_buffers[*entry].skb = new_skb;
7831 			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7832 					   mapping, new_addr);
7833 
7834 			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7835 					    new_skb->len, base_flags,
7836 					    mss, vlan)) {
7837 				tg3_tx_skb_unmap(tnapi, save_entry, -1);
7838 				dev_kfree_skb_any(new_skb);
7839 				ret = -1;
7840 			}
7841 		}
7842 	}
7843 
7844 	dev_consume_skb_any(skb);
7845 	*pskb = new_skb;
7846 	return ret;
7847 }
7848 
tg3_tso_bug_gso_check(struct tg3_napi * tnapi,struct sk_buff * skb)7849 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7850 {
7851 	/* Check if we will never have enough descriptors,
7852 	 * as gso_segs can be more than current ring size
7853 	 */
7854 	return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7855 }
7856 
7857 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7858 
7859 /* Use GSO to workaround all TSO packets that meet HW bug conditions
7860  * indicated in tg3_tx_frag_set()
7861  */
tg3_tso_bug(struct tg3 * tp,struct tg3_napi * tnapi,struct netdev_queue * txq,struct sk_buff * skb)7862 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7863 		       struct netdev_queue *txq, struct sk_buff *skb)
7864 {
7865 	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7866 	struct sk_buff *segs, *seg, *next;
7867 
7868 	/* Estimate the number of fragments in the worst case */
7869 	if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7870 		netif_tx_stop_queue(txq);
7871 
7872 		/* netif_tx_stop_queue() must be done before checking
7873 		 * checking tx index in tg3_tx_avail() below, because in
7874 		 * tg3_tx(), we update tx index before checking for
7875 		 * netif_tx_queue_stopped().
7876 		 */
7877 		smp_mb();
7878 		if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7879 			return NETDEV_TX_BUSY;
7880 
7881 		netif_tx_wake_queue(txq);
7882 	}
7883 
7884 	segs = skb_gso_segment(skb, tp->dev->features &
7885 				    ~(NETIF_F_TSO | NETIF_F_TSO6));
7886 	if (IS_ERR(segs) || !segs) {
7887 		tnapi->tx_dropped++;
7888 		goto tg3_tso_bug_end;
7889 	}
7890 
7891 	skb_list_walk_safe(segs, seg, next) {
7892 		skb_mark_not_on_list(seg);
7893 		tg3_start_xmit(seg, tp->dev);
7894 	}
7895 
7896 tg3_tso_bug_end:
7897 	dev_consume_skb_any(skb);
7898 
7899 	return NETDEV_TX_OK;
7900 }
7901 
7902 /* hard_start_xmit for all devices */
tg3_start_xmit(struct sk_buff * skb,struct net_device * dev)7903 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7904 {
7905 	struct tg3 *tp = netdev_priv(dev);
7906 	u32 len, entry, base_flags, mss, vlan = 0;
7907 	u32 budget;
7908 	int i = -1, would_hit_hwbug;
7909 	dma_addr_t mapping;
7910 	struct tg3_napi *tnapi;
7911 	struct netdev_queue *txq;
7912 	unsigned int last;
7913 	struct iphdr *iph = NULL;
7914 	struct tcphdr *tcph = NULL;
7915 	__sum16 tcp_csum = 0, ip_csum = 0;
7916 	__be16 ip_tot_len = 0;
7917 
7918 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7919 	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7920 	if (tg3_flag(tp, ENABLE_TSS))
7921 		tnapi++;
7922 
7923 	budget = tg3_tx_avail(tnapi);
7924 
7925 	/* We are running in BH disabled context with netif_tx_lock
7926 	 * and TX reclaim runs via tp->napi.poll inside of a software
7927 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
7928 	 * no IRQ context deadlocks to worry about either.  Rejoice!
7929 	 */
7930 	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7931 		if (!netif_tx_queue_stopped(txq)) {
7932 			netif_tx_stop_queue(txq);
7933 
7934 			/* This is a hard error, log it. */
7935 			netdev_err(dev,
7936 				   "BUG! Tx Ring full when queue awake!\n");
7937 		}
7938 		return NETDEV_TX_BUSY;
7939 	}
7940 
7941 	entry = tnapi->tx_prod;
7942 	base_flags = 0;
7943 
7944 	mss = skb_shinfo(skb)->gso_size;
7945 	if (mss) {
7946 		u32 tcp_opt_len, hdr_len;
7947 
7948 		if (skb_cow_head(skb, 0))
7949 			goto drop;
7950 
7951 		iph = ip_hdr(skb);
7952 		tcp_opt_len = tcp_optlen(skb);
7953 
7954 		hdr_len = skb_tcp_all_headers(skb) - ETH_HLEN;
7955 
7956 		/* HW/FW can not correctly segment packets that have been
7957 		 * vlan encapsulated.
7958 		 */
7959 		if (skb->protocol == htons(ETH_P_8021Q) ||
7960 		    skb->protocol == htons(ETH_P_8021AD)) {
7961 			if (tg3_tso_bug_gso_check(tnapi, skb))
7962 				return tg3_tso_bug(tp, tnapi, txq, skb);
7963 			goto drop;
7964 		}
7965 
7966 		if (!skb_is_gso_v6(skb)) {
7967 			if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7968 			    tg3_flag(tp, TSO_BUG)) {
7969 				if (tg3_tso_bug_gso_check(tnapi, skb))
7970 					return tg3_tso_bug(tp, tnapi, txq, skb);
7971 				goto drop;
7972 			}
7973 			ip_csum = iph->check;
7974 			ip_tot_len = iph->tot_len;
7975 			iph->check = 0;
7976 			iph->tot_len = htons(mss + hdr_len);
7977 		}
7978 
7979 		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7980 			       TXD_FLAG_CPU_POST_DMA);
7981 
7982 		tcph = tcp_hdr(skb);
7983 		tcp_csum = tcph->check;
7984 
7985 		if (tg3_flag(tp, HW_TSO_1) ||
7986 		    tg3_flag(tp, HW_TSO_2) ||
7987 		    tg3_flag(tp, HW_TSO_3)) {
7988 			tcph->check = 0;
7989 			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7990 		} else {
7991 			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7992 							 0, IPPROTO_TCP, 0);
7993 		}
7994 
7995 		if (tg3_flag(tp, HW_TSO_3)) {
7996 			mss |= (hdr_len & 0xc) << 12;
7997 			if (hdr_len & 0x10)
7998 				base_flags |= 0x00000010;
7999 			base_flags |= (hdr_len & 0x3e0) << 5;
8000 		} else if (tg3_flag(tp, HW_TSO_2))
8001 			mss |= hdr_len << 9;
8002 		else if (tg3_flag(tp, HW_TSO_1) ||
8003 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
8004 			if (tcp_opt_len || iph->ihl > 5) {
8005 				int tsflags;
8006 
8007 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8008 				mss |= (tsflags << 11);
8009 			}
8010 		} else {
8011 			if (tcp_opt_len || iph->ihl > 5) {
8012 				int tsflags;
8013 
8014 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8015 				base_flags |= tsflags << 12;
8016 			}
8017 		}
8018 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8019 		/* HW/FW can not correctly checksum packets that have been
8020 		 * vlan encapsulated.
8021 		 */
8022 		if (skb->protocol == htons(ETH_P_8021Q) ||
8023 		    skb->protocol == htons(ETH_P_8021AD)) {
8024 			if (skb_checksum_help(skb))
8025 				goto drop;
8026 		} else  {
8027 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
8028 		}
8029 	}
8030 
8031 	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8032 	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
8033 		base_flags |= TXD_FLAG_JMB_PKT;
8034 
8035 	if (skb_vlan_tag_present(skb)) {
8036 		base_flags |= TXD_FLAG_VLAN;
8037 		vlan = skb_vlan_tag_get(skb);
8038 	}
8039 
8040 	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8041 	    tg3_flag(tp, TX_TSTAMP_EN)) {
8042 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8043 		base_flags |= TXD_FLAG_HWTSTAMP;
8044 	}
8045 
8046 	len = skb_headlen(skb);
8047 
8048 	mapping = dma_map_single(&tp->pdev->dev, skb->data, len,
8049 				 DMA_TO_DEVICE);
8050 	if (dma_mapping_error(&tp->pdev->dev, mapping))
8051 		goto drop;
8052 
8053 
8054 	tnapi->tx_buffers[entry].skb = skb;
8055 	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8056 
8057 	would_hit_hwbug = 0;
8058 
8059 	if (tg3_flag(tp, 5701_DMA_BUG))
8060 		would_hit_hwbug = 1;
8061 
8062 	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8063 			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8064 			    mss, vlan)) {
8065 		would_hit_hwbug = 1;
8066 	} else if (skb_shinfo(skb)->nr_frags > 0) {
8067 		u32 tmp_mss = mss;
8068 
8069 		if (!tg3_flag(tp, HW_TSO_1) &&
8070 		    !tg3_flag(tp, HW_TSO_2) &&
8071 		    !tg3_flag(tp, HW_TSO_3))
8072 			tmp_mss = 0;
8073 
8074 		/* Now loop through additional data
8075 		 * fragments, and queue them.
8076 		 */
8077 		last = skb_shinfo(skb)->nr_frags - 1;
8078 		for (i = 0; i <= last; i++) {
8079 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8080 
8081 			len = skb_frag_size(frag);
8082 			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8083 						   len, DMA_TO_DEVICE);
8084 
8085 			tnapi->tx_buffers[entry].skb = NULL;
8086 			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8087 					   mapping);
8088 			if (dma_mapping_error(&tp->pdev->dev, mapping))
8089 				goto dma_error;
8090 
8091 			if (!budget ||
8092 			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8093 					    len, base_flags |
8094 					    ((i == last) ? TXD_FLAG_END : 0),
8095 					    tmp_mss, vlan)) {
8096 				would_hit_hwbug = 1;
8097 				break;
8098 			}
8099 		}
8100 	}
8101 
8102 	if (would_hit_hwbug) {
8103 		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8104 
8105 		if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8106 			/* If it's a TSO packet, do GSO instead of
8107 			 * allocating and copying to a large linear SKB
8108 			 */
8109 			if (ip_tot_len) {
8110 				iph->check = ip_csum;
8111 				iph->tot_len = ip_tot_len;
8112 			}
8113 			tcph->check = tcp_csum;
8114 			return tg3_tso_bug(tp, tnapi, txq, skb);
8115 		}
8116 
8117 		/* If the workaround fails due to memory/mapping
8118 		 * failure, silently drop this packet.
8119 		 */
8120 		entry = tnapi->tx_prod;
8121 		budget = tg3_tx_avail(tnapi);
8122 		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8123 						base_flags, mss, vlan))
8124 			goto drop_nofree;
8125 	}
8126 
8127 	skb_tx_timestamp(skb);
8128 	netdev_tx_sent_queue(txq, skb->len);
8129 
8130 	/* Sync BD data before updating mailbox */
8131 	wmb();
8132 
8133 	tnapi->tx_prod = entry;
8134 	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8135 		netif_tx_stop_queue(txq);
8136 
8137 		/* netif_tx_stop_queue() must be done before checking
8138 		 * checking tx index in tg3_tx_avail() below, because in
8139 		 * tg3_tx(), we update tx index before checking for
8140 		 * netif_tx_queue_stopped().
8141 		 */
8142 		smp_mb();
8143 		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8144 			netif_tx_wake_queue(txq);
8145 	}
8146 
8147 	if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
8148 		/* Packets are ready, update Tx producer idx on card. */
8149 		tw32_tx_mbox(tnapi->prodmbox, entry);
8150 	}
8151 
8152 	return NETDEV_TX_OK;
8153 
8154 dma_error:
8155 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8156 	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8157 drop:
8158 	dev_kfree_skb_any(skb);
8159 drop_nofree:
8160 	tnapi->tx_dropped++;
8161 	return NETDEV_TX_OK;
8162 }
8163 
tg3_mac_loopback(struct tg3 * tp,bool enable)8164 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8165 {
8166 	if (enable) {
8167 		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8168 				  MAC_MODE_PORT_MODE_MASK);
8169 
8170 		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8171 
8172 		if (!tg3_flag(tp, 5705_PLUS))
8173 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8174 
8175 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8176 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8177 		else
8178 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8179 	} else {
8180 		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8181 
8182 		if (tg3_flag(tp, 5705_PLUS) ||
8183 		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8184 		    tg3_asic_rev(tp) == ASIC_REV_5700)
8185 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8186 	}
8187 
8188 	tw32(MAC_MODE, tp->mac_mode);
8189 	udelay(40);
8190 }
8191 
tg3_phy_lpbk_set(struct tg3 * tp,u32 speed,bool extlpbk)8192 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8193 {
8194 	u32 val, bmcr, mac_mode, ptest = 0;
8195 
8196 	tg3_phy_toggle_apd(tp, false);
8197 	tg3_phy_toggle_automdix(tp, false);
8198 
8199 	if (extlpbk && tg3_phy_set_extloopbk(tp))
8200 		return -EIO;
8201 
8202 	bmcr = BMCR_FULLDPLX;
8203 	switch (speed) {
8204 	case SPEED_10:
8205 		break;
8206 	case SPEED_100:
8207 		bmcr |= BMCR_SPEED100;
8208 		break;
8209 	case SPEED_1000:
8210 	default:
8211 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8212 			speed = SPEED_100;
8213 			bmcr |= BMCR_SPEED100;
8214 		} else {
8215 			speed = SPEED_1000;
8216 			bmcr |= BMCR_SPEED1000;
8217 		}
8218 	}
8219 
8220 	if (extlpbk) {
8221 		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8222 			tg3_readphy(tp, MII_CTRL1000, &val);
8223 			val |= CTL1000_AS_MASTER |
8224 			       CTL1000_ENABLE_MASTER;
8225 			tg3_writephy(tp, MII_CTRL1000, val);
8226 		} else {
8227 			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8228 				MII_TG3_FET_PTEST_TRIM_2;
8229 			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8230 		}
8231 	} else
8232 		bmcr |= BMCR_LOOPBACK;
8233 
8234 	tg3_writephy(tp, MII_BMCR, bmcr);
8235 
8236 	/* The write needs to be flushed for the FETs */
8237 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8238 		tg3_readphy(tp, MII_BMCR, &bmcr);
8239 
8240 	udelay(40);
8241 
8242 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8243 	    tg3_asic_rev(tp) == ASIC_REV_5785) {
8244 		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8245 			     MII_TG3_FET_PTEST_FRC_TX_LINK |
8246 			     MII_TG3_FET_PTEST_FRC_TX_LOCK);
8247 
8248 		/* The write needs to be flushed for the AC131 */
8249 		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8250 	}
8251 
8252 	/* Reset to prevent losing 1st rx packet intermittently */
8253 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8254 	    tg3_flag(tp, 5780_CLASS)) {
8255 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8256 		udelay(10);
8257 		tw32_f(MAC_RX_MODE, tp->rx_mode);
8258 	}
8259 
8260 	mac_mode = tp->mac_mode &
8261 		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8262 	if (speed == SPEED_1000)
8263 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
8264 	else
8265 		mac_mode |= MAC_MODE_PORT_MODE_MII;
8266 
8267 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8268 		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8269 
8270 		if (masked_phy_id == TG3_PHY_ID_BCM5401)
8271 			mac_mode &= ~MAC_MODE_LINK_POLARITY;
8272 		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8273 			mac_mode |= MAC_MODE_LINK_POLARITY;
8274 
8275 		tg3_writephy(tp, MII_TG3_EXT_CTRL,
8276 			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8277 	}
8278 
8279 	tw32(MAC_MODE, mac_mode);
8280 	udelay(40);
8281 
8282 	return 0;
8283 }
8284 
tg3_set_loopback(struct net_device * dev,netdev_features_t features)8285 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8286 {
8287 	struct tg3 *tp = netdev_priv(dev);
8288 
8289 	if (features & NETIF_F_LOOPBACK) {
8290 		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8291 			return;
8292 
8293 		spin_lock_bh(&tp->lock);
8294 		tg3_mac_loopback(tp, true);
8295 		netif_carrier_on(tp->dev);
8296 		spin_unlock_bh(&tp->lock);
8297 		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8298 	} else {
8299 		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8300 			return;
8301 
8302 		spin_lock_bh(&tp->lock);
8303 		tg3_mac_loopback(tp, false);
8304 		/* Force link status check */
8305 		tg3_setup_phy(tp, true);
8306 		spin_unlock_bh(&tp->lock);
8307 		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8308 	}
8309 }
8310 
tg3_fix_features(struct net_device * dev,netdev_features_t features)8311 static netdev_features_t tg3_fix_features(struct net_device *dev,
8312 	netdev_features_t features)
8313 {
8314 	struct tg3 *tp = netdev_priv(dev);
8315 
8316 	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8317 		features &= ~NETIF_F_ALL_TSO;
8318 
8319 	return features;
8320 }
8321 
tg3_set_features(struct net_device * dev,netdev_features_t features)8322 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8323 {
8324 	netdev_features_t changed = dev->features ^ features;
8325 
8326 	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8327 		tg3_set_loopback(dev, features);
8328 
8329 	return 0;
8330 }
8331 
tg3_rx_prodring_free(struct tg3 * tp,struct tg3_rx_prodring_set * tpr)8332 static void tg3_rx_prodring_free(struct tg3 *tp,
8333 				 struct tg3_rx_prodring_set *tpr)
8334 {
8335 	int i;
8336 
8337 	if (tpr != &tp->napi[0].prodring) {
8338 		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8339 		     i = (i + 1) & tp->rx_std_ring_mask)
8340 			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8341 					tp->rx_pkt_map_sz);
8342 
8343 		if (tg3_flag(tp, JUMBO_CAPABLE)) {
8344 			for (i = tpr->rx_jmb_cons_idx;
8345 			     i != tpr->rx_jmb_prod_idx;
8346 			     i = (i + 1) & tp->rx_jmb_ring_mask) {
8347 				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8348 						TG3_RX_JMB_MAP_SZ);
8349 			}
8350 		}
8351 
8352 		return;
8353 	}
8354 
8355 	for (i = 0; i <= tp->rx_std_ring_mask; i++)
8356 		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8357 				tp->rx_pkt_map_sz);
8358 
8359 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8360 		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8361 			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8362 					TG3_RX_JMB_MAP_SZ);
8363 	}
8364 }
8365 
8366 /* Initialize rx rings for packet processing.
8367  *
8368  * The chip has been shut down and the driver detached from
8369  * the networking, so no interrupts or new tx packets will
8370  * end up in the driver.  tp->{tx,}lock are held and thus
8371  * we may not sleep.
8372  */
tg3_rx_prodring_alloc(struct tg3 * tp,struct tg3_rx_prodring_set * tpr)8373 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8374 				 struct tg3_rx_prodring_set *tpr)
8375 {
8376 	u32 i, rx_pkt_dma_sz;
8377 
8378 	tpr->rx_std_cons_idx = 0;
8379 	tpr->rx_std_prod_idx = 0;
8380 	tpr->rx_jmb_cons_idx = 0;
8381 	tpr->rx_jmb_prod_idx = 0;
8382 
8383 	if (tpr != &tp->napi[0].prodring) {
8384 		memset(&tpr->rx_std_buffers[0], 0,
8385 		       TG3_RX_STD_BUFF_RING_SIZE(tp));
8386 		if (tpr->rx_jmb_buffers)
8387 			memset(&tpr->rx_jmb_buffers[0], 0,
8388 			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
8389 		goto done;
8390 	}
8391 
8392 	/* Zero out all descriptors. */
8393 	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8394 
8395 	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8396 	if (tg3_flag(tp, 5780_CLASS) &&
8397 	    tp->dev->mtu > ETH_DATA_LEN)
8398 		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8399 	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8400 
8401 	/* Initialize invariants of the rings, we only set this
8402 	 * stuff once.  This works because the card does not
8403 	 * write into the rx buffer posting rings.
8404 	 */
8405 	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8406 		struct tg3_rx_buffer_desc *rxd;
8407 
8408 		rxd = &tpr->rx_std[i];
8409 		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8410 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8411 		rxd->opaque = (RXD_OPAQUE_RING_STD |
8412 			       (i << RXD_OPAQUE_INDEX_SHIFT));
8413 	}
8414 
8415 	/* Now allocate fresh SKBs for each rx ring. */
8416 	for (i = 0; i < tp->rx_pending; i++) {
8417 		unsigned int frag_size;
8418 
8419 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8420 				      &frag_size) < 0) {
8421 			netdev_warn(tp->dev,
8422 				    "Using a smaller RX standard ring. Only "
8423 				    "%d out of %d buffers were allocated "
8424 				    "successfully\n", i, tp->rx_pending);
8425 			if (i == 0)
8426 				goto initfail;
8427 			tp->rx_pending = i;
8428 			break;
8429 		}
8430 	}
8431 
8432 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8433 		goto done;
8434 
8435 	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8436 
8437 	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8438 		goto done;
8439 
8440 	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8441 		struct tg3_rx_buffer_desc *rxd;
8442 
8443 		rxd = &tpr->rx_jmb[i].std;
8444 		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8445 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8446 				  RXD_FLAG_JUMBO;
8447 		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8448 		       (i << RXD_OPAQUE_INDEX_SHIFT));
8449 	}
8450 
8451 	for (i = 0; i < tp->rx_jumbo_pending; i++) {
8452 		unsigned int frag_size;
8453 
8454 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8455 				      &frag_size) < 0) {
8456 			netdev_warn(tp->dev,
8457 				    "Using a smaller RX jumbo ring. Only %d "
8458 				    "out of %d buffers were allocated "
8459 				    "successfully\n", i, tp->rx_jumbo_pending);
8460 			if (i == 0)
8461 				goto initfail;
8462 			tp->rx_jumbo_pending = i;
8463 			break;
8464 		}
8465 	}
8466 
8467 done:
8468 	return 0;
8469 
8470 initfail:
8471 	tg3_rx_prodring_free(tp, tpr);
8472 	return -ENOMEM;
8473 }
8474 
tg3_rx_prodring_fini(struct tg3 * tp,struct tg3_rx_prodring_set * tpr)8475 static void tg3_rx_prodring_fini(struct tg3 *tp,
8476 				 struct tg3_rx_prodring_set *tpr)
8477 {
8478 	kfree(tpr->rx_std_buffers);
8479 	tpr->rx_std_buffers = NULL;
8480 	kfree(tpr->rx_jmb_buffers);
8481 	tpr->rx_jmb_buffers = NULL;
8482 	if (tpr->rx_std) {
8483 		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8484 				  tpr->rx_std, tpr->rx_std_mapping);
8485 		tpr->rx_std = NULL;
8486 	}
8487 	if (tpr->rx_jmb) {
8488 		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8489 				  tpr->rx_jmb, tpr->rx_jmb_mapping);
8490 		tpr->rx_jmb = NULL;
8491 	}
8492 }
8493 
tg3_rx_prodring_init(struct tg3 * tp,struct tg3_rx_prodring_set * tpr)8494 static int tg3_rx_prodring_init(struct tg3 *tp,
8495 				struct tg3_rx_prodring_set *tpr)
8496 {
8497 	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8498 				      GFP_KERNEL);
8499 	if (!tpr->rx_std_buffers)
8500 		return -ENOMEM;
8501 
8502 	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8503 					 TG3_RX_STD_RING_BYTES(tp),
8504 					 &tpr->rx_std_mapping,
8505 					 GFP_KERNEL);
8506 	if (!tpr->rx_std)
8507 		goto err_out;
8508 
8509 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8510 		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8511 					      GFP_KERNEL);
8512 		if (!tpr->rx_jmb_buffers)
8513 			goto err_out;
8514 
8515 		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8516 						 TG3_RX_JMB_RING_BYTES(tp),
8517 						 &tpr->rx_jmb_mapping,
8518 						 GFP_KERNEL);
8519 		if (!tpr->rx_jmb)
8520 			goto err_out;
8521 	}
8522 
8523 	return 0;
8524 
8525 err_out:
8526 	tg3_rx_prodring_fini(tp, tpr);
8527 	return -ENOMEM;
8528 }
8529 
8530 /* Free up pending packets in all rx/tx rings.
8531  *
8532  * The chip has been shut down and the driver detached from
8533  * the networking, so no interrupts or new tx packets will
8534  * end up in the driver.  tp->{tx,}lock is not held and we are not
8535  * in an interrupt context and thus may sleep.
8536  */
tg3_free_rings(struct tg3 * tp)8537 static void tg3_free_rings(struct tg3 *tp)
8538 {
8539 	int i, j;
8540 
8541 	for (j = 0; j < tp->irq_cnt; j++) {
8542 		struct tg3_napi *tnapi = &tp->napi[j];
8543 
8544 		tg3_rx_prodring_free(tp, &tnapi->prodring);
8545 
8546 		if (!tnapi->tx_buffers)
8547 			continue;
8548 
8549 		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8550 			struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8551 
8552 			if (!skb)
8553 				continue;
8554 
8555 			tg3_tx_skb_unmap(tnapi, i,
8556 					 skb_shinfo(skb)->nr_frags - 1);
8557 
8558 			dev_consume_skb_any(skb);
8559 		}
8560 		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8561 	}
8562 }
8563 
8564 /* Initialize tx/rx rings for packet processing.
8565  *
8566  * The chip has been shut down and the driver detached from
8567  * the networking, so no interrupts or new tx packets will
8568  * end up in the driver.  tp->{tx,}lock are held and thus
8569  * we may not sleep.
8570  */
tg3_init_rings(struct tg3 * tp)8571 static int tg3_init_rings(struct tg3 *tp)
8572 {
8573 	int i;
8574 
8575 	/* Free up all the SKBs. */
8576 	tg3_free_rings(tp);
8577 
8578 	for (i = 0; i < tp->irq_cnt; i++) {
8579 		struct tg3_napi *tnapi = &tp->napi[i];
8580 
8581 		tnapi->last_tag = 0;
8582 		tnapi->last_irq_tag = 0;
8583 		tnapi->hw_status->status = 0;
8584 		tnapi->hw_status->status_tag = 0;
8585 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8586 
8587 		tnapi->tx_prod = 0;
8588 		tnapi->tx_cons = 0;
8589 		if (tnapi->tx_ring)
8590 			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8591 
8592 		tnapi->rx_rcb_ptr = 0;
8593 		if (tnapi->rx_rcb)
8594 			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8595 
8596 		if (tnapi->prodring.rx_std &&
8597 		    tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8598 			tg3_free_rings(tp);
8599 			return -ENOMEM;
8600 		}
8601 	}
8602 
8603 	return 0;
8604 }
8605 
tg3_mem_tx_release(struct tg3 * tp)8606 static void tg3_mem_tx_release(struct tg3 *tp)
8607 {
8608 	int i;
8609 
8610 	for (i = 0; i < tp->irq_max; i++) {
8611 		struct tg3_napi *tnapi = &tp->napi[i];
8612 
8613 		if (tnapi->tx_ring) {
8614 			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8615 				tnapi->tx_ring, tnapi->tx_desc_mapping);
8616 			tnapi->tx_ring = NULL;
8617 		}
8618 
8619 		kfree(tnapi->tx_buffers);
8620 		tnapi->tx_buffers = NULL;
8621 	}
8622 }
8623 
tg3_mem_tx_acquire(struct tg3 * tp)8624 static int tg3_mem_tx_acquire(struct tg3 *tp)
8625 {
8626 	int i;
8627 	struct tg3_napi *tnapi = &tp->napi[0];
8628 
8629 	/* If multivector TSS is enabled, vector 0 does not handle
8630 	 * tx interrupts.  Don't allocate any resources for it.
8631 	 */
8632 	if (tg3_flag(tp, ENABLE_TSS))
8633 		tnapi++;
8634 
8635 	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8636 		tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8637 					    sizeof(struct tg3_tx_ring_info),
8638 					    GFP_KERNEL);
8639 		if (!tnapi->tx_buffers)
8640 			goto err_out;
8641 
8642 		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8643 						    TG3_TX_RING_BYTES,
8644 						    &tnapi->tx_desc_mapping,
8645 						    GFP_KERNEL);
8646 		if (!tnapi->tx_ring)
8647 			goto err_out;
8648 	}
8649 
8650 	return 0;
8651 
8652 err_out:
8653 	tg3_mem_tx_release(tp);
8654 	return -ENOMEM;
8655 }
8656 
tg3_mem_rx_release(struct tg3 * tp)8657 static void tg3_mem_rx_release(struct tg3 *tp)
8658 {
8659 	int i;
8660 
8661 	for (i = 0; i < tp->irq_max; i++) {
8662 		struct tg3_napi *tnapi = &tp->napi[i];
8663 
8664 		tg3_rx_prodring_fini(tp, &tnapi->prodring);
8665 
8666 		if (!tnapi->rx_rcb)
8667 			continue;
8668 
8669 		dma_free_coherent(&tp->pdev->dev,
8670 				  TG3_RX_RCB_RING_BYTES(tp),
8671 				  tnapi->rx_rcb,
8672 				  tnapi->rx_rcb_mapping);
8673 		tnapi->rx_rcb = NULL;
8674 	}
8675 }
8676 
tg3_mem_rx_acquire(struct tg3 * tp)8677 static int tg3_mem_rx_acquire(struct tg3 *tp)
8678 {
8679 	unsigned int i, limit;
8680 
8681 	limit = tp->rxq_cnt;
8682 
8683 	/* If RSS is enabled, we need a (dummy) producer ring
8684 	 * set on vector zero.  This is the true hw prodring.
8685 	 */
8686 	if (tg3_flag(tp, ENABLE_RSS))
8687 		limit++;
8688 
8689 	for (i = 0; i < limit; i++) {
8690 		struct tg3_napi *tnapi = &tp->napi[i];
8691 
8692 		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8693 			goto err_out;
8694 
8695 		/* If multivector RSS is enabled, vector 0
8696 		 * does not handle rx or tx interrupts.
8697 		 * Don't allocate any resources for it.
8698 		 */
8699 		if (!i && tg3_flag(tp, ENABLE_RSS))
8700 			continue;
8701 
8702 		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8703 						   TG3_RX_RCB_RING_BYTES(tp),
8704 						   &tnapi->rx_rcb_mapping,
8705 						   GFP_KERNEL);
8706 		if (!tnapi->rx_rcb)
8707 			goto err_out;
8708 	}
8709 
8710 	return 0;
8711 
8712 err_out:
8713 	tg3_mem_rx_release(tp);
8714 	return -ENOMEM;
8715 }
8716 
8717 /*
8718  * Must not be invoked with interrupt sources disabled and
8719  * the hardware shutdown down.
8720  */
tg3_free_consistent(struct tg3 * tp)8721 static void tg3_free_consistent(struct tg3 *tp)
8722 {
8723 	int i;
8724 
8725 	for (i = 0; i < tp->irq_cnt; i++) {
8726 		struct tg3_napi *tnapi = &tp->napi[i];
8727 
8728 		if (tnapi->hw_status) {
8729 			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8730 					  tnapi->hw_status,
8731 					  tnapi->status_mapping);
8732 			tnapi->hw_status = NULL;
8733 		}
8734 	}
8735 
8736 	tg3_mem_rx_release(tp);
8737 	tg3_mem_tx_release(tp);
8738 
8739 	/* tp->hw_stats can be referenced safely:
8740 	 *     1. under rtnl_lock
8741 	 *     2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8742 	 */
8743 	if (tp->hw_stats) {
8744 		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8745 				  tp->hw_stats, tp->stats_mapping);
8746 		tp->hw_stats = NULL;
8747 	}
8748 }
8749 
8750 /*
8751  * Must not be invoked with interrupt sources disabled and
8752  * the hardware shutdown down.  Can sleep.
8753  */
tg3_alloc_consistent(struct tg3 * tp)8754 static int tg3_alloc_consistent(struct tg3 *tp)
8755 {
8756 	int i;
8757 
8758 	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8759 					  sizeof(struct tg3_hw_stats),
8760 					  &tp->stats_mapping, GFP_KERNEL);
8761 	if (!tp->hw_stats)
8762 		goto err_out;
8763 
8764 	for (i = 0; i < tp->irq_cnt; i++) {
8765 		struct tg3_napi *tnapi = &tp->napi[i];
8766 		struct tg3_hw_status *sblk;
8767 
8768 		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8769 						      TG3_HW_STATUS_SIZE,
8770 						      &tnapi->status_mapping,
8771 						      GFP_KERNEL);
8772 		if (!tnapi->hw_status)
8773 			goto err_out;
8774 
8775 		sblk = tnapi->hw_status;
8776 
8777 		if (tg3_flag(tp, ENABLE_RSS)) {
8778 			u16 *prodptr = NULL;
8779 
8780 			/*
8781 			 * When RSS is enabled, the status block format changes
8782 			 * slightly.  The "rx_jumbo_consumer", "reserved",
8783 			 * and "rx_mini_consumer" members get mapped to the
8784 			 * other three rx return ring producer indexes.
8785 			 */
8786 			switch (i) {
8787 			case 1:
8788 				prodptr = &sblk->idx[0].rx_producer;
8789 				break;
8790 			case 2:
8791 				prodptr = &sblk->rx_jumbo_consumer;
8792 				break;
8793 			case 3:
8794 				prodptr = &sblk->reserved;
8795 				break;
8796 			case 4:
8797 				prodptr = &sblk->rx_mini_consumer;
8798 				break;
8799 			}
8800 			tnapi->rx_rcb_prod_idx = prodptr;
8801 		} else {
8802 			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8803 		}
8804 	}
8805 
8806 	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8807 		goto err_out;
8808 
8809 	return 0;
8810 
8811 err_out:
8812 	tg3_free_consistent(tp);
8813 	return -ENOMEM;
8814 }
8815 
8816 #define MAX_WAIT_CNT 1000
8817 
8818 /* To stop a block, clear the enable bit and poll till it
8819  * clears.  tp->lock is held.
8820  */
tg3_stop_block(struct tg3 * tp,unsigned long ofs,u32 enable_bit,bool silent)8821 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8822 {
8823 	unsigned int i;
8824 	u32 val;
8825 
8826 	if (tg3_flag(tp, 5705_PLUS)) {
8827 		switch (ofs) {
8828 		case RCVLSC_MODE:
8829 		case DMAC_MODE:
8830 		case MBFREE_MODE:
8831 		case BUFMGR_MODE:
8832 		case MEMARB_MODE:
8833 			/* We can't enable/disable these bits of the
8834 			 * 5705/5750, just say success.
8835 			 */
8836 			return 0;
8837 
8838 		default:
8839 			break;
8840 		}
8841 	}
8842 
8843 	val = tr32(ofs);
8844 	val &= ~enable_bit;
8845 	tw32_f(ofs, val);
8846 
8847 	for (i = 0; i < MAX_WAIT_CNT; i++) {
8848 		if (pci_channel_offline(tp->pdev)) {
8849 			dev_err(&tp->pdev->dev,
8850 				"tg3_stop_block device offline, "
8851 				"ofs=%lx enable_bit=%x\n",
8852 				ofs, enable_bit);
8853 			return -ENODEV;
8854 		}
8855 
8856 		udelay(100);
8857 		val = tr32(ofs);
8858 		if ((val & enable_bit) == 0)
8859 			break;
8860 	}
8861 
8862 	if (i == MAX_WAIT_CNT && !silent) {
8863 		dev_err(&tp->pdev->dev,
8864 			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8865 			ofs, enable_bit);
8866 		return -ENODEV;
8867 	}
8868 
8869 	return 0;
8870 }
8871 
8872 /* tp->lock is held. */
tg3_abort_hw(struct tg3 * tp,bool silent)8873 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8874 {
8875 	int i, err;
8876 
8877 	tg3_disable_ints(tp);
8878 
8879 	if (pci_channel_offline(tp->pdev)) {
8880 		tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8881 		tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8882 		err = -ENODEV;
8883 		goto err_no_dev;
8884 	}
8885 
8886 	tp->rx_mode &= ~RX_MODE_ENABLE;
8887 	tw32_f(MAC_RX_MODE, tp->rx_mode);
8888 	udelay(10);
8889 
8890 	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8891 	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8892 	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8893 	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8894 	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8895 	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8896 
8897 	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8898 	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8899 	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8900 	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8901 	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8902 	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8903 	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8904 
8905 	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8906 	tw32_f(MAC_MODE, tp->mac_mode);
8907 	udelay(40);
8908 
8909 	tp->tx_mode &= ~TX_MODE_ENABLE;
8910 	tw32_f(MAC_TX_MODE, tp->tx_mode);
8911 
8912 	for (i = 0; i < MAX_WAIT_CNT; i++) {
8913 		udelay(100);
8914 		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8915 			break;
8916 	}
8917 	if (i >= MAX_WAIT_CNT) {
8918 		dev_err(&tp->pdev->dev,
8919 			"%s timed out, TX_MODE_ENABLE will not clear "
8920 			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8921 		err |= -ENODEV;
8922 	}
8923 
8924 	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8925 	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8926 	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8927 
8928 	tw32(FTQ_RESET, 0xffffffff);
8929 	tw32(FTQ_RESET, 0x00000000);
8930 
8931 	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8932 	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8933 
8934 err_no_dev:
8935 	for (i = 0; i < tp->irq_cnt; i++) {
8936 		struct tg3_napi *tnapi = &tp->napi[i];
8937 		if (tnapi->hw_status)
8938 			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8939 	}
8940 
8941 	return err;
8942 }
8943 
8944 /* Save PCI command register before chip reset */
tg3_save_pci_state(struct tg3 * tp)8945 static void tg3_save_pci_state(struct tg3 *tp)
8946 {
8947 	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8948 }
8949 
8950 /* Restore PCI state after chip reset */
tg3_restore_pci_state(struct tg3 * tp)8951 static void tg3_restore_pci_state(struct tg3 *tp)
8952 {
8953 	u32 val;
8954 
8955 	/* Re-enable indirect register accesses. */
8956 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8957 			       tp->misc_host_ctrl);
8958 
8959 	/* Set MAX PCI retry to zero. */
8960 	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8961 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8962 	    tg3_flag(tp, PCIX_MODE))
8963 		val |= PCISTATE_RETRY_SAME_DMA;
8964 	/* Allow reads and writes to the APE register and memory space. */
8965 	if (tg3_flag(tp, ENABLE_APE))
8966 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8967 		       PCISTATE_ALLOW_APE_SHMEM_WR |
8968 		       PCISTATE_ALLOW_APE_PSPACE_WR;
8969 	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8970 
8971 	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8972 
8973 	if (!tg3_flag(tp, PCI_EXPRESS)) {
8974 		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8975 				      tp->pci_cacheline_sz);
8976 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8977 				      tp->pci_lat_timer);
8978 	}
8979 
8980 	/* Make sure PCI-X relaxed ordering bit is clear. */
8981 	if (tg3_flag(tp, PCIX_MODE)) {
8982 		u16 pcix_cmd;
8983 
8984 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8985 				     &pcix_cmd);
8986 		pcix_cmd &= ~PCI_X_CMD_ERO;
8987 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8988 				      pcix_cmd);
8989 	}
8990 
8991 	if (tg3_flag(tp, 5780_CLASS)) {
8992 
8993 		/* Chip reset on 5780 will reset MSI enable bit,
8994 		 * so need to restore it.
8995 		 */
8996 		if (tg3_flag(tp, USING_MSI)) {
8997 			u16 ctrl;
8998 
8999 			pci_read_config_word(tp->pdev,
9000 					     tp->msi_cap + PCI_MSI_FLAGS,
9001 					     &ctrl);
9002 			pci_write_config_word(tp->pdev,
9003 					      tp->msi_cap + PCI_MSI_FLAGS,
9004 					      ctrl | PCI_MSI_FLAGS_ENABLE);
9005 			val = tr32(MSGINT_MODE);
9006 			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
9007 		}
9008 	}
9009 }
9010 
tg3_override_clk(struct tg3 * tp)9011 static void tg3_override_clk(struct tg3 *tp)
9012 {
9013 	u32 val;
9014 
9015 	switch (tg3_asic_rev(tp)) {
9016 	case ASIC_REV_5717:
9017 		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9018 		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9019 		     TG3_CPMU_MAC_ORIDE_ENABLE);
9020 		break;
9021 
9022 	case ASIC_REV_5719:
9023 	case ASIC_REV_5720:
9024 		tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9025 		break;
9026 
9027 	default:
9028 		return;
9029 	}
9030 }
9031 
tg3_restore_clk(struct tg3 * tp)9032 static void tg3_restore_clk(struct tg3 *tp)
9033 {
9034 	u32 val;
9035 
9036 	switch (tg3_asic_rev(tp)) {
9037 	case ASIC_REV_5717:
9038 		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9039 		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9040 		     val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9041 		break;
9042 
9043 	case ASIC_REV_5719:
9044 	case ASIC_REV_5720:
9045 		val = tr32(TG3_CPMU_CLCK_ORIDE);
9046 		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9047 		break;
9048 
9049 	default:
9050 		return;
9051 	}
9052 }
9053 
9054 /* tp->lock is held. */
tg3_chip_reset(struct tg3 * tp)9055 static int tg3_chip_reset(struct tg3 *tp)
9056 	__releases(tp->lock)
9057 	__acquires(tp->lock)
9058 {
9059 	u32 val;
9060 	void (*write_op)(struct tg3 *, u32, u32);
9061 	int i, err;
9062 
9063 	if (!pci_device_is_present(tp->pdev))
9064 		return -ENODEV;
9065 
9066 	tg3_nvram_lock(tp);
9067 
9068 	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9069 
9070 	/* No matching tg3_nvram_unlock() after this because
9071 	 * chip reset below will undo the nvram lock.
9072 	 */
9073 	tp->nvram_lock_cnt = 0;
9074 
9075 	/* GRC_MISC_CFG core clock reset will clear the memory
9076 	 * enable bit in PCI register 4 and the MSI enable bit
9077 	 * on some chips, so we save relevant registers here.
9078 	 */
9079 	tg3_save_pci_state(tp);
9080 
9081 	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9082 	    tg3_flag(tp, 5755_PLUS))
9083 		tw32(GRC_FASTBOOT_PC, 0);
9084 
9085 	/*
9086 	 * We must avoid the readl() that normally takes place.
9087 	 * It locks machines, causes machine checks, and other
9088 	 * fun things.  So, temporarily disable the 5701
9089 	 * hardware workaround, while we do the reset.
9090 	 */
9091 	write_op = tp->write32;
9092 	if (write_op == tg3_write_flush_reg32)
9093 		tp->write32 = tg3_write32;
9094 
9095 	/* Prevent the irq handler from reading or writing PCI registers
9096 	 * during chip reset when the memory enable bit in the PCI command
9097 	 * register may be cleared.  The chip does not generate interrupt
9098 	 * at this time, but the irq handler may still be called due to irq
9099 	 * sharing or irqpoll.
9100 	 */
9101 	tg3_flag_set(tp, CHIP_RESETTING);
9102 	for (i = 0; i < tp->irq_cnt; i++) {
9103 		struct tg3_napi *tnapi = &tp->napi[i];
9104 		if (tnapi->hw_status) {
9105 			tnapi->hw_status->status = 0;
9106 			tnapi->hw_status->status_tag = 0;
9107 		}
9108 		tnapi->last_tag = 0;
9109 		tnapi->last_irq_tag = 0;
9110 	}
9111 	smp_mb();
9112 
9113 	tg3_full_unlock(tp);
9114 
9115 	for (i = 0; i < tp->irq_cnt; i++)
9116 		synchronize_irq(tp->napi[i].irq_vec);
9117 
9118 	tg3_full_lock(tp, 0);
9119 
9120 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9121 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9122 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9123 	}
9124 
9125 	/* do the reset */
9126 	val = GRC_MISC_CFG_CORECLK_RESET;
9127 
9128 	if (tg3_flag(tp, PCI_EXPRESS)) {
9129 		/* Force PCIe 1.0a mode */
9130 		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9131 		    !tg3_flag(tp, 57765_PLUS) &&
9132 		    tr32(TG3_PCIE_PHY_TSTCTL) ==
9133 		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9134 			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9135 
9136 		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9137 			tw32(GRC_MISC_CFG, (1 << 29));
9138 			val |= (1 << 29);
9139 		}
9140 	}
9141 
9142 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9143 		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9144 		tw32(GRC_VCPU_EXT_CTRL,
9145 		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9146 	}
9147 
9148 	/* Set the clock to the highest frequency to avoid timeouts. With link
9149 	 * aware mode, the clock speed could be slow and bootcode does not
9150 	 * complete within the expected time. Override the clock to allow the
9151 	 * bootcode to finish sooner and then restore it.
9152 	 */
9153 	tg3_override_clk(tp);
9154 
9155 	/* Manage gphy power for all CPMU absent PCIe devices. */
9156 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9157 		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9158 
9159 	tw32(GRC_MISC_CFG, val);
9160 
9161 	/* restore 5701 hardware bug workaround write method */
9162 	tp->write32 = write_op;
9163 
9164 	/* Unfortunately, we have to delay before the PCI read back.
9165 	 * Some 575X chips even will not respond to a PCI cfg access
9166 	 * when the reset command is given to the chip.
9167 	 *
9168 	 * How do these hardware designers expect things to work
9169 	 * properly if the PCI write is posted for a long period
9170 	 * of time?  It is always necessary to have some method by
9171 	 * which a register read back can occur to push the write
9172 	 * out which does the reset.
9173 	 *
9174 	 * For most tg3 variants the trick below was working.
9175 	 * Ho hum...
9176 	 */
9177 	udelay(120);
9178 
9179 	/* Flush PCI posted writes.  The normal MMIO registers
9180 	 * are inaccessible at this time so this is the only
9181 	 * way to make this reliably (actually, this is no longer
9182 	 * the case, see above).  I tried to use indirect
9183 	 * register read/write but this upset some 5701 variants.
9184 	 */
9185 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9186 
9187 	udelay(120);
9188 
9189 	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9190 		u16 val16;
9191 
9192 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9193 			int j;
9194 			u32 cfg_val;
9195 
9196 			/* Wait for link training to complete.  */
9197 			for (j = 0; j < 5000; j++)
9198 				udelay(100);
9199 
9200 			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9201 			pci_write_config_dword(tp->pdev, 0xc4,
9202 					       cfg_val | (1 << 15));
9203 		}
9204 
9205 		/* Clear the "no snoop" and "relaxed ordering" bits. */
9206 		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9207 		/*
9208 		 * Older PCIe devices only support the 128 byte
9209 		 * MPS setting.  Enforce the restriction.
9210 		 */
9211 		if (!tg3_flag(tp, CPMU_PRESENT))
9212 			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9213 		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9214 
9215 		/* Clear error status */
9216 		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9217 				      PCI_EXP_DEVSTA_CED |
9218 				      PCI_EXP_DEVSTA_NFED |
9219 				      PCI_EXP_DEVSTA_FED |
9220 				      PCI_EXP_DEVSTA_URD);
9221 	}
9222 
9223 	tg3_restore_pci_state(tp);
9224 
9225 	tg3_flag_clear(tp, CHIP_RESETTING);
9226 	tg3_flag_clear(tp, ERROR_PROCESSED);
9227 
9228 	val = 0;
9229 	if (tg3_flag(tp, 5780_CLASS))
9230 		val = tr32(MEMARB_MODE);
9231 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9232 
9233 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9234 		tg3_stop_fw(tp);
9235 		tw32(0x5000, 0x400);
9236 	}
9237 
9238 	if (tg3_flag(tp, IS_SSB_CORE)) {
9239 		/*
9240 		 * BCM4785: In order to avoid repercussions from using
9241 		 * potentially defective internal ROM, stop the Rx RISC CPU,
9242 		 * which is not required.
9243 		 */
9244 		tg3_stop_fw(tp);
9245 		tg3_halt_cpu(tp, RX_CPU_BASE);
9246 	}
9247 
9248 	err = tg3_poll_fw(tp);
9249 	if (err)
9250 		return err;
9251 
9252 	tw32(GRC_MODE, tp->grc_mode);
9253 
9254 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9255 		val = tr32(0xc4);
9256 
9257 		tw32(0xc4, val | (1 << 15));
9258 	}
9259 
9260 	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9261 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
9262 		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9263 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9264 			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9265 		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9266 	}
9267 
9268 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9269 		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9270 		val = tp->mac_mode;
9271 	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9272 		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9273 		val = tp->mac_mode;
9274 	} else
9275 		val = 0;
9276 
9277 	tw32_f(MAC_MODE, val);
9278 	udelay(40);
9279 
9280 	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9281 
9282 	tg3_mdio_start(tp);
9283 
9284 	if (tg3_flag(tp, PCI_EXPRESS) &&
9285 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9286 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
9287 	    !tg3_flag(tp, 57765_PLUS)) {
9288 		val = tr32(0x7c00);
9289 
9290 		tw32(0x7c00, val | (1 << 25));
9291 	}
9292 
9293 	tg3_restore_clk(tp);
9294 
9295 	/* Increase the core clock speed to fix tx timeout issue for 5762
9296 	 * with 100Mbps link speed.
9297 	 */
9298 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9299 		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9300 		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9301 		     TG3_CPMU_MAC_ORIDE_ENABLE);
9302 	}
9303 
9304 	/* Reprobe ASF enable state.  */
9305 	tg3_flag_clear(tp, ENABLE_ASF);
9306 	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9307 			   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9308 
9309 	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9310 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9311 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9312 		u32 nic_cfg;
9313 
9314 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9315 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9316 			tg3_flag_set(tp, ENABLE_ASF);
9317 			tp->last_event_jiffies = jiffies;
9318 			if (tg3_flag(tp, 5750_PLUS))
9319 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9320 
9321 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9322 			if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9323 				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9324 			if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9325 				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9326 		}
9327 	}
9328 
9329 	return 0;
9330 }
9331 
9332 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9333 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9334 static void __tg3_set_rx_mode(struct net_device *);
9335 
9336 /* tp->lock is held. */
tg3_halt(struct tg3 * tp,int kind,bool silent)9337 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9338 {
9339 	int err, i;
9340 
9341 	tg3_stop_fw(tp);
9342 
9343 	tg3_write_sig_pre_reset(tp, kind);
9344 
9345 	tg3_abort_hw(tp, silent);
9346 	err = tg3_chip_reset(tp);
9347 
9348 	__tg3_set_mac_addr(tp, false);
9349 
9350 	tg3_write_sig_legacy(tp, kind);
9351 	tg3_write_sig_post_reset(tp, kind);
9352 
9353 	if (tp->hw_stats) {
9354 		/* Save the stats across chip resets... */
9355 		tg3_get_nstats(tp, &tp->net_stats_prev);
9356 		tg3_get_estats(tp, &tp->estats_prev);
9357 
9358 		/* And make sure the next sample is new data */
9359 		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9360 
9361 		for (i = 0; i < TG3_IRQ_MAX_VECS; ++i) {
9362 			struct tg3_napi *tnapi = &tp->napi[i];
9363 
9364 			tnapi->rx_dropped = 0;
9365 			tnapi->tx_dropped = 0;
9366 		}
9367 	}
9368 
9369 	return err;
9370 }
9371 
tg3_set_mac_addr(struct net_device * dev,void * p)9372 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9373 {
9374 	struct tg3 *tp = netdev_priv(dev);
9375 	struct sockaddr *addr = p;
9376 	int err = 0;
9377 	bool skip_mac_1 = false;
9378 
9379 	if (!is_valid_ether_addr(addr->sa_data))
9380 		return -EADDRNOTAVAIL;
9381 
9382 	eth_hw_addr_set(dev, addr->sa_data);
9383 
9384 	if (!netif_running(dev))
9385 		return 0;
9386 
9387 	if (tg3_flag(tp, ENABLE_ASF)) {
9388 		u32 addr0_high, addr0_low, addr1_high, addr1_low;
9389 
9390 		addr0_high = tr32(MAC_ADDR_0_HIGH);
9391 		addr0_low = tr32(MAC_ADDR_0_LOW);
9392 		addr1_high = tr32(MAC_ADDR_1_HIGH);
9393 		addr1_low = tr32(MAC_ADDR_1_LOW);
9394 
9395 		/* Skip MAC addr 1 if ASF is using it. */
9396 		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9397 		    !(addr1_high == 0 && addr1_low == 0))
9398 			skip_mac_1 = true;
9399 	}
9400 	spin_lock_bh(&tp->lock);
9401 	__tg3_set_mac_addr(tp, skip_mac_1);
9402 	__tg3_set_rx_mode(dev);
9403 	spin_unlock_bh(&tp->lock);
9404 
9405 	return err;
9406 }
9407 
9408 /* tp->lock is held. */
tg3_set_bdinfo(struct tg3 * tp,u32 bdinfo_addr,dma_addr_t mapping,u32 maxlen_flags,u32 nic_addr)9409 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9410 			   dma_addr_t mapping, u32 maxlen_flags,
9411 			   u32 nic_addr)
9412 {
9413 	tg3_write_mem(tp,
9414 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9415 		      ((u64) mapping >> 32));
9416 	tg3_write_mem(tp,
9417 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9418 		      ((u64) mapping & 0xffffffff));
9419 	tg3_write_mem(tp,
9420 		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9421 		       maxlen_flags);
9422 
9423 	if (!tg3_flag(tp, 5705_PLUS))
9424 		tg3_write_mem(tp,
9425 			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9426 			      nic_addr);
9427 }
9428 
9429 
tg3_coal_tx_init(struct tg3 * tp,struct ethtool_coalesce * ec)9430 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9431 {
9432 	int i = 0;
9433 
9434 	if (!tg3_flag(tp, ENABLE_TSS)) {
9435 		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9436 		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9437 		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9438 	} else {
9439 		tw32(HOSTCC_TXCOL_TICKS, 0);
9440 		tw32(HOSTCC_TXMAX_FRAMES, 0);
9441 		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9442 
9443 		for (; i < tp->txq_cnt; i++) {
9444 			u32 reg;
9445 
9446 			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9447 			tw32(reg, ec->tx_coalesce_usecs);
9448 			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9449 			tw32(reg, ec->tx_max_coalesced_frames);
9450 			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9451 			tw32(reg, ec->tx_max_coalesced_frames_irq);
9452 		}
9453 	}
9454 
9455 	for (; i < tp->irq_max - 1; i++) {
9456 		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9457 		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9458 		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9459 	}
9460 }
9461 
tg3_coal_rx_init(struct tg3 * tp,struct ethtool_coalesce * ec)9462 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9463 {
9464 	int i = 0;
9465 	u32 limit = tp->rxq_cnt;
9466 
9467 	if (!tg3_flag(tp, ENABLE_RSS)) {
9468 		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9469 		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9470 		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9471 		limit--;
9472 	} else {
9473 		tw32(HOSTCC_RXCOL_TICKS, 0);
9474 		tw32(HOSTCC_RXMAX_FRAMES, 0);
9475 		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9476 	}
9477 
9478 	for (; i < limit; i++) {
9479 		u32 reg;
9480 
9481 		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9482 		tw32(reg, ec->rx_coalesce_usecs);
9483 		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9484 		tw32(reg, ec->rx_max_coalesced_frames);
9485 		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9486 		tw32(reg, ec->rx_max_coalesced_frames_irq);
9487 	}
9488 
9489 	for (; i < tp->irq_max - 1; i++) {
9490 		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9491 		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9492 		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9493 	}
9494 }
9495 
__tg3_set_coalesce(struct tg3 * tp,struct ethtool_coalesce * ec)9496 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9497 {
9498 	tg3_coal_tx_init(tp, ec);
9499 	tg3_coal_rx_init(tp, ec);
9500 
9501 	if (!tg3_flag(tp, 5705_PLUS)) {
9502 		u32 val = ec->stats_block_coalesce_usecs;
9503 
9504 		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9505 		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9506 
9507 		if (!tp->link_up)
9508 			val = 0;
9509 
9510 		tw32(HOSTCC_STAT_COAL_TICKS, val);
9511 	}
9512 }
9513 
9514 /* tp->lock is held. */
tg3_tx_rcbs_disable(struct tg3 * tp)9515 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9516 {
9517 	u32 txrcb, limit;
9518 
9519 	/* Disable all transmit rings but the first. */
9520 	if (!tg3_flag(tp, 5705_PLUS))
9521 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9522 	else if (tg3_flag(tp, 5717_PLUS))
9523 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9524 	else if (tg3_flag(tp, 57765_CLASS) ||
9525 		 tg3_asic_rev(tp) == ASIC_REV_5762)
9526 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9527 	else
9528 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9529 
9530 	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9531 	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9532 		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9533 			      BDINFO_FLAGS_DISABLED);
9534 }
9535 
9536 /* tp->lock is held. */
tg3_tx_rcbs_init(struct tg3 * tp)9537 static void tg3_tx_rcbs_init(struct tg3 *tp)
9538 {
9539 	int i = 0;
9540 	u32 txrcb = NIC_SRAM_SEND_RCB;
9541 
9542 	if (tg3_flag(tp, ENABLE_TSS))
9543 		i++;
9544 
9545 	for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9546 		struct tg3_napi *tnapi = &tp->napi[i];
9547 
9548 		if (!tnapi->tx_ring)
9549 			continue;
9550 
9551 		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9552 			       (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9553 			       NIC_SRAM_TX_BUFFER_DESC);
9554 	}
9555 }
9556 
9557 /* tp->lock is held. */
tg3_rx_ret_rcbs_disable(struct tg3 * tp)9558 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9559 {
9560 	u32 rxrcb, limit;
9561 
9562 	/* Disable all receive return rings but the first. */
9563 	if (tg3_flag(tp, 5717_PLUS))
9564 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9565 	else if (!tg3_flag(tp, 5705_PLUS))
9566 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9567 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9568 		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9569 		 tg3_flag(tp, 57765_CLASS))
9570 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9571 	else
9572 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9573 
9574 	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9575 	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9576 		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9577 			      BDINFO_FLAGS_DISABLED);
9578 }
9579 
9580 /* tp->lock is held. */
tg3_rx_ret_rcbs_init(struct tg3 * tp)9581 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9582 {
9583 	int i = 0;
9584 	u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9585 
9586 	if (tg3_flag(tp, ENABLE_RSS))
9587 		i++;
9588 
9589 	for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9590 		struct tg3_napi *tnapi = &tp->napi[i];
9591 
9592 		if (!tnapi->rx_rcb)
9593 			continue;
9594 
9595 		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9596 			       (tp->rx_ret_ring_mask + 1) <<
9597 				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9598 	}
9599 }
9600 
9601 /* tp->lock is held. */
tg3_rings_reset(struct tg3 * tp)9602 static void tg3_rings_reset(struct tg3 *tp)
9603 {
9604 	int i;
9605 	u32 stblk;
9606 	struct tg3_napi *tnapi = &tp->napi[0];
9607 
9608 	tg3_tx_rcbs_disable(tp);
9609 
9610 	tg3_rx_ret_rcbs_disable(tp);
9611 
9612 	/* Disable interrupts */
9613 	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9614 	tp->napi[0].chk_msi_cnt = 0;
9615 	tp->napi[0].last_rx_cons = 0;
9616 	tp->napi[0].last_tx_cons = 0;
9617 
9618 	/* Zero mailbox registers. */
9619 	if (tg3_flag(tp, SUPPORT_MSIX)) {
9620 		for (i = 1; i < tp->irq_max; i++) {
9621 			tp->napi[i].tx_prod = 0;
9622 			tp->napi[i].tx_cons = 0;
9623 			if (tg3_flag(tp, ENABLE_TSS))
9624 				tw32_mailbox(tp->napi[i].prodmbox, 0);
9625 			tw32_rx_mbox(tp->napi[i].consmbox, 0);
9626 			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9627 			tp->napi[i].chk_msi_cnt = 0;
9628 			tp->napi[i].last_rx_cons = 0;
9629 			tp->napi[i].last_tx_cons = 0;
9630 		}
9631 		if (!tg3_flag(tp, ENABLE_TSS))
9632 			tw32_mailbox(tp->napi[0].prodmbox, 0);
9633 	} else {
9634 		tp->napi[0].tx_prod = 0;
9635 		tp->napi[0].tx_cons = 0;
9636 		tw32_mailbox(tp->napi[0].prodmbox, 0);
9637 		tw32_rx_mbox(tp->napi[0].consmbox, 0);
9638 	}
9639 
9640 	/* Make sure the NIC-based send BD rings are disabled. */
9641 	if (!tg3_flag(tp, 5705_PLUS)) {
9642 		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9643 		for (i = 0; i < 16; i++)
9644 			tw32_tx_mbox(mbox + i * 8, 0);
9645 	}
9646 
9647 	/* Clear status block in ram. */
9648 	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9649 
9650 	/* Set status block DMA address */
9651 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9652 	     ((u64) tnapi->status_mapping >> 32));
9653 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9654 	     ((u64) tnapi->status_mapping & 0xffffffff));
9655 
9656 	stblk = HOSTCC_STATBLCK_RING1;
9657 
9658 	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9659 		u64 mapping = (u64)tnapi->status_mapping;
9660 		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9661 		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9662 		stblk += 8;
9663 
9664 		/* Clear status block in ram. */
9665 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9666 	}
9667 
9668 	tg3_tx_rcbs_init(tp);
9669 	tg3_rx_ret_rcbs_init(tp);
9670 }
9671 
tg3_setup_rxbd_thresholds(struct tg3 * tp)9672 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9673 {
9674 	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9675 
9676 	if (!tg3_flag(tp, 5750_PLUS) ||
9677 	    tg3_flag(tp, 5780_CLASS) ||
9678 	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
9679 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
9680 	    tg3_flag(tp, 57765_PLUS))
9681 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9682 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9683 		 tg3_asic_rev(tp) == ASIC_REV_5787)
9684 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9685 	else
9686 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9687 
9688 	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9689 	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9690 
9691 	val = min(nic_rep_thresh, host_rep_thresh);
9692 	tw32(RCVBDI_STD_THRESH, val);
9693 
9694 	if (tg3_flag(tp, 57765_PLUS))
9695 		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9696 
9697 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9698 		return;
9699 
9700 	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9701 
9702 	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9703 
9704 	val = min(bdcache_maxcnt / 2, host_rep_thresh);
9705 	tw32(RCVBDI_JUMBO_THRESH, val);
9706 
9707 	if (tg3_flag(tp, 57765_PLUS))
9708 		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9709 }
9710 
calc_crc(unsigned char * buf,int len)9711 static inline u32 calc_crc(unsigned char *buf, int len)
9712 {
9713 	u32 reg;
9714 	u32 tmp;
9715 	int j, k;
9716 
9717 	reg = 0xffffffff;
9718 
9719 	for (j = 0; j < len; j++) {
9720 		reg ^= buf[j];
9721 
9722 		for (k = 0; k < 8; k++) {
9723 			tmp = reg & 0x01;
9724 
9725 			reg >>= 1;
9726 
9727 			if (tmp)
9728 				reg ^= CRC32_POLY_LE;
9729 		}
9730 	}
9731 
9732 	return ~reg;
9733 }
9734 
tg3_set_multi(struct tg3 * tp,unsigned int accept_all)9735 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9736 {
9737 	/* accept or reject all multicast frames */
9738 	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9739 	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9740 	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9741 	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9742 }
9743 
__tg3_set_rx_mode(struct net_device * dev)9744 static void __tg3_set_rx_mode(struct net_device *dev)
9745 {
9746 	struct tg3 *tp = netdev_priv(dev);
9747 	u32 rx_mode;
9748 
9749 	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9750 				  RX_MODE_KEEP_VLAN_TAG);
9751 
9752 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9753 	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9754 	 * flag clear.
9755 	 */
9756 	if (!tg3_flag(tp, ENABLE_ASF))
9757 		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9758 #endif
9759 
9760 	if (dev->flags & IFF_PROMISC) {
9761 		/* Promiscuous mode. */
9762 		rx_mode |= RX_MODE_PROMISC;
9763 	} else if (dev->flags & IFF_ALLMULTI) {
9764 		/* Accept all multicast. */
9765 		tg3_set_multi(tp, 1);
9766 	} else if (netdev_mc_empty(dev)) {
9767 		/* Reject all multicast. */
9768 		tg3_set_multi(tp, 0);
9769 	} else {
9770 		/* Accept one or more multicast(s). */
9771 		struct netdev_hw_addr *ha;
9772 		u32 mc_filter[4] = { 0, };
9773 		u32 regidx;
9774 		u32 bit;
9775 		u32 crc;
9776 
9777 		netdev_for_each_mc_addr(ha, dev) {
9778 			crc = calc_crc(ha->addr, ETH_ALEN);
9779 			bit = ~crc & 0x7f;
9780 			regidx = (bit & 0x60) >> 5;
9781 			bit &= 0x1f;
9782 			mc_filter[regidx] |= (1 << bit);
9783 		}
9784 
9785 		tw32(MAC_HASH_REG_0, mc_filter[0]);
9786 		tw32(MAC_HASH_REG_1, mc_filter[1]);
9787 		tw32(MAC_HASH_REG_2, mc_filter[2]);
9788 		tw32(MAC_HASH_REG_3, mc_filter[3]);
9789 	}
9790 
9791 	if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9792 		rx_mode |= RX_MODE_PROMISC;
9793 	} else if (!(dev->flags & IFF_PROMISC)) {
9794 		/* Add all entries into to the mac addr filter list */
9795 		int i = 0;
9796 		struct netdev_hw_addr *ha;
9797 
9798 		netdev_for_each_uc_addr(ha, dev) {
9799 			__tg3_set_one_mac_addr(tp, ha->addr,
9800 					       i + TG3_UCAST_ADDR_IDX(tp));
9801 			i++;
9802 		}
9803 	}
9804 
9805 	if (rx_mode != tp->rx_mode) {
9806 		tp->rx_mode = rx_mode;
9807 		tw32_f(MAC_RX_MODE, rx_mode);
9808 		udelay(10);
9809 	}
9810 }
9811 
tg3_rss_init_dflt_indir_tbl(struct tg3 * tp,u32 qcnt)9812 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9813 {
9814 	int i;
9815 
9816 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9817 		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9818 }
9819 
tg3_rss_check_indir_tbl(struct tg3 * tp)9820 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9821 {
9822 	int i;
9823 
9824 	if (!tg3_flag(tp, SUPPORT_MSIX))
9825 		return;
9826 
9827 	if (tp->rxq_cnt == 1) {
9828 		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9829 		return;
9830 	}
9831 
9832 	/* Validate table against current IRQ count */
9833 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9834 		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9835 			break;
9836 	}
9837 
9838 	if (i != TG3_RSS_INDIR_TBL_SIZE)
9839 		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9840 }
9841 
tg3_rss_write_indir_tbl(struct tg3 * tp)9842 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9843 {
9844 	int i = 0;
9845 	u32 reg = MAC_RSS_INDIR_TBL_0;
9846 
9847 	while (i < TG3_RSS_INDIR_TBL_SIZE) {
9848 		u32 val = tp->rss_ind_tbl[i];
9849 		i++;
9850 		for (; i % 8; i++) {
9851 			val <<= 4;
9852 			val |= tp->rss_ind_tbl[i];
9853 		}
9854 		tw32(reg, val);
9855 		reg += 4;
9856 	}
9857 }
9858 
tg3_lso_rd_dma_workaround_bit(struct tg3 * tp)9859 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9860 {
9861 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
9862 		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9863 	else
9864 		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9865 }
9866 
9867 /* tp->lock is held. */
tg3_reset_hw(struct tg3 * tp,bool reset_phy)9868 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9869 {
9870 	u32 val, rdmac_mode;
9871 	int i, err, limit;
9872 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9873 
9874 	tg3_disable_ints(tp);
9875 
9876 	tg3_stop_fw(tp);
9877 
9878 	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9879 
9880 	if (tg3_flag(tp, INIT_COMPLETE))
9881 		tg3_abort_hw(tp, 1);
9882 
9883 	if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9884 	    !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9885 		tg3_phy_pull_config(tp);
9886 		tg3_eee_pull_config(tp, NULL);
9887 		tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9888 	}
9889 
9890 	/* Enable MAC control of LPI */
9891 	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9892 		tg3_setup_eee(tp);
9893 
9894 	if (reset_phy)
9895 		tg3_phy_reset(tp);
9896 
9897 	err = tg3_chip_reset(tp);
9898 	if (err)
9899 		return err;
9900 
9901 	tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9902 
9903 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9904 		val = tr32(TG3_CPMU_CTRL);
9905 		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9906 		tw32(TG3_CPMU_CTRL, val);
9907 
9908 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9909 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9910 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
9911 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9912 
9913 		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9914 		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9915 		val |= CPMU_LNK_AWARE_MACCLK_6_25;
9916 		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9917 
9918 		val = tr32(TG3_CPMU_HST_ACC);
9919 		val &= ~CPMU_HST_ACC_MACCLK_MASK;
9920 		val |= CPMU_HST_ACC_MACCLK_6_25;
9921 		tw32(TG3_CPMU_HST_ACC, val);
9922 	}
9923 
9924 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9925 		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9926 		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9927 		       PCIE_PWR_MGMT_L1_THRESH_4MS;
9928 		tw32(PCIE_PWR_MGMT_THRESH, val);
9929 
9930 		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9931 		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9932 
9933 		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9934 
9935 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9936 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9937 	}
9938 
9939 	if (tg3_flag(tp, L1PLLPD_EN)) {
9940 		u32 grc_mode = tr32(GRC_MODE);
9941 
9942 		/* Access the lower 1K of PL PCIE block registers. */
9943 		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9944 		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9945 
9946 		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9947 		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9948 		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9949 
9950 		tw32(GRC_MODE, grc_mode);
9951 	}
9952 
9953 	if (tg3_flag(tp, 57765_CLASS)) {
9954 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9955 			u32 grc_mode = tr32(GRC_MODE);
9956 
9957 			/* Access the lower 1K of PL PCIE block registers. */
9958 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9959 			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9960 
9961 			val = tr32(TG3_PCIE_TLDLPL_PORT +
9962 				   TG3_PCIE_PL_LO_PHYCTL5);
9963 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9964 			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9965 
9966 			tw32(GRC_MODE, grc_mode);
9967 		}
9968 
9969 		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9970 			u32 grc_mode;
9971 
9972 			/* Fix transmit hangs */
9973 			val = tr32(TG3_CPMU_PADRNG_CTL);
9974 			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9975 			tw32(TG3_CPMU_PADRNG_CTL, val);
9976 
9977 			grc_mode = tr32(GRC_MODE);
9978 
9979 			/* Access the lower 1K of DL PCIE block registers. */
9980 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9981 			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9982 
9983 			val = tr32(TG3_PCIE_TLDLPL_PORT +
9984 				   TG3_PCIE_DL_LO_FTSMAX);
9985 			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9986 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9987 			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9988 
9989 			tw32(GRC_MODE, grc_mode);
9990 		}
9991 
9992 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9993 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9994 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
9995 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9996 	}
9997 
9998 	/* This works around an issue with Athlon chipsets on
9999 	 * B3 tigon3 silicon.  This bit has no effect on any
10000 	 * other revision.  But do not set this on PCI Express
10001 	 * chips and don't even touch the clocks if the CPMU is present.
10002 	 */
10003 	if (!tg3_flag(tp, CPMU_PRESENT)) {
10004 		if (!tg3_flag(tp, PCI_EXPRESS))
10005 			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
10006 		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
10007 	}
10008 
10009 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
10010 	    tg3_flag(tp, PCIX_MODE)) {
10011 		val = tr32(TG3PCI_PCISTATE);
10012 		val |= PCISTATE_RETRY_SAME_DMA;
10013 		tw32(TG3PCI_PCISTATE, val);
10014 	}
10015 
10016 	if (tg3_flag(tp, ENABLE_APE)) {
10017 		/* Allow reads and writes to the
10018 		 * APE register and memory space.
10019 		 */
10020 		val = tr32(TG3PCI_PCISTATE);
10021 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10022 		       PCISTATE_ALLOW_APE_SHMEM_WR |
10023 		       PCISTATE_ALLOW_APE_PSPACE_WR;
10024 		tw32(TG3PCI_PCISTATE, val);
10025 	}
10026 
10027 	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10028 		/* Enable some hw fixes.  */
10029 		val = tr32(TG3PCI_MSI_DATA);
10030 		val |= (1 << 26) | (1 << 28) | (1 << 29);
10031 		tw32(TG3PCI_MSI_DATA, val);
10032 	}
10033 
10034 	/* Descriptor ring init may make accesses to the
10035 	 * NIC SRAM area to setup the TX descriptors, so we
10036 	 * can only do this after the hardware has been
10037 	 * successfully reset.
10038 	 */
10039 	err = tg3_init_rings(tp);
10040 	if (err)
10041 		return err;
10042 
10043 	if (tg3_flag(tp, 57765_PLUS)) {
10044 		val = tr32(TG3PCI_DMA_RW_CTRL) &
10045 		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10046 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10047 			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10048 		if (!tg3_flag(tp, 57765_CLASS) &&
10049 		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
10050 		    tg3_asic_rev(tp) != ASIC_REV_5762)
10051 			val |= DMA_RWCTRL_TAGGED_STAT_WA;
10052 		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10053 	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10054 		   tg3_asic_rev(tp) != ASIC_REV_5761) {
10055 		/* This value is determined during the probe time DMA
10056 		 * engine test, tg3_test_dma.
10057 		 */
10058 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10059 	}
10060 
10061 	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10062 			  GRC_MODE_4X_NIC_SEND_RINGS |
10063 			  GRC_MODE_NO_TX_PHDR_CSUM |
10064 			  GRC_MODE_NO_RX_PHDR_CSUM);
10065 	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10066 
10067 	/* Pseudo-header checksum is done by hardware logic and not
10068 	 * the offload processers, so make the chip do the pseudo-
10069 	 * header checksums on receive.  For transmit it is more
10070 	 * convenient to do the pseudo-header checksum in software
10071 	 * as Linux does that on transmit for us in all cases.
10072 	 */
10073 	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10074 
10075 	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10076 	if (tp->rxptpctl)
10077 		tw32(TG3_RX_PTP_CTL,
10078 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10079 
10080 	if (tg3_flag(tp, PTP_CAPABLE))
10081 		val |= GRC_MODE_TIME_SYNC_ENABLE;
10082 
10083 	tw32(GRC_MODE, tp->grc_mode | val);
10084 
10085 	/* On one of the AMD platform, MRRS is restricted to 4000 because of
10086 	 * south bridge limitation. As a workaround, Driver is setting MRRS
10087 	 * to 2048 instead of default 4096.
10088 	 */
10089 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10090 	    tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10091 		val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10092 		tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10093 	}
10094 
10095 	/* Setup the timer prescalar register.  Clock is always 66Mhz. */
10096 	val = tr32(GRC_MISC_CFG);
10097 	val &= ~0xff;
10098 	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10099 	tw32(GRC_MISC_CFG, val);
10100 
10101 	/* Initialize MBUF/DESC pool. */
10102 	if (tg3_flag(tp, 5750_PLUS)) {
10103 		/* Do nothing.  */
10104 	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10105 		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10106 		if (tg3_asic_rev(tp) == ASIC_REV_5704)
10107 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10108 		else
10109 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10110 		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10111 		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10112 	} else if (tg3_flag(tp, TSO_CAPABLE)) {
10113 		int fw_len;
10114 
10115 		fw_len = tp->fw_len;
10116 		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10117 		tw32(BUFMGR_MB_POOL_ADDR,
10118 		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10119 		tw32(BUFMGR_MB_POOL_SIZE,
10120 		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10121 	}
10122 
10123 	if (tp->dev->mtu <= ETH_DATA_LEN) {
10124 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
10125 		     tp->bufmgr_config.mbuf_read_dma_low_water);
10126 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
10127 		     tp->bufmgr_config.mbuf_mac_rx_low_water);
10128 		tw32(BUFMGR_MB_HIGH_WATER,
10129 		     tp->bufmgr_config.mbuf_high_water);
10130 	} else {
10131 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
10132 		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10133 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
10134 		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10135 		tw32(BUFMGR_MB_HIGH_WATER,
10136 		     tp->bufmgr_config.mbuf_high_water_jumbo);
10137 	}
10138 	tw32(BUFMGR_DMA_LOW_WATER,
10139 	     tp->bufmgr_config.dma_low_water);
10140 	tw32(BUFMGR_DMA_HIGH_WATER,
10141 	     tp->bufmgr_config.dma_high_water);
10142 
10143 	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10144 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
10145 		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10146 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10147 	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
10148 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10149 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10150 		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10151 	tw32(BUFMGR_MODE, val);
10152 	for (i = 0; i < 2000; i++) {
10153 		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10154 			break;
10155 		udelay(10);
10156 	}
10157 	if (i >= 2000) {
10158 		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10159 		return -ENODEV;
10160 	}
10161 
10162 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10163 		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10164 
10165 	tg3_setup_rxbd_thresholds(tp);
10166 
10167 	/* Initialize TG3_BDINFO's at:
10168 	 *  RCVDBDI_STD_BD:	standard eth size rx ring
10169 	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
10170 	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
10171 	 *
10172 	 * like so:
10173 	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
10174 	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
10175 	 *                              ring attribute flags
10176 	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
10177 	 *
10178 	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10179 	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10180 	 *
10181 	 * The size of each ring is fixed in the firmware, but the location is
10182 	 * configurable.
10183 	 */
10184 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10185 	     ((u64) tpr->rx_std_mapping >> 32));
10186 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10187 	     ((u64) tpr->rx_std_mapping & 0xffffffff));
10188 	if (!tg3_flag(tp, 5717_PLUS))
10189 		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10190 		     NIC_SRAM_RX_BUFFER_DESC);
10191 
10192 	/* Disable the mini ring */
10193 	if (!tg3_flag(tp, 5705_PLUS))
10194 		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10195 		     BDINFO_FLAGS_DISABLED);
10196 
10197 	/* Program the jumbo buffer descriptor ring control
10198 	 * blocks on those devices that have them.
10199 	 */
10200 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10201 	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10202 
10203 		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10204 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10205 			     ((u64) tpr->rx_jmb_mapping >> 32));
10206 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10207 			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10208 			val = TG3_RX_JMB_RING_SIZE(tp) <<
10209 			      BDINFO_FLAGS_MAXLEN_SHIFT;
10210 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10211 			     val | BDINFO_FLAGS_USE_EXT_RECV);
10212 			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10213 			    tg3_flag(tp, 57765_CLASS) ||
10214 			    tg3_asic_rev(tp) == ASIC_REV_5762)
10215 				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10216 				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10217 		} else {
10218 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10219 			     BDINFO_FLAGS_DISABLED);
10220 		}
10221 
10222 		if (tg3_flag(tp, 57765_PLUS)) {
10223 			val = TG3_RX_STD_RING_SIZE(tp);
10224 			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10225 			val |= (TG3_RX_STD_DMA_SZ << 2);
10226 		} else
10227 			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10228 	} else
10229 		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10230 
10231 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10232 
10233 	tpr->rx_std_prod_idx = tp->rx_pending;
10234 	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10235 
10236 	tpr->rx_jmb_prod_idx =
10237 		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10238 	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10239 
10240 	tg3_rings_reset(tp);
10241 
10242 	/* Initialize MAC address and backoff seed. */
10243 	__tg3_set_mac_addr(tp, false);
10244 
10245 	/* MTU + ethernet header + FCS + optional VLAN tag */
10246 	tw32(MAC_RX_MTU_SIZE,
10247 	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10248 
10249 	/* The slot time is changed by tg3_setup_phy if we
10250 	 * run at gigabit with half duplex.
10251 	 */
10252 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10253 	      (6 << TX_LENGTHS_IPG_SHIFT) |
10254 	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10255 
10256 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10257 	    tg3_asic_rev(tp) == ASIC_REV_5762)
10258 		val |= tr32(MAC_TX_LENGTHS) &
10259 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
10260 			TX_LENGTHS_CNT_DWN_VAL_MSK);
10261 
10262 	tw32(MAC_TX_LENGTHS, val);
10263 
10264 	/* Receive rules. */
10265 	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10266 	tw32(RCVLPC_CONFIG, 0x0181);
10267 
10268 	/* Calculate RDMAC_MODE setting early, we need it to determine
10269 	 * the RCVLPC_STATE_ENABLE mask.
10270 	 */
10271 	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10272 		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10273 		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10274 		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10275 		      RDMAC_MODE_LNGREAD_ENAB);
10276 
10277 	if (tg3_asic_rev(tp) == ASIC_REV_5717)
10278 		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10279 
10280 	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10281 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10282 	    tg3_asic_rev(tp) == ASIC_REV_57780)
10283 		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10284 			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10285 			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10286 
10287 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10288 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10289 		if (tg3_flag(tp, TSO_CAPABLE)) {
10290 			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10291 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10292 			   !tg3_flag(tp, IS_5788)) {
10293 			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10294 		}
10295 	}
10296 
10297 	if (tg3_flag(tp, PCI_EXPRESS))
10298 		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10299 
10300 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10301 		tp->dma_limit = 0;
10302 		if (tp->dev->mtu <= ETH_DATA_LEN) {
10303 			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10304 			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10305 		}
10306 	}
10307 
10308 	if (tg3_flag(tp, HW_TSO_1) ||
10309 	    tg3_flag(tp, HW_TSO_2) ||
10310 	    tg3_flag(tp, HW_TSO_3))
10311 		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10312 
10313 	if (tg3_flag(tp, 57765_PLUS) ||
10314 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10315 	    tg3_asic_rev(tp) == ASIC_REV_57780)
10316 		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10317 
10318 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10319 	    tg3_asic_rev(tp) == ASIC_REV_5762)
10320 		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10321 
10322 	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10323 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
10324 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10325 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
10326 	    tg3_flag(tp, 57765_PLUS)) {
10327 		u32 tgtreg;
10328 
10329 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
10330 			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10331 		else
10332 			tgtreg = TG3_RDMA_RSRVCTRL_REG;
10333 
10334 		val = tr32(tgtreg);
10335 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10336 		    tg3_asic_rev(tp) == ASIC_REV_5762) {
10337 			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10338 				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10339 				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10340 			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10341 			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10342 			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10343 		}
10344 		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10345 	}
10346 
10347 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10348 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
10349 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
10350 		u32 tgtreg;
10351 
10352 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
10353 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10354 		else
10355 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10356 
10357 		val = tr32(tgtreg);
10358 		tw32(tgtreg, val |
10359 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10360 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10361 	}
10362 
10363 	/* Receive/send statistics. */
10364 	if (tg3_flag(tp, 5750_PLUS)) {
10365 		val = tr32(RCVLPC_STATS_ENABLE);
10366 		val &= ~RCVLPC_STATSENAB_DACK_FIX;
10367 		tw32(RCVLPC_STATS_ENABLE, val);
10368 	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10369 		   tg3_flag(tp, TSO_CAPABLE)) {
10370 		val = tr32(RCVLPC_STATS_ENABLE);
10371 		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10372 		tw32(RCVLPC_STATS_ENABLE, val);
10373 	} else {
10374 		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10375 	}
10376 	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10377 	tw32(SNDDATAI_STATSENAB, 0xffffff);
10378 	tw32(SNDDATAI_STATSCTRL,
10379 	     (SNDDATAI_SCTRL_ENABLE |
10380 	      SNDDATAI_SCTRL_FASTUPD));
10381 
10382 	/* Setup host coalescing engine. */
10383 	tw32(HOSTCC_MODE, 0);
10384 	for (i = 0; i < 2000; i++) {
10385 		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10386 			break;
10387 		udelay(10);
10388 	}
10389 
10390 	__tg3_set_coalesce(tp, &tp->coal);
10391 
10392 	if (!tg3_flag(tp, 5705_PLUS)) {
10393 		/* Status/statistics block address.  See tg3_timer,
10394 		 * the tg3_periodic_fetch_stats call there, and
10395 		 * tg3_get_stats to see how this works for 5705/5750 chips.
10396 		 */
10397 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10398 		     ((u64) tp->stats_mapping >> 32));
10399 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10400 		     ((u64) tp->stats_mapping & 0xffffffff));
10401 		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10402 
10403 		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10404 
10405 		/* Clear statistics and status block memory areas */
10406 		for (i = NIC_SRAM_STATS_BLK;
10407 		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10408 		     i += sizeof(u32)) {
10409 			tg3_write_mem(tp, i, 0);
10410 			udelay(40);
10411 		}
10412 	}
10413 
10414 	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10415 
10416 	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10417 	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10418 	if (!tg3_flag(tp, 5705_PLUS))
10419 		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10420 
10421 	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10422 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10423 		/* reset to prevent losing 1st rx packet intermittently */
10424 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10425 		udelay(10);
10426 	}
10427 
10428 	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10429 			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10430 			MAC_MODE_FHDE_ENABLE;
10431 	if (tg3_flag(tp, ENABLE_APE))
10432 		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10433 	if (!tg3_flag(tp, 5705_PLUS) &&
10434 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10435 	    tg3_asic_rev(tp) != ASIC_REV_5700)
10436 		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10437 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10438 	udelay(40);
10439 
10440 	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10441 	 * If TG3_FLAG_IS_NIC is zero, we should read the
10442 	 * register to preserve the GPIO settings for LOMs. The GPIOs,
10443 	 * whether used as inputs or outputs, are set by boot code after
10444 	 * reset.
10445 	 */
10446 	if (!tg3_flag(tp, IS_NIC)) {
10447 		u32 gpio_mask;
10448 
10449 		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10450 			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10451 			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10452 
10453 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
10454 			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10455 				     GRC_LCLCTRL_GPIO_OUTPUT3;
10456 
10457 		if (tg3_asic_rev(tp) == ASIC_REV_5755)
10458 			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10459 
10460 		tp->grc_local_ctrl &= ~gpio_mask;
10461 		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10462 
10463 		/* GPIO1 must be driven high for eeprom write protect */
10464 		if (tg3_flag(tp, EEPROM_WRITE_PROT))
10465 			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10466 					       GRC_LCLCTRL_GPIO_OUTPUT1);
10467 	}
10468 	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10469 	udelay(100);
10470 
10471 	if (tg3_flag(tp, USING_MSIX)) {
10472 		val = tr32(MSGINT_MODE);
10473 		val |= MSGINT_MODE_ENABLE;
10474 		if (tp->irq_cnt > 1)
10475 			val |= MSGINT_MODE_MULTIVEC_EN;
10476 		if (!tg3_flag(tp, 1SHOT_MSI))
10477 			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10478 		tw32(MSGINT_MODE, val);
10479 	}
10480 
10481 	if (!tg3_flag(tp, 5705_PLUS)) {
10482 		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10483 		udelay(40);
10484 	}
10485 
10486 	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10487 	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10488 	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10489 	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10490 	       WDMAC_MODE_LNGREAD_ENAB);
10491 
10492 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10493 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10494 		if (tg3_flag(tp, TSO_CAPABLE) &&
10495 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10496 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10497 			/* nothing */
10498 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10499 			   !tg3_flag(tp, IS_5788)) {
10500 			val |= WDMAC_MODE_RX_ACCEL;
10501 		}
10502 	}
10503 
10504 	/* Enable host coalescing bug fix */
10505 	if (tg3_flag(tp, 5755_PLUS))
10506 		val |= WDMAC_MODE_STATUS_TAG_FIX;
10507 
10508 	if (tg3_asic_rev(tp) == ASIC_REV_5785)
10509 		val |= WDMAC_MODE_BURST_ALL_DATA;
10510 
10511 	tw32_f(WDMAC_MODE, val);
10512 	udelay(40);
10513 
10514 	if (tg3_flag(tp, PCIX_MODE)) {
10515 		u16 pcix_cmd;
10516 
10517 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10518 				     &pcix_cmd);
10519 		if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10520 			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10521 			pcix_cmd |= PCI_X_CMD_READ_2K;
10522 		} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10523 			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10524 			pcix_cmd |= PCI_X_CMD_READ_2K;
10525 		}
10526 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10527 				      pcix_cmd);
10528 	}
10529 
10530 	tw32_f(RDMAC_MODE, rdmac_mode);
10531 	udelay(40);
10532 
10533 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10534 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
10535 		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10536 			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10537 				break;
10538 		}
10539 		if (i < TG3_NUM_RDMA_CHANNELS) {
10540 			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10541 			val |= tg3_lso_rd_dma_workaround_bit(tp);
10542 			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10543 			tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10544 		}
10545 	}
10546 
10547 	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10548 	if (!tg3_flag(tp, 5705_PLUS))
10549 		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10550 
10551 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
10552 		tw32(SNDDATAC_MODE,
10553 		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10554 	else
10555 		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10556 
10557 	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10558 	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10559 	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10560 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
10561 		val |= RCVDBDI_MODE_LRG_RING_SZ;
10562 	tw32(RCVDBDI_MODE, val);
10563 	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10564 	if (tg3_flag(tp, HW_TSO_1) ||
10565 	    tg3_flag(tp, HW_TSO_2) ||
10566 	    tg3_flag(tp, HW_TSO_3))
10567 		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10568 	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10569 	if (tg3_flag(tp, ENABLE_TSS))
10570 		val |= SNDBDI_MODE_MULTI_TXQ_EN;
10571 	tw32(SNDBDI_MODE, val);
10572 	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10573 
10574 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10575 		err = tg3_load_5701_a0_firmware_fix(tp);
10576 		if (err)
10577 			return err;
10578 	}
10579 
10580 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10581 		/* Ignore any errors for the firmware download. If download
10582 		 * fails, the device will operate with EEE disabled
10583 		 */
10584 		tg3_load_57766_firmware(tp);
10585 	}
10586 
10587 	if (tg3_flag(tp, TSO_CAPABLE)) {
10588 		err = tg3_load_tso_firmware(tp);
10589 		if (err)
10590 			return err;
10591 	}
10592 
10593 	tp->tx_mode = TX_MODE_ENABLE;
10594 
10595 	if (tg3_flag(tp, 5755_PLUS) ||
10596 	    tg3_asic_rev(tp) == ASIC_REV_5906)
10597 		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10598 
10599 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10600 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
10601 		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10602 		tp->tx_mode &= ~val;
10603 		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10604 	}
10605 
10606 	tw32_f(MAC_TX_MODE, tp->tx_mode);
10607 	udelay(100);
10608 
10609 	if (tg3_flag(tp, ENABLE_RSS)) {
10610 		u32 rss_key[10];
10611 
10612 		tg3_rss_write_indir_tbl(tp);
10613 
10614 		netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10615 
10616 		for (i = 0; i < 10 ; i++)
10617 			tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10618 	}
10619 
10620 	tp->rx_mode = RX_MODE_ENABLE;
10621 	if (tg3_flag(tp, 5755_PLUS))
10622 		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10623 
10624 	if (tg3_asic_rev(tp) == ASIC_REV_5762)
10625 		tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10626 
10627 	if (tg3_flag(tp, ENABLE_RSS))
10628 		tp->rx_mode |= RX_MODE_RSS_ENABLE |
10629 			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
10630 			       RX_MODE_RSS_IPV6_HASH_EN |
10631 			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
10632 			       RX_MODE_RSS_IPV4_HASH_EN |
10633 			       RX_MODE_RSS_TCP_IPV4_HASH_EN;
10634 
10635 	tw32_f(MAC_RX_MODE, tp->rx_mode);
10636 	udelay(10);
10637 
10638 	tw32(MAC_LED_CTRL, tp->led_ctrl);
10639 
10640 	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10641 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10642 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10643 		udelay(10);
10644 	}
10645 	tw32_f(MAC_RX_MODE, tp->rx_mode);
10646 	udelay(10);
10647 
10648 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10649 		if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10650 		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10651 			/* Set drive transmission level to 1.2V  */
10652 			/* only if the signal pre-emphasis bit is not set  */
10653 			val = tr32(MAC_SERDES_CFG);
10654 			val &= 0xfffff000;
10655 			val |= 0x880;
10656 			tw32(MAC_SERDES_CFG, val);
10657 		}
10658 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10659 			tw32(MAC_SERDES_CFG, 0x616000);
10660 	}
10661 
10662 	/* Prevent chip from dropping frames when flow control
10663 	 * is enabled.
10664 	 */
10665 	if (tg3_flag(tp, 57765_CLASS))
10666 		val = 1;
10667 	else
10668 		val = 2;
10669 	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10670 
10671 	if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10672 	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10673 		/* Use hardware link auto-negotiation */
10674 		tg3_flag_set(tp, HW_AUTONEG);
10675 	}
10676 
10677 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10678 	    tg3_asic_rev(tp) == ASIC_REV_5714) {
10679 		u32 tmp;
10680 
10681 		tmp = tr32(SERDES_RX_CTRL);
10682 		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10683 		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10684 		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10685 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10686 	}
10687 
10688 	if (!tg3_flag(tp, USE_PHYLIB)) {
10689 		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10690 			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10691 
10692 		err = tg3_setup_phy(tp, false);
10693 		if (err)
10694 			return err;
10695 
10696 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10697 		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10698 			u32 tmp;
10699 
10700 			/* Clear CRC stats. */
10701 			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10702 				tg3_writephy(tp, MII_TG3_TEST1,
10703 					     tmp | MII_TG3_TEST1_CRC_EN);
10704 				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10705 			}
10706 		}
10707 	}
10708 
10709 	__tg3_set_rx_mode(tp->dev);
10710 
10711 	/* Initialize receive rules. */
10712 	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10713 	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10714 	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10715 	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10716 
10717 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10718 		limit = 8;
10719 	else
10720 		limit = 16;
10721 	if (tg3_flag(tp, ENABLE_ASF))
10722 		limit -= 4;
10723 	switch (limit) {
10724 	case 16:
10725 		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10726 		fallthrough;
10727 	case 15:
10728 		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10729 		fallthrough;
10730 	case 14:
10731 		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10732 		fallthrough;
10733 	case 13:
10734 		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10735 		fallthrough;
10736 	case 12:
10737 		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10738 		fallthrough;
10739 	case 11:
10740 		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10741 		fallthrough;
10742 	case 10:
10743 		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10744 		fallthrough;
10745 	case 9:
10746 		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10747 		fallthrough;
10748 	case 8:
10749 		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10750 		fallthrough;
10751 	case 7:
10752 		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10753 		fallthrough;
10754 	case 6:
10755 		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10756 		fallthrough;
10757 	case 5:
10758 		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10759 		fallthrough;
10760 	case 4:
10761 		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10762 	case 3:
10763 		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10764 	case 2:
10765 	case 1:
10766 
10767 	default:
10768 		break;
10769 	}
10770 
10771 	if (tg3_flag(tp, ENABLE_APE))
10772 		/* Write our heartbeat update interval to APE. */
10773 		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10774 				APE_HOST_HEARTBEAT_INT_5SEC);
10775 
10776 	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10777 
10778 	return 0;
10779 }
10780 
10781 /* Called at device open time to get the chip ready for
10782  * packet processing.  Invoked with tp->lock held.
10783  */
tg3_init_hw(struct tg3 * tp,bool reset_phy)10784 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10785 {
10786 	/* Chip may have been just powered on. If so, the boot code may still
10787 	 * be running initialization. Wait for it to finish to avoid races in
10788 	 * accessing the hardware.
10789 	 */
10790 	tg3_enable_register_access(tp);
10791 	tg3_poll_fw(tp);
10792 
10793 	tg3_switch_clocks(tp);
10794 
10795 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10796 
10797 	return tg3_reset_hw(tp, reset_phy);
10798 }
10799 
10800 #ifdef CONFIG_TIGON3_HWMON
tg3_sd_scan_scratchpad(struct tg3 * tp,struct tg3_ocir * ocir)10801 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10802 {
10803 	u32 off, len = TG3_OCIR_LEN;
10804 	int i;
10805 
10806 	for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) {
10807 		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10808 
10809 		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10810 		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10811 			memset(ocir, 0, len);
10812 	}
10813 }
10814 
10815 /* sysfs attributes for hwmon */
tg3_show_temp(struct device * dev,struct device_attribute * devattr,char * buf)10816 static ssize_t tg3_show_temp(struct device *dev,
10817 			     struct device_attribute *devattr, char *buf)
10818 {
10819 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10820 	struct tg3 *tp = dev_get_drvdata(dev);
10821 	u32 temperature;
10822 
10823 	spin_lock_bh(&tp->lock);
10824 	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10825 				sizeof(temperature));
10826 	spin_unlock_bh(&tp->lock);
10827 	return sprintf(buf, "%u\n", temperature * 1000);
10828 }
10829 
10830 
10831 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
10832 			  TG3_TEMP_SENSOR_OFFSET);
10833 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
10834 			  TG3_TEMP_CAUTION_OFFSET);
10835 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
10836 			  TG3_TEMP_MAX_OFFSET);
10837 
10838 static struct attribute *tg3_attrs[] = {
10839 	&sensor_dev_attr_temp1_input.dev_attr.attr,
10840 	&sensor_dev_attr_temp1_crit.dev_attr.attr,
10841 	&sensor_dev_attr_temp1_max.dev_attr.attr,
10842 	NULL
10843 };
10844 ATTRIBUTE_GROUPS(tg3);
10845 
tg3_hwmon_close(struct tg3 * tp)10846 static void tg3_hwmon_close(struct tg3 *tp)
10847 {
10848 	if (tp->hwmon_dev) {
10849 		hwmon_device_unregister(tp->hwmon_dev);
10850 		tp->hwmon_dev = NULL;
10851 	}
10852 }
10853 
tg3_hwmon_open(struct tg3 * tp)10854 static void tg3_hwmon_open(struct tg3 *tp)
10855 {
10856 	int i;
10857 	u32 size = 0;
10858 	struct pci_dev *pdev = tp->pdev;
10859 	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10860 
10861 	tg3_sd_scan_scratchpad(tp, ocirs);
10862 
10863 	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10864 		if (!ocirs[i].src_data_length)
10865 			continue;
10866 
10867 		size += ocirs[i].src_hdr_length;
10868 		size += ocirs[i].src_data_length;
10869 	}
10870 
10871 	if (!size)
10872 		return;
10873 
10874 	tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10875 							  tp, tg3_groups);
10876 	if (IS_ERR(tp->hwmon_dev)) {
10877 		tp->hwmon_dev = NULL;
10878 		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10879 	}
10880 }
10881 #else
tg3_hwmon_close(struct tg3 * tp)10882 static inline void tg3_hwmon_close(struct tg3 *tp) { }
tg3_hwmon_open(struct tg3 * tp)10883 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10884 #endif /* CONFIG_TIGON3_HWMON */
10885 
10886 
10887 #define TG3_STAT_ADD32(PSTAT, REG) \
10888 do {	u32 __val = tr32(REG); \
10889 	(PSTAT)->low += __val; \
10890 	if ((PSTAT)->low < __val) \
10891 		(PSTAT)->high += 1; \
10892 } while (0)
10893 
tg3_periodic_fetch_stats(struct tg3 * tp)10894 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10895 {
10896 	struct tg3_hw_stats *sp = tp->hw_stats;
10897 
10898 	if (!tp->link_up)
10899 		return;
10900 
10901 	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10902 	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10903 	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10904 	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10905 	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10906 	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10907 	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10908 	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10909 	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10910 	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10911 	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10912 	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10913 	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10914 	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10915 		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10916 		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10917 		u32 val;
10918 
10919 		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10920 		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10921 		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10922 		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10923 	}
10924 
10925 	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10926 	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10927 	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10928 	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10929 	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10930 	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10931 	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10932 	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10933 	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10934 	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10935 	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10936 	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10937 	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10938 	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10939 
10940 	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10941 	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10942 	    tg3_asic_rev(tp) != ASIC_REV_5762 &&
10943 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10944 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10945 		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10946 	} else {
10947 		u32 val = tr32(HOSTCC_FLOW_ATTN);
10948 		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10949 		if (val) {
10950 			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10951 			sp->rx_discards.low += val;
10952 			if (sp->rx_discards.low < val)
10953 				sp->rx_discards.high += 1;
10954 		}
10955 		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10956 	}
10957 	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10958 }
10959 
tg3_chk_missed_msi(struct tg3 * tp)10960 static void tg3_chk_missed_msi(struct tg3 *tp)
10961 {
10962 	u32 i;
10963 
10964 	for (i = 0; i < tp->irq_cnt; i++) {
10965 		struct tg3_napi *tnapi = &tp->napi[i];
10966 
10967 		if (tg3_has_work(tnapi)) {
10968 			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10969 			    tnapi->last_tx_cons == tnapi->tx_cons) {
10970 				if (tnapi->chk_msi_cnt < 1) {
10971 					tnapi->chk_msi_cnt++;
10972 					return;
10973 				}
10974 				tg3_msi(0, tnapi);
10975 			}
10976 		}
10977 		tnapi->chk_msi_cnt = 0;
10978 		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10979 		tnapi->last_tx_cons = tnapi->tx_cons;
10980 	}
10981 }
10982 
tg3_timer(struct timer_list * t)10983 static void tg3_timer(struct timer_list *t)
10984 {
10985 	struct tg3 *tp = from_timer(tp, t, timer);
10986 
10987 	spin_lock(&tp->lock);
10988 
10989 	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10990 		spin_unlock(&tp->lock);
10991 		goto restart_timer;
10992 	}
10993 
10994 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10995 	    tg3_flag(tp, 57765_CLASS))
10996 		tg3_chk_missed_msi(tp);
10997 
10998 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10999 		/* BCM4785: Flush posted writes from GbE to host memory. */
11000 		tr32(HOSTCC_MODE);
11001 	}
11002 
11003 	if (!tg3_flag(tp, TAGGED_STATUS)) {
11004 		/* All of this garbage is because when using non-tagged
11005 		 * IRQ status the mailbox/status_block protocol the chip
11006 		 * uses with the cpu is race prone.
11007 		 */
11008 		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
11009 			tw32(GRC_LOCAL_CTRL,
11010 			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
11011 		} else {
11012 			tw32(HOSTCC_MODE, tp->coalesce_mode |
11013 			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
11014 		}
11015 
11016 		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11017 			spin_unlock(&tp->lock);
11018 			tg3_reset_task_schedule(tp);
11019 			goto restart_timer;
11020 		}
11021 	}
11022 
11023 	/* This part only runs once per second. */
11024 	if (!--tp->timer_counter) {
11025 		if (tg3_flag(tp, 5705_PLUS))
11026 			tg3_periodic_fetch_stats(tp);
11027 
11028 		if (tp->setlpicnt && !--tp->setlpicnt)
11029 			tg3_phy_eee_enable(tp);
11030 
11031 		if (tg3_flag(tp, USE_LINKCHG_REG)) {
11032 			u32 mac_stat;
11033 			int phy_event;
11034 
11035 			mac_stat = tr32(MAC_STATUS);
11036 
11037 			phy_event = 0;
11038 			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11039 				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11040 					phy_event = 1;
11041 			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11042 				phy_event = 1;
11043 
11044 			if (phy_event)
11045 				tg3_setup_phy(tp, false);
11046 		} else if (tg3_flag(tp, POLL_SERDES)) {
11047 			u32 mac_stat = tr32(MAC_STATUS);
11048 			int need_setup = 0;
11049 
11050 			if (tp->link_up &&
11051 			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11052 				need_setup = 1;
11053 			}
11054 			if (!tp->link_up &&
11055 			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
11056 					 MAC_STATUS_SIGNAL_DET))) {
11057 				need_setup = 1;
11058 			}
11059 			if (need_setup) {
11060 				if (!tp->serdes_counter) {
11061 					tw32_f(MAC_MODE,
11062 					     (tp->mac_mode &
11063 					      ~MAC_MODE_PORT_MODE_MASK));
11064 					udelay(40);
11065 					tw32_f(MAC_MODE, tp->mac_mode);
11066 					udelay(40);
11067 				}
11068 				tg3_setup_phy(tp, false);
11069 			}
11070 		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11071 			   tg3_flag(tp, 5780_CLASS)) {
11072 			tg3_serdes_parallel_detect(tp);
11073 		} else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11074 			u32 cpmu = tr32(TG3_CPMU_STATUS);
11075 			bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11076 					 TG3_CPMU_STATUS_LINK_MASK);
11077 
11078 			if (link_up != tp->link_up)
11079 				tg3_setup_phy(tp, false);
11080 		}
11081 
11082 		tp->timer_counter = tp->timer_multiplier;
11083 	}
11084 
11085 	/* Heartbeat is only sent once every 2 seconds.
11086 	 *
11087 	 * The heartbeat is to tell the ASF firmware that the host
11088 	 * driver is still alive.  In the event that the OS crashes,
11089 	 * ASF needs to reset the hardware to free up the FIFO space
11090 	 * that may be filled with rx packets destined for the host.
11091 	 * If the FIFO is full, ASF will no longer function properly.
11092 	 *
11093 	 * Unintended resets have been reported on real time kernels
11094 	 * where the timer doesn't run on time.  Netpoll will also have
11095 	 * same problem.
11096 	 *
11097 	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11098 	 * to check the ring condition when the heartbeat is expiring
11099 	 * before doing the reset.  This will prevent most unintended
11100 	 * resets.
11101 	 */
11102 	if (!--tp->asf_counter) {
11103 		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11104 			tg3_wait_for_event_ack(tp);
11105 
11106 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11107 				      FWCMD_NICDRV_ALIVE3);
11108 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11109 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11110 				      TG3_FW_UPDATE_TIMEOUT_SEC);
11111 
11112 			tg3_generate_fw_event(tp);
11113 		}
11114 		tp->asf_counter = tp->asf_multiplier;
11115 	}
11116 
11117 	/* Update the APE heartbeat every 5 seconds.*/
11118 	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11119 
11120 	spin_unlock(&tp->lock);
11121 
11122 restart_timer:
11123 	tp->timer.expires = jiffies + tp->timer_offset;
11124 	add_timer(&tp->timer);
11125 }
11126 
tg3_timer_init(struct tg3 * tp)11127 static void tg3_timer_init(struct tg3 *tp)
11128 {
11129 	if (tg3_flag(tp, TAGGED_STATUS) &&
11130 	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
11131 	    !tg3_flag(tp, 57765_CLASS))
11132 		tp->timer_offset = HZ;
11133 	else
11134 		tp->timer_offset = HZ / 10;
11135 
11136 	BUG_ON(tp->timer_offset > HZ);
11137 
11138 	tp->timer_multiplier = (HZ / tp->timer_offset);
11139 	tp->asf_multiplier = (HZ / tp->timer_offset) *
11140 			     TG3_FW_UPDATE_FREQ_SEC;
11141 
11142 	timer_setup(&tp->timer, tg3_timer, 0);
11143 }
11144 
tg3_timer_start(struct tg3 * tp)11145 static void tg3_timer_start(struct tg3 *tp)
11146 {
11147 	tp->asf_counter   = tp->asf_multiplier;
11148 	tp->timer_counter = tp->timer_multiplier;
11149 
11150 	tp->timer.expires = jiffies + tp->timer_offset;
11151 	add_timer(&tp->timer);
11152 }
11153 
tg3_timer_stop(struct tg3 * tp)11154 static void tg3_timer_stop(struct tg3 *tp)
11155 {
11156 	del_timer_sync(&tp->timer);
11157 }
11158 
11159 /* Restart hardware after configuration changes, self-test, etc.
11160  * Invoked with tp->lock held.
11161  */
tg3_restart_hw(struct tg3 * tp,bool reset_phy)11162 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11163 	__releases(tp->lock)
11164 	__acquires(tp->lock)
11165 {
11166 	int err;
11167 
11168 	err = tg3_init_hw(tp, reset_phy);
11169 	if (err) {
11170 		netdev_err(tp->dev,
11171 			   "Failed to re-initialize device, aborting\n");
11172 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11173 		tg3_full_unlock(tp);
11174 		tg3_timer_stop(tp);
11175 		tp->irq_sync = 0;
11176 		tg3_napi_enable(tp);
11177 		dev_close(tp->dev);
11178 		tg3_full_lock(tp, 0);
11179 	}
11180 	return err;
11181 }
11182 
tg3_reset_task(struct work_struct * work)11183 static void tg3_reset_task(struct work_struct *work)
11184 {
11185 	struct tg3 *tp = container_of(work, struct tg3, reset_task);
11186 	int err;
11187 
11188 	rtnl_lock();
11189 	tg3_full_lock(tp, 0);
11190 
11191 	if (tp->pcierr_recovery || !netif_running(tp->dev) ||
11192 	    tp->pdev->error_state != pci_channel_io_normal) {
11193 		tg3_flag_clear(tp, RESET_TASK_PENDING);
11194 		tg3_full_unlock(tp);
11195 		rtnl_unlock();
11196 		return;
11197 	}
11198 
11199 	tg3_full_unlock(tp);
11200 
11201 	tg3_phy_stop(tp);
11202 
11203 	tg3_netif_stop(tp);
11204 
11205 	tg3_full_lock(tp, 1);
11206 
11207 	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11208 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
11209 		tp->write32_rx_mbox = tg3_write_flush_reg32;
11210 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
11211 		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11212 	}
11213 
11214 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11215 	err = tg3_init_hw(tp, true);
11216 	if (err) {
11217 		tg3_full_unlock(tp);
11218 		tp->irq_sync = 0;
11219 		tg3_napi_enable(tp);
11220 		/* Clear this flag so that tg3_reset_task_cancel() will not
11221 		 * call cancel_work_sync() and wait forever.
11222 		 */
11223 		tg3_flag_clear(tp, RESET_TASK_PENDING);
11224 		dev_close(tp->dev);
11225 		goto out;
11226 	}
11227 
11228 	tg3_netif_start(tp);
11229 	tg3_full_unlock(tp);
11230 	tg3_phy_start(tp);
11231 	tg3_flag_clear(tp, RESET_TASK_PENDING);
11232 out:
11233 	rtnl_unlock();
11234 }
11235 
tg3_request_irq(struct tg3 * tp,int irq_num)11236 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11237 {
11238 	irq_handler_t fn;
11239 	unsigned long flags;
11240 	char *name;
11241 	struct tg3_napi *tnapi = &tp->napi[irq_num];
11242 
11243 	if (tp->irq_cnt == 1)
11244 		name = tp->dev->name;
11245 	else {
11246 		name = &tnapi->irq_lbl[0];
11247 		if (tnapi->tx_buffers && tnapi->rx_rcb)
11248 			snprintf(name, IFNAMSIZ,
11249 				 "%s-txrx-%d", tp->dev->name, irq_num);
11250 		else if (tnapi->tx_buffers)
11251 			snprintf(name, IFNAMSIZ,
11252 				 "%s-tx-%d", tp->dev->name, irq_num);
11253 		else if (tnapi->rx_rcb)
11254 			snprintf(name, IFNAMSIZ,
11255 				 "%s-rx-%d", tp->dev->name, irq_num);
11256 		else
11257 			snprintf(name, IFNAMSIZ,
11258 				 "%s-%d", tp->dev->name, irq_num);
11259 		name[IFNAMSIZ-1] = 0;
11260 	}
11261 
11262 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11263 		fn = tg3_msi;
11264 		if (tg3_flag(tp, 1SHOT_MSI))
11265 			fn = tg3_msi_1shot;
11266 		flags = 0;
11267 	} else {
11268 		fn = tg3_interrupt;
11269 		if (tg3_flag(tp, TAGGED_STATUS))
11270 			fn = tg3_interrupt_tagged;
11271 		flags = IRQF_SHARED;
11272 	}
11273 
11274 	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11275 }
11276 
tg3_test_interrupt(struct tg3 * tp)11277 static int tg3_test_interrupt(struct tg3 *tp)
11278 {
11279 	struct tg3_napi *tnapi = &tp->napi[0];
11280 	struct net_device *dev = tp->dev;
11281 	int err, i, intr_ok = 0;
11282 	u32 val;
11283 
11284 	if (!netif_running(dev))
11285 		return -ENODEV;
11286 
11287 	tg3_disable_ints(tp);
11288 
11289 	free_irq(tnapi->irq_vec, tnapi);
11290 
11291 	/*
11292 	 * Turn off MSI one shot mode.  Otherwise this test has no
11293 	 * observable way to know whether the interrupt was delivered.
11294 	 */
11295 	if (tg3_flag(tp, 57765_PLUS)) {
11296 		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11297 		tw32(MSGINT_MODE, val);
11298 	}
11299 
11300 	err = request_irq(tnapi->irq_vec, tg3_test_isr,
11301 			  IRQF_SHARED, dev->name, tnapi);
11302 	if (err)
11303 		return err;
11304 
11305 	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11306 	tg3_enable_ints(tp);
11307 
11308 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11309 	       tnapi->coal_now);
11310 
11311 	for (i = 0; i < 5; i++) {
11312 		u32 int_mbox, misc_host_ctrl;
11313 
11314 		int_mbox = tr32_mailbox(tnapi->int_mbox);
11315 		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11316 
11317 		if ((int_mbox != 0) ||
11318 		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11319 			intr_ok = 1;
11320 			break;
11321 		}
11322 
11323 		if (tg3_flag(tp, 57765_PLUS) &&
11324 		    tnapi->hw_status->status_tag != tnapi->last_tag)
11325 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11326 
11327 		msleep(10);
11328 	}
11329 
11330 	tg3_disable_ints(tp);
11331 
11332 	free_irq(tnapi->irq_vec, tnapi);
11333 
11334 	err = tg3_request_irq(tp, 0);
11335 
11336 	if (err)
11337 		return err;
11338 
11339 	if (intr_ok) {
11340 		/* Reenable MSI one shot mode. */
11341 		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11342 			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11343 			tw32(MSGINT_MODE, val);
11344 		}
11345 		return 0;
11346 	}
11347 
11348 	return -EIO;
11349 }
11350 
11351 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
11352  * successfully restored
11353  */
tg3_test_msi(struct tg3 * tp)11354 static int tg3_test_msi(struct tg3 *tp)
11355 {
11356 	int err;
11357 	u16 pci_cmd;
11358 
11359 	if (!tg3_flag(tp, USING_MSI))
11360 		return 0;
11361 
11362 	/* Turn off SERR reporting in case MSI terminates with Master
11363 	 * Abort.
11364 	 */
11365 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11366 	pci_write_config_word(tp->pdev, PCI_COMMAND,
11367 			      pci_cmd & ~PCI_COMMAND_SERR);
11368 
11369 	err = tg3_test_interrupt(tp);
11370 
11371 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11372 
11373 	if (!err)
11374 		return 0;
11375 
11376 	/* other failures */
11377 	if (err != -EIO)
11378 		return err;
11379 
11380 	/* MSI test failed, go back to INTx mode */
11381 	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11382 		    "to INTx mode. Please report this failure to the PCI "
11383 		    "maintainer and include system chipset information\n");
11384 
11385 	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11386 
11387 	pci_disable_msi(tp->pdev);
11388 
11389 	tg3_flag_clear(tp, USING_MSI);
11390 	tp->napi[0].irq_vec = tp->pdev->irq;
11391 
11392 	err = tg3_request_irq(tp, 0);
11393 	if (err)
11394 		return err;
11395 
11396 	/* Need to reset the chip because the MSI cycle may have terminated
11397 	 * with Master Abort.
11398 	 */
11399 	tg3_full_lock(tp, 1);
11400 
11401 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11402 	err = tg3_init_hw(tp, true);
11403 
11404 	tg3_full_unlock(tp);
11405 
11406 	if (err)
11407 		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11408 
11409 	return err;
11410 }
11411 
tg3_request_firmware(struct tg3 * tp)11412 static int tg3_request_firmware(struct tg3 *tp)
11413 {
11414 	const struct tg3_firmware_hdr *fw_hdr;
11415 
11416 	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11417 		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11418 			   tp->fw_needed);
11419 		return -ENOENT;
11420 	}
11421 
11422 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11423 
11424 	/* Firmware blob starts with version numbers, followed by
11425 	 * start address and _full_ length including BSS sections
11426 	 * (which must be longer than the actual data, of course
11427 	 */
11428 
11429 	tp->fw_len = be32_to_cpu(fw_hdr->len);	/* includes bss */
11430 	if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11431 		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11432 			   tp->fw_len, tp->fw_needed);
11433 		release_firmware(tp->fw);
11434 		tp->fw = NULL;
11435 		return -EINVAL;
11436 	}
11437 
11438 	/* We no longer need firmware; we have it. */
11439 	tp->fw_needed = NULL;
11440 	return 0;
11441 }
11442 
tg3_irq_count(struct tg3 * tp)11443 static u32 tg3_irq_count(struct tg3 *tp)
11444 {
11445 	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11446 
11447 	if (irq_cnt > 1) {
11448 		/* We want as many rx rings enabled as there are cpus.
11449 		 * In multiqueue MSI-X mode, the first MSI-X vector
11450 		 * only deals with link interrupts, etc, so we add
11451 		 * one to the number of vectors we are requesting.
11452 		 */
11453 		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11454 	}
11455 
11456 	return irq_cnt;
11457 }
11458 
tg3_enable_msix(struct tg3 * tp)11459 static bool tg3_enable_msix(struct tg3 *tp)
11460 {
11461 	int i, rc;
11462 	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11463 
11464 	tp->txq_cnt = tp->txq_req;
11465 	tp->rxq_cnt = tp->rxq_req;
11466 	if (!tp->rxq_cnt)
11467 		tp->rxq_cnt = netif_get_num_default_rss_queues();
11468 	if (tp->rxq_cnt > tp->rxq_max)
11469 		tp->rxq_cnt = tp->rxq_max;
11470 
11471 	/* Disable multiple TX rings by default.  Simple round-robin hardware
11472 	 * scheduling of the TX rings can cause starvation of rings with
11473 	 * small packets when other rings have TSO or jumbo packets.
11474 	 */
11475 	if (!tp->txq_req)
11476 		tp->txq_cnt = 1;
11477 
11478 	tp->irq_cnt = tg3_irq_count(tp);
11479 
11480 	for (i = 0; i < tp->irq_max; i++) {
11481 		msix_ent[i].entry  = i;
11482 		msix_ent[i].vector = 0;
11483 	}
11484 
11485 	rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11486 	if (rc < 0) {
11487 		return false;
11488 	} else if (rc < tp->irq_cnt) {
11489 		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11490 			      tp->irq_cnt, rc);
11491 		tp->irq_cnt = rc;
11492 		tp->rxq_cnt = max(rc - 1, 1);
11493 		if (tp->txq_cnt)
11494 			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11495 	}
11496 
11497 	for (i = 0; i < tp->irq_max; i++)
11498 		tp->napi[i].irq_vec = msix_ent[i].vector;
11499 
11500 	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11501 		pci_disable_msix(tp->pdev);
11502 		return false;
11503 	}
11504 
11505 	if (tp->irq_cnt == 1)
11506 		return true;
11507 
11508 	tg3_flag_set(tp, ENABLE_RSS);
11509 
11510 	if (tp->txq_cnt > 1)
11511 		tg3_flag_set(tp, ENABLE_TSS);
11512 
11513 	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11514 
11515 	return true;
11516 }
11517 
tg3_ints_init(struct tg3 * tp)11518 static void tg3_ints_init(struct tg3 *tp)
11519 {
11520 	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11521 	    !tg3_flag(tp, TAGGED_STATUS)) {
11522 		/* All MSI supporting chips should support tagged
11523 		 * status.  Assert that this is the case.
11524 		 */
11525 		netdev_warn(tp->dev,
11526 			    "MSI without TAGGED_STATUS? Not using MSI\n");
11527 		goto defcfg;
11528 	}
11529 
11530 	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11531 		tg3_flag_set(tp, USING_MSIX);
11532 	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11533 		tg3_flag_set(tp, USING_MSI);
11534 
11535 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11536 		u32 msi_mode = tr32(MSGINT_MODE);
11537 		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11538 			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11539 		if (!tg3_flag(tp, 1SHOT_MSI))
11540 			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11541 		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11542 	}
11543 defcfg:
11544 	if (!tg3_flag(tp, USING_MSIX)) {
11545 		tp->irq_cnt = 1;
11546 		tp->napi[0].irq_vec = tp->pdev->irq;
11547 	}
11548 
11549 	if (tp->irq_cnt == 1) {
11550 		tp->txq_cnt = 1;
11551 		tp->rxq_cnt = 1;
11552 		netif_set_real_num_tx_queues(tp->dev, 1);
11553 		netif_set_real_num_rx_queues(tp->dev, 1);
11554 	}
11555 }
11556 
tg3_ints_fini(struct tg3 * tp)11557 static void tg3_ints_fini(struct tg3 *tp)
11558 {
11559 	if (tg3_flag(tp, USING_MSIX))
11560 		pci_disable_msix(tp->pdev);
11561 	else if (tg3_flag(tp, USING_MSI))
11562 		pci_disable_msi(tp->pdev);
11563 	tg3_flag_clear(tp, USING_MSI);
11564 	tg3_flag_clear(tp, USING_MSIX);
11565 	tg3_flag_clear(tp, ENABLE_RSS);
11566 	tg3_flag_clear(tp, ENABLE_TSS);
11567 }
11568 
tg3_start(struct tg3 * tp,bool reset_phy,bool test_irq,bool init)11569 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11570 		     bool init)
11571 {
11572 	struct net_device *dev = tp->dev;
11573 	int i, err;
11574 
11575 	/*
11576 	 * Setup interrupts first so we know how
11577 	 * many NAPI resources to allocate
11578 	 */
11579 	tg3_ints_init(tp);
11580 
11581 	tg3_rss_check_indir_tbl(tp);
11582 
11583 	/* The placement of this call is tied
11584 	 * to the setup and use of Host TX descriptors.
11585 	 */
11586 	err = tg3_alloc_consistent(tp);
11587 	if (err)
11588 		goto out_ints_fini;
11589 
11590 	tg3_napi_init(tp);
11591 
11592 	tg3_napi_enable(tp);
11593 
11594 	for (i = 0; i < tp->irq_cnt; i++) {
11595 		err = tg3_request_irq(tp, i);
11596 		if (err) {
11597 			for (i--; i >= 0; i--) {
11598 				struct tg3_napi *tnapi = &tp->napi[i];
11599 
11600 				free_irq(tnapi->irq_vec, tnapi);
11601 			}
11602 			goto out_napi_fini;
11603 		}
11604 	}
11605 
11606 	tg3_full_lock(tp, 0);
11607 
11608 	if (init)
11609 		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11610 
11611 	err = tg3_init_hw(tp, reset_phy);
11612 	if (err) {
11613 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11614 		tg3_free_rings(tp);
11615 	}
11616 
11617 	tg3_full_unlock(tp);
11618 
11619 	if (err)
11620 		goto out_free_irq;
11621 
11622 	if (test_irq && tg3_flag(tp, USING_MSI)) {
11623 		err = tg3_test_msi(tp);
11624 
11625 		if (err) {
11626 			tg3_full_lock(tp, 0);
11627 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11628 			tg3_free_rings(tp);
11629 			tg3_full_unlock(tp);
11630 
11631 			goto out_napi_fini;
11632 		}
11633 
11634 		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11635 			u32 val = tr32(PCIE_TRANSACTION_CFG);
11636 
11637 			tw32(PCIE_TRANSACTION_CFG,
11638 			     val | PCIE_TRANS_CFG_1SHOT_MSI);
11639 		}
11640 	}
11641 
11642 	tg3_phy_start(tp);
11643 
11644 	tg3_hwmon_open(tp);
11645 
11646 	tg3_full_lock(tp, 0);
11647 
11648 	tg3_timer_start(tp);
11649 	tg3_flag_set(tp, INIT_COMPLETE);
11650 	tg3_enable_ints(tp);
11651 
11652 	tg3_ptp_resume(tp);
11653 
11654 	tg3_full_unlock(tp);
11655 
11656 	netif_tx_start_all_queues(dev);
11657 
11658 	/*
11659 	 * Reset loopback feature if it was turned on while the device was down
11660 	 * make sure that it's installed properly now.
11661 	 */
11662 	if (dev->features & NETIF_F_LOOPBACK)
11663 		tg3_set_loopback(dev, dev->features);
11664 
11665 	return 0;
11666 
11667 out_free_irq:
11668 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11669 		struct tg3_napi *tnapi = &tp->napi[i];
11670 		free_irq(tnapi->irq_vec, tnapi);
11671 	}
11672 
11673 out_napi_fini:
11674 	tg3_napi_disable(tp);
11675 	tg3_napi_fini(tp);
11676 	tg3_free_consistent(tp);
11677 
11678 out_ints_fini:
11679 	tg3_ints_fini(tp);
11680 
11681 	return err;
11682 }
11683 
tg3_stop(struct tg3 * tp)11684 static void tg3_stop(struct tg3 *tp)
11685 {
11686 	int i;
11687 
11688 	tg3_reset_task_cancel(tp);
11689 	tg3_netif_stop(tp);
11690 
11691 	tg3_timer_stop(tp);
11692 
11693 	tg3_hwmon_close(tp);
11694 
11695 	tg3_phy_stop(tp);
11696 
11697 	tg3_full_lock(tp, 1);
11698 
11699 	tg3_disable_ints(tp);
11700 
11701 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11702 	tg3_free_rings(tp);
11703 	tg3_flag_clear(tp, INIT_COMPLETE);
11704 
11705 	tg3_full_unlock(tp);
11706 
11707 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11708 		struct tg3_napi *tnapi = &tp->napi[i];
11709 		free_irq(tnapi->irq_vec, tnapi);
11710 	}
11711 
11712 	tg3_ints_fini(tp);
11713 
11714 	tg3_napi_fini(tp);
11715 
11716 	tg3_free_consistent(tp);
11717 }
11718 
tg3_open(struct net_device * dev)11719 static int tg3_open(struct net_device *dev)
11720 {
11721 	struct tg3 *tp = netdev_priv(dev);
11722 	int err;
11723 
11724 	if (tp->pcierr_recovery) {
11725 		netdev_err(dev, "Failed to open device. PCI error recovery "
11726 			   "in progress\n");
11727 		return -EAGAIN;
11728 	}
11729 
11730 	if (tp->fw_needed) {
11731 		err = tg3_request_firmware(tp);
11732 		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11733 			if (err) {
11734 				netdev_warn(tp->dev, "EEE capability disabled\n");
11735 				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11736 			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11737 				netdev_warn(tp->dev, "EEE capability restored\n");
11738 				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11739 			}
11740 		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11741 			if (err)
11742 				return err;
11743 		} else if (err) {
11744 			netdev_warn(tp->dev, "TSO capability disabled\n");
11745 			tg3_flag_clear(tp, TSO_CAPABLE);
11746 		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
11747 			netdev_notice(tp->dev, "TSO capability restored\n");
11748 			tg3_flag_set(tp, TSO_CAPABLE);
11749 		}
11750 	}
11751 
11752 	tg3_carrier_off(tp);
11753 
11754 	err = tg3_power_up(tp);
11755 	if (err)
11756 		return err;
11757 
11758 	tg3_full_lock(tp, 0);
11759 
11760 	tg3_disable_ints(tp);
11761 	tg3_flag_clear(tp, INIT_COMPLETE);
11762 
11763 	tg3_full_unlock(tp);
11764 
11765 	err = tg3_start(tp,
11766 			!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11767 			true, true);
11768 	if (err) {
11769 		tg3_frob_aux_power(tp, false);
11770 		pci_set_power_state(tp->pdev, PCI_D3hot);
11771 	}
11772 
11773 	return err;
11774 }
11775 
tg3_close(struct net_device * dev)11776 static int tg3_close(struct net_device *dev)
11777 {
11778 	struct tg3 *tp = netdev_priv(dev);
11779 
11780 	if (tp->pcierr_recovery) {
11781 		netdev_err(dev, "Failed to close device. PCI error recovery "
11782 			   "in progress\n");
11783 		return -EAGAIN;
11784 	}
11785 
11786 	tg3_stop(tp);
11787 
11788 	if (pci_device_is_present(tp->pdev)) {
11789 		tg3_power_down_prepare(tp);
11790 
11791 		tg3_carrier_off(tp);
11792 	}
11793 	return 0;
11794 }
11795 
get_stat64(tg3_stat64_t * val)11796 static inline u64 get_stat64(tg3_stat64_t *val)
11797 {
11798        return ((u64)val->high << 32) | ((u64)val->low);
11799 }
11800 
tg3_calc_crc_errors(struct tg3 * tp)11801 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11802 {
11803 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11804 
11805 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11806 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11807 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
11808 		u32 val;
11809 
11810 		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11811 			tg3_writephy(tp, MII_TG3_TEST1,
11812 				     val | MII_TG3_TEST1_CRC_EN);
11813 			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11814 		} else
11815 			val = 0;
11816 
11817 		tp->phy_crc_errors += val;
11818 
11819 		return tp->phy_crc_errors;
11820 	}
11821 
11822 	return get_stat64(&hw_stats->rx_fcs_errors);
11823 }
11824 
11825 #define ESTAT_ADD(member) \
11826 	estats->member =	old_estats->member + \
11827 				get_stat64(&hw_stats->member)
11828 
tg3_get_estats(struct tg3 * tp,struct tg3_ethtool_stats * estats)11829 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11830 {
11831 	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11832 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11833 
11834 	ESTAT_ADD(rx_octets);
11835 	ESTAT_ADD(rx_fragments);
11836 	ESTAT_ADD(rx_ucast_packets);
11837 	ESTAT_ADD(rx_mcast_packets);
11838 	ESTAT_ADD(rx_bcast_packets);
11839 	ESTAT_ADD(rx_fcs_errors);
11840 	ESTAT_ADD(rx_align_errors);
11841 	ESTAT_ADD(rx_xon_pause_rcvd);
11842 	ESTAT_ADD(rx_xoff_pause_rcvd);
11843 	ESTAT_ADD(rx_mac_ctrl_rcvd);
11844 	ESTAT_ADD(rx_xoff_entered);
11845 	ESTAT_ADD(rx_frame_too_long_errors);
11846 	ESTAT_ADD(rx_jabbers);
11847 	ESTAT_ADD(rx_undersize_packets);
11848 	ESTAT_ADD(rx_in_length_errors);
11849 	ESTAT_ADD(rx_out_length_errors);
11850 	ESTAT_ADD(rx_64_or_less_octet_packets);
11851 	ESTAT_ADD(rx_65_to_127_octet_packets);
11852 	ESTAT_ADD(rx_128_to_255_octet_packets);
11853 	ESTAT_ADD(rx_256_to_511_octet_packets);
11854 	ESTAT_ADD(rx_512_to_1023_octet_packets);
11855 	ESTAT_ADD(rx_1024_to_1522_octet_packets);
11856 	ESTAT_ADD(rx_1523_to_2047_octet_packets);
11857 	ESTAT_ADD(rx_2048_to_4095_octet_packets);
11858 	ESTAT_ADD(rx_4096_to_8191_octet_packets);
11859 	ESTAT_ADD(rx_8192_to_9022_octet_packets);
11860 
11861 	ESTAT_ADD(tx_octets);
11862 	ESTAT_ADD(tx_collisions);
11863 	ESTAT_ADD(tx_xon_sent);
11864 	ESTAT_ADD(tx_xoff_sent);
11865 	ESTAT_ADD(tx_flow_control);
11866 	ESTAT_ADD(tx_mac_errors);
11867 	ESTAT_ADD(tx_single_collisions);
11868 	ESTAT_ADD(tx_mult_collisions);
11869 	ESTAT_ADD(tx_deferred);
11870 	ESTAT_ADD(tx_excessive_collisions);
11871 	ESTAT_ADD(tx_late_collisions);
11872 	ESTAT_ADD(tx_collide_2times);
11873 	ESTAT_ADD(tx_collide_3times);
11874 	ESTAT_ADD(tx_collide_4times);
11875 	ESTAT_ADD(tx_collide_5times);
11876 	ESTAT_ADD(tx_collide_6times);
11877 	ESTAT_ADD(tx_collide_7times);
11878 	ESTAT_ADD(tx_collide_8times);
11879 	ESTAT_ADD(tx_collide_9times);
11880 	ESTAT_ADD(tx_collide_10times);
11881 	ESTAT_ADD(tx_collide_11times);
11882 	ESTAT_ADD(tx_collide_12times);
11883 	ESTAT_ADD(tx_collide_13times);
11884 	ESTAT_ADD(tx_collide_14times);
11885 	ESTAT_ADD(tx_collide_15times);
11886 	ESTAT_ADD(tx_ucast_packets);
11887 	ESTAT_ADD(tx_mcast_packets);
11888 	ESTAT_ADD(tx_bcast_packets);
11889 	ESTAT_ADD(tx_carrier_sense_errors);
11890 	ESTAT_ADD(tx_discards);
11891 	ESTAT_ADD(tx_errors);
11892 
11893 	ESTAT_ADD(dma_writeq_full);
11894 	ESTAT_ADD(dma_write_prioq_full);
11895 	ESTAT_ADD(rxbds_empty);
11896 	ESTAT_ADD(rx_discards);
11897 	ESTAT_ADD(rx_errors);
11898 	ESTAT_ADD(rx_threshold_hit);
11899 
11900 	ESTAT_ADD(dma_readq_full);
11901 	ESTAT_ADD(dma_read_prioq_full);
11902 	ESTAT_ADD(tx_comp_queue_full);
11903 
11904 	ESTAT_ADD(ring_set_send_prod_index);
11905 	ESTAT_ADD(ring_status_update);
11906 	ESTAT_ADD(nic_irqs);
11907 	ESTAT_ADD(nic_avoided_irqs);
11908 	ESTAT_ADD(nic_tx_threshold_hit);
11909 
11910 	ESTAT_ADD(mbuf_lwm_thresh_hit);
11911 }
11912 
tg3_get_nstats(struct tg3 * tp,struct rtnl_link_stats64 * stats)11913 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11914 {
11915 	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11916 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11917 	unsigned long rx_dropped;
11918 	unsigned long tx_dropped;
11919 	int i;
11920 
11921 	stats->rx_packets = old_stats->rx_packets +
11922 		get_stat64(&hw_stats->rx_ucast_packets) +
11923 		get_stat64(&hw_stats->rx_mcast_packets) +
11924 		get_stat64(&hw_stats->rx_bcast_packets);
11925 
11926 	stats->tx_packets = old_stats->tx_packets +
11927 		get_stat64(&hw_stats->tx_ucast_packets) +
11928 		get_stat64(&hw_stats->tx_mcast_packets) +
11929 		get_stat64(&hw_stats->tx_bcast_packets);
11930 
11931 	stats->rx_bytes = old_stats->rx_bytes +
11932 		get_stat64(&hw_stats->rx_octets);
11933 	stats->tx_bytes = old_stats->tx_bytes +
11934 		get_stat64(&hw_stats->tx_octets);
11935 
11936 	stats->rx_errors = old_stats->rx_errors +
11937 		get_stat64(&hw_stats->rx_errors);
11938 	stats->tx_errors = old_stats->tx_errors +
11939 		get_stat64(&hw_stats->tx_errors) +
11940 		get_stat64(&hw_stats->tx_mac_errors) +
11941 		get_stat64(&hw_stats->tx_carrier_sense_errors) +
11942 		get_stat64(&hw_stats->tx_discards);
11943 
11944 	stats->multicast = old_stats->multicast +
11945 		get_stat64(&hw_stats->rx_mcast_packets);
11946 	stats->collisions = old_stats->collisions +
11947 		get_stat64(&hw_stats->tx_collisions);
11948 
11949 	stats->rx_length_errors = old_stats->rx_length_errors +
11950 		get_stat64(&hw_stats->rx_frame_too_long_errors) +
11951 		get_stat64(&hw_stats->rx_undersize_packets);
11952 
11953 	stats->rx_frame_errors = old_stats->rx_frame_errors +
11954 		get_stat64(&hw_stats->rx_align_errors);
11955 	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11956 		get_stat64(&hw_stats->tx_discards);
11957 	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11958 		get_stat64(&hw_stats->tx_carrier_sense_errors);
11959 
11960 	stats->rx_crc_errors = old_stats->rx_crc_errors +
11961 		tg3_calc_crc_errors(tp);
11962 
11963 	stats->rx_missed_errors = old_stats->rx_missed_errors +
11964 		get_stat64(&hw_stats->rx_discards);
11965 
11966 	/* Aggregate per-queue counters. The per-queue counters are updated
11967 	 * by a single writer, race-free. The result computed by this loop
11968 	 * might not be 100% accurate (counters can be updated in the middle of
11969 	 * the loop) but the next tg3_get_nstats() will recompute the current
11970 	 * value so it is acceptable.
11971 	 *
11972 	 * Note that these counters wrap around at 4G on 32bit machines.
11973 	 */
11974 	rx_dropped = (unsigned long)(old_stats->rx_dropped);
11975 	tx_dropped = (unsigned long)(old_stats->tx_dropped);
11976 
11977 	for (i = 0; i < tp->irq_cnt; i++) {
11978 		struct tg3_napi *tnapi = &tp->napi[i];
11979 
11980 		rx_dropped += tnapi->rx_dropped;
11981 		tx_dropped += tnapi->tx_dropped;
11982 	}
11983 
11984 	stats->rx_dropped = rx_dropped;
11985 	stats->tx_dropped = tx_dropped;
11986 }
11987 
tg3_get_regs_len(struct net_device * dev)11988 static int tg3_get_regs_len(struct net_device *dev)
11989 {
11990 	return TG3_REG_BLK_SIZE;
11991 }
11992 
tg3_get_regs(struct net_device * dev,struct ethtool_regs * regs,void * _p)11993 static void tg3_get_regs(struct net_device *dev,
11994 		struct ethtool_regs *regs, void *_p)
11995 {
11996 	struct tg3 *tp = netdev_priv(dev);
11997 
11998 	regs->version = 0;
11999 
12000 	memset(_p, 0, TG3_REG_BLK_SIZE);
12001 
12002 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12003 		return;
12004 
12005 	tg3_full_lock(tp, 0);
12006 
12007 	tg3_dump_legacy_regs(tp, (u32 *)_p);
12008 
12009 	tg3_full_unlock(tp);
12010 }
12011 
tg3_get_eeprom_len(struct net_device * dev)12012 static int tg3_get_eeprom_len(struct net_device *dev)
12013 {
12014 	struct tg3 *tp = netdev_priv(dev);
12015 
12016 	return tp->nvram_size;
12017 }
12018 
tg3_get_eeprom(struct net_device * dev,struct ethtool_eeprom * eeprom,u8 * data)12019 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12020 {
12021 	struct tg3 *tp = netdev_priv(dev);
12022 	int ret, cpmu_restore = 0;
12023 	u8  *pd;
12024 	u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
12025 	__be32 val;
12026 
12027 	if (tg3_flag(tp, NO_NVRAM))
12028 		return -EINVAL;
12029 
12030 	offset = eeprom->offset;
12031 	len = eeprom->len;
12032 	eeprom->len = 0;
12033 
12034 	eeprom->magic = TG3_EEPROM_MAGIC;
12035 
12036 	/* Override clock, link aware and link idle modes */
12037 	if (tg3_flag(tp, CPMU_PRESENT)) {
12038 		cpmu_val = tr32(TG3_CPMU_CTRL);
12039 		if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
12040 				CPMU_CTRL_LINK_IDLE_MODE)) {
12041 			tw32(TG3_CPMU_CTRL, cpmu_val &
12042 					    ~(CPMU_CTRL_LINK_AWARE_MODE |
12043 					     CPMU_CTRL_LINK_IDLE_MODE));
12044 			cpmu_restore = 1;
12045 		}
12046 	}
12047 	tg3_override_clk(tp);
12048 
12049 	if (offset & 3) {
12050 		/* adjustments to start on required 4 byte boundary */
12051 		b_offset = offset & 3;
12052 		b_count = 4 - b_offset;
12053 		if (b_count > len) {
12054 			/* i.e. offset=1 len=2 */
12055 			b_count = len;
12056 		}
12057 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12058 		if (ret)
12059 			goto eeprom_done;
12060 		memcpy(data, ((char *)&val) + b_offset, b_count);
12061 		len -= b_count;
12062 		offset += b_count;
12063 		eeprom->len += b_count;
12064 	}
12065 
12066 	/* read bytes up to the last 4 byte boundary */
12067 	pd = &data[eeprom->len];
12068 	for (i = 0; i < (len - (len & 3)); i += 4) {
12069 		ret = tg3_nvram_read_be32(tp, offset + i, &val);
12070 		if (ret) {
12071 			if (i)
12072 				i -= 4;
12073 			eeprom->len += i;
12074 			goto eeprom_done;
12075 		}
12076 		memcpy(pd + i, &val, 4);
12077 		if (need_resched()) {
12078 			if (signal_pending(current)) {
12079 				eeprom->len += i;
12080 				ret = -EINTR;
12081 				goto eeprom_done;
12082 			}
12083 			cond_resched();
12084 		}
12085 	}
12086 	eeprom->len += i;
12087 
12088 	if (len & 3) {
12089 		/* read last bytes not ending on 4 byte boundary */
12090 		pd = &data[eeprom->len];
12091 		b_count = len & 3;
12092 		b_offset = offset + len - b_count;
12093 		ret = tg3_nvram_read_be32(tp, b_offset, &val);
12094 		if (ret)
12095 			goto eeprom_done;
12096 		memcpy(pd, &val, b_count);
12097 		eeprom->len += b_count;
12098 	}
12099 	ret = 0;
12100 
12101 eeprom_done:
12102 	/* Restore clock, link aware and link idle modes */
12103 	tg3_restore_clk(tp);
12104 	if (cpmu_restore)
12105 		tw32(TG3_CPMU_CTRL, cpmu_val);
12106 
12107 	return ret;
12108 }
12109 
tg3_set_eeprom(struct net_device * dev,struct ethtool_eeprom * eeprom,u8 * data)12110 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12111 {
12112 	struct tg3 *tp = netdev_priv(dev);
12113 	int ret;
12114 	u32 offset, len, b_offset, odd_len;
12115 	u8 *buf;
12116 	__be32 start = 0, end;
12117 
12118 	if (tg3_flag(tp, NO_NVRAM) ||
12119 	    eeprom->magic != TG3_EEPROM_MAGIC)
12120 		return -EINVAL;
12121 
12122 	offset = eeprom->offset;
12123 	len = eeprom->len;
12124 
12125 	if ((b_offset = (offset & 3))) {
12126 		/* adjustments to start on required 4 byte boundary */
12127 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12128 		if (ret)
12129 			return ret;
12130 		len += b_offset;
12131 		offset &= ~3;
12132 		if (len < 4)
12133 			len = 4;
12134 	}
12135 
12136 	odd_len = 0;
12137 	if (len & 3) {
12138 		/* adjustments to end on required 4 byte boundary */
12139 		odd_len = 1;
12140 		len = (len + 3) & ~3;
12141 		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12142 		if (ret)
12143 			return ret;
12144 	}
12145 
12146 	buf = data;
12147 	if (b_offset || odd_len) {
12148 		buf = kmalloc(len, GFP_KERNEL);
12149 		if (!buf)
12150 			return -ENOMEM;
12151 		if (b_offset)
12152 			memcpy(buf, &start, 4);
12153 		if (odd_len)
12154 			memcpy(buf+len-4, &end, 4);
12155 		memcpy(buf + b_offset, data, eeprom->len);
12156 	}
12157 
12158 	ret = tg3_nvram_write_block(tp, offset, len, buf);
12159 
12160 	if (buf != data)
12161 		kfree(buf);
12162 
12163 	return ret;
12164 }
12165 
tg3_get_link_ksettings(struct net_device * dev,struct ethtool_link_ksettings * cmd)12166 static int tg3_get_link_ksettings(struct net_device *dev,
12167 				  struct ethtool_link_ksettings *cmd)
12168 {
12169 	struct tg3 *tp = netdev_priv(dev);
12170 	u32 supported, advertising;
12171 
12172 	if (tg3_flag(tp, USE_PHYLIB)) {
12173 		struct phy_device *phydev;
12174 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12175 			return -EAGAIN;
12176 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12177 		phy_ethtool_ksettings_get(phydev, cmd);
12178 
12179 		return 0;
12180 	}
12181 
12182 	supported = (SUPPORTED_Autoneg);
12183 
12184 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12185 		supported |= (SUPPORTED_1000baseT_Half |
12186 			      SUPPORTED_1000baseT_Full);
12187 
12188 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12189 		supported |= (SUPPORTED_100baseT_Half |
12190 			      SUPPORTED_100baseT_Full |
12191 			      SUPPORTED_10baseT_Half |
12192 			      SUPPORTED_10baseT_Full |
12193 			      SUPPORTED_TP);
12194 		cmd->base.port = PORT_TP;
12195 	} else {
12196 		supported |= SUPPORTED_FIBRE;
12197 		cmd->base.port = PORT_FIBRE;
12198 	}
12199 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12200 						supported);
12201 
12202 	advertising = tp->link_config.advertising;
12203 	if (tg3_flag(tp, PAUSE_AUTONEG)) {
12204 		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12205 			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12206 				advertising |= ADVERTISED_Pause;
12207 			} else {
12208 				advertising |= ADVERTISED_Pause |
12209 					ADVERTISED_Asym_Pause;
12210 			}
12211 		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12212 			advertising |= ADVERTISED_Asym_Pause;
12213 		}
12214 	}
12215 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12216 						advertising);
12217 
12218 	if (netif_running(dev) && tp->link_up) {
12219 		cmd->base.speed = tp->link_config.active_speed;
12220 		cmd->base.duplex = tp->link_config.active_duplex;
12221 		ethtool_convert_legacy_u32_to_link_mode(
12222 			cmd->link_modes.lp_advertising,
12223 			tp->link_config.rmt_adv);
12224 
12225 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12226 			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12227 				cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12228 			else
12229 				cmd->base.eth_tp_mdix = ETH_TP_MDI;
12230 		}
12231 	} else {
12232 		cmd->base.speed = SPEED_UNKNOWN;
12233 		cmd->base.duplex = DUPLEX_UNKNOWN;
12234 		cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12235 	}
12236 	cmd->base.phy_address = tp->phy_addr;
12237 	cmd->base.autoneg = tp->link_config.autoneg;
12238 	return 0;
12239 }
12240 
tg3_set_link_ksettings(struct net_device * dev,const struct ethtool_link_ksettings * cmd)12241 static int tg3_set_link_ksettings(struct net_device *dev,
12242 				  const struct ethtool_link_ksettings *cmd)
12243 {
12244 	struct tg3 *tp = netdev_priv(dev);
12245 	u32 speed = cmd->base.speed;
12246 	u32 advertising;
12247 
12248 	if (tg3_flag(tp, USE_PHYLIB)) {
12249 		struct phy_device *phydev;
12250 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12251 			return -EAGAIN;
12252 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12253 		return phy_ethtool_ksettings_set(phydev, cmd);
12254 	}
12255 
12256 	if (cmd->base.autoneg != AUTONEG_ENABLE &&
12257 	    cmd->base.autoneg != AUTONEG_DISABLE)
12258 		return -EINVAL;
12259 
12260 	if (cmd->base.autoneg == AUTONEG_DISABLE &&
12261 	    cmd->base.duplex != DUPLEX_FULL &&
12262 	    cmd->base.duplex != DUPLEX_HALF)
12263 		return -EINVAL;
12264 
12265 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
12266 						cmd->link_modes.advertising);
12267 
12268 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
12269 		u32 mask = ADVERTISED_Autoneg |
12270 			   ADVERTISED_Pause |
12271 			   ADVERTISED_Asym_Pause;
12272 
12273 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12274 			mask |= ADVERTISED_1000baseT_Half |
12275 				ADVERTISED_1000baseT_Full;
12276 
12277 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12278 			mask |= ADVERTISED_100baseT_Half |
12279 				ADVERTISED_100baseT_Full |
12280 				ADVERTISED_10baseT_Half |
12281 				ADVERTISED_10baseT_Full |
12282 				ADVERTISED_TP;
12283 		else
12284 			mask |= ADVERTISED_FIBRE;
12285 
12286 		if (advertising & ~mask)
12287 			return -EINVAL;
12288 
12289 		mask &= (ADVERTISED_1000baseT_Half |
12290 			 ADVERTISED_1000baseT_Full |
12291 			 ADVERTISED_100baseT_Half |
12292 			 ADVERTISED_100baseT_Full |
12293 			 ADVERTISED_10baseT_Half |
12294 			 ADVERTISED_10baseT_Full);
12295 
12296 		advertising &= mask;
12297 	} else {
12298 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12299 			if (speed != SPEED_1000)
12300 				return -EINVAL;
12301 
12302 			if (cmd->base.duplex != DUPLEX_FULL)
12303 				return -EINVAL;
12304 		} else {
12305 			if (speed != SPEED_100 &&
12306 			    speed != SPEED_10)
12307 				return -EINVAL;
12308 		}
12309 	}
12310 
12311 	tg3_full_lock(tp, 0);
12312 
12313 	tp->link_config.autoneg = cmd->base.autoneg;
12314 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
12315 		tp->link_config.advertising = (advertising |
12316 					      ADVERTISED_Autoneg);
12317 		tp->link_config.speed = SPEED_UNKNOWN;
12318 		tp->link_config.duplex = DUPLEX_UNKNOWN;
12319 	} else {
12320 		tp->link_config.advertising = 0;
12321 		tp->link_config.speed = speed;
12322 		tp->link_config.duplex = cmd->base.duplex;
12323 	}
12324 
12325 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12326 
12327 	tg3_warn_mgmt_link_flap(tp);
12328 
12329 	if (netif_running(dev))
12330 		tg3_setup_phy(tp, true);
12331 
12332 	tg3_full_unlock(tp);
12333 
12334 	return 0;
12335 }
12336 
tg3_get_drvinfo(struct net_device * dev,struct ethtool_drvinfo * info)12337 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12338 {
12339 	struct tg3 *tp = netdev_priv(dev);
12340 
12341 	strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12342 	strscpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12343 	strscpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12344 }
12345 
tg3_get_wol(struct net_device * dev,struct ethtool_wolinfo * wol)12346 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12347 {
12348 	struct tg3 *tp = netdev_priv(dev);
12349 
12350 	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12351 		wol->supported = WAKE_MAGIC;
12352 	else
12353 		wol->supported = 0;
12354 	wol->wolopts = 0;
12355 	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12356 		wol->wolopts = WAKE_MAGIC;
12357 	memset(&wol->sopass, 0, sizeof(wol->sopass));
12358 }
12359 
tg3_set_wol(struct net_device * dev,struct ethtool_wolinfo * wol)12360 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12361 {
12362 	struct tg3 *tp = netdev_priv(dev);
12363 	struct device *dp = &tp->pdev->dev;
12364 
12365 	if (wol->wolopts & ~WAKE_MAGIC)
12366 		return -EINVAL;
12367 	if ((wol->wolopts & WAKE_MAGIC) &&
12368 	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12369 		return -EINVAL;
12370 
12371 	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12372 
12373 	if (device_may_wakeup(dp))
12374 		tg3_flag_set(tp, WOL_ENABLE);
12375 	else
12376 		tg3_flag_clear(tp, WOL_ENABLE);
12377 
12378 	return 0;
12379 }
12380 
tg3_get_msglevel(struct net_device * dev)12381 static u32 tg3_get_msglevel(struct net_device *dev)
12382 {
12383 	struct tg3 *tp = netdev_priv(dev);
12384 	return tp->msg_enable;
12385 }
12386 
tg3_set_msglevel(struct net_device * dev,u32 value)12387 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12388 {
12389 	struct tg3 *tp = netdev_priv(dev);
12390 	tp->msg_enable = value;
12391 }
12392 
tg3_nway_reset(struct net_device * dev)12393 static int tg3_nway_reset(struct net_device *dev)
12394 {
12395 	struct tg3 *tp = netdev_priv(dev);
12396 	int r;
12397 
12398 	if (!netif_running(dev))
12399 		return -EAGAIN;
12400 
12401 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12402 		return -EINVAL;
12403 
12404 	tg3_warn_mgmt_link_flap(tp);
12405 
12406 	if (tg3_flag(tp, USE_PHYLIB)) {
12407 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12408 			return -EAGAIN;
12409 		r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12410 	} else {
12411 		u32 bmcr;
12412 
12413 		spin_lock_bh(&tp->lock);
12414 		r = -EINVAL;
12415 		tg3_readphy(tp, MII_BMCR, &bmcr);
12416 		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12417 		    ((bmcr & BMCR_ANENABLE) ||
12418 		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12419 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12420 						   BMCR_ANENABLE);
12421 			r = 0;
12422 		}
12423 		spin_unlock_bh(&tp->lock);
12424 	}
12425 
12426 	return r;
12427 }
12428 
tg3_get_ringparam(struct net_device * dev,struct ethtool_ringparam * ering,struct kernel_ethtool_ringparam * kernel_ering,struct netlink_ext_ack * extack)12429 static void tg3_get_ringparam(struct net_device *dev,
12430 			      struct ethtool_ringparam *ering,
12431 			      struct kernel_ethtool_ringparam *kernel_ering,
12432 			      struct netlink_ext_ack *extack)
12433 {
12434 	struct tg3 *tp = netdev_priv(dev);
12435 
12436 	ering->rx_max_pending = tp->rx_std_ring_mask;
12437 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12438 		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12439 	else
12440 		ering->rx_jumbo_max_pending = 0;
12441 
12442 	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12443 
12444 	ering->rx_pending = tp->rx_pending;
12445 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12446 		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12447 	else
12448 		ering->rx_jumbo_pending = 0;
12449 
12450 	ering->tx_pending = tp->napi[0].tx_pending;
12451 }
12452 
tg3_set_ringparam(struct net_device * dev,struct ethtool_ringparam * ering,struct kernel_ethtool_ringparam * kernel_ering,struct netlink_ext_ack * extack)12453 static int tg3_set_ringparam(struct net_device *dev,
12454 			     struct ethtool_ringparam *ering,
12455 			     struct kernel_ethtool_ringparam *kernel_ering,
12456 			     struct netlink_ext_ack *extack)
12457 {
12458 	struct tg3 *tp = netdev_priv(dev);
12459 	int i, irq_sync = 0, err = 0;
12460 	bool reset_phy = false;
12461 
12462 	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12463 	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12464 	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12465 	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
12466 	    (tg3_flag(tp, TSO_BUG) &&
12467 	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12468 		return -EINVAL;
12469 
12470 	if (netif_running(dev)) {
12471 		tg3_phy_stop(tp);
12472 		tg3_netif_stop(tp);
12473 		irq_sync = 1;
12474 	}
12475 
12476 	tg3_full_lock(tp, irq_sync);
12477 
12478 	tp->rx_pending = ering->rx_pending;
12479 
12480 	if (tg3_flag(tp, MAX_RXPEND_64) &&
12481 	    tp->rx_pending > 63)
12482 		tp->rx_pending = 63;
12483 
12484 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12485 		tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12486 
12487 	for (i = 0; i < tp->irq_max; i++)
12488 		tp->napi[i].tx_pending = ering->tx_pending;
12489 
12490 	if (netif_running(dev)) {
12491 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12492 		/* Reset PHY to avoid PHY lock up */
12493 		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12494 		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
12495 		    tg3_asic_rev(tp) == ASIC_REV_5720)
12496 			reset_phy = true;
12497 
12498 		err = tg3_restart_hw(tp, reset_phy);
12499 		if (!err)
12500 			tg3_netif_start(tp);
12501 	}
12502 
12503 	tg3_full_unlock(tp);
12504 
12505 	if (irq_sync && !err)
12506 		tg3_phy_start(tp);
12507 
12508 	return err;
12509 }
12510 
tg3_get_pauseparam(struct net_device * dev,struct ethtool_pauseparam * epause)12511 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12512 {
12513 	struct tg3 *tp = netdev_priv(dev);
12514 
12515 	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12516 
12517 	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12518 		epause->rx_pause = 1;
12519 	else
12520 		epause->rx_pause = 0;
12521 
12522 	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12523 		epause->tx_pause = 1;
12524 	else
12525 		epause->tx_pause = 0;
12526 }
12527 
tg3_set_pauseparam(struct net_device * dev,struct ethtool_pauseparam * epause)12528 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12529 {
12530 	struct tg3 *tp = netdev_priv(dev);
12531 	int err = 0;
12532 	bool reset_phy = false;
12533 
12534 	if (tp->link_config.autoneg == AUTONEG_ENABLE)
12535 		tg3_warn_mgmt_link_flap(tp);
12536 
12537 	if (tg3_flag(tp, USE_PHYLIB)) {
12538 		struct phy_device *phydev;
12539 
12540 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12541 
12542 		if (!phy_validate_pause(phydev, epause))
12543 			return -EINVAL;
12544 
12545 		tp->link_config.flowctrl = 0;
12546 		phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
12547 		if (epause->rx_pause) {
12548 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12549 
12550 			if (epause->tx_pause) {
12551 				tp->link_config.flowctrl |= FLOW_CTRL_TX;
12552 			}
12553 		} else if (epause->tx_pause) {
12554 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12555 		}
12556 
12557 		if (epause->autoneg)
12558 			tg3_flag_set(tp, PAUSE_AUTONEG);
12559 		else
12560 			tg3_flag_clear(tp, PAUSE_AUTONEG);
12561 
12562 		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12563 			if (phydev->autoneg) {
12564 				/* phy_set_asym_pause() will
12565 				 * renegotiate the link to inform our
12566 				 * link partner of our flow control
12567 				 * settings, even if the flow control
12568 				 * is forced.  Let tg3_adjust_link()
12569 				 * do the final flow control setup.
12570 				 */
12571 				return 0;
12572 			}
12573 
12574 			if (!epause->autoneg)
12575 				tg3_setup_flow_control(tp, 0, 0);
12576 		}
12577 	} else {
12578 		int irq_sync = 0;
12579 
12580 		if (netif_running(dev)) {
12581 			tg3_netif_stop(tp);
12582 			irq_sync = 1;
12583 		}
12584 
12585 		tg3_full_lock(tp, irq_sync);
12586 
12587 		if (epause->autoneg)
12588 			tg3_flag_set(tp, PAUSE_AUTONEG);
12589 		else
12590 			tg3_flag_clear(tp, PAUSE_AUTONEG);
12591 		if (epause->rx_pause)
12592 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12593 		else
12594 			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12595 		if (epause->tx_pause)
12596 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12597 		else
12598 			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12599 
12600 		if (netif_running(dev)) {
12601 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12602 			/* Reset PHY to avoid PHY lock up */
12603 			if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12604 			    tg3_asic_rev(tp) == ASIC_REV_5719 ||
12605 			    tg3_asic_rev(tp) == ASIC_REV_5720)
12606 				reset_phy = true;
12607 
12608 			err = tg3_restart_hw(tp, reset_phy);
12609 			if (!err)
12610 				tg3_netif_start(tp);
12611 		}
12612 
12613 		tg3_full_unlock(tp);
12614 	}
12615 
12616 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12617 
12618 	return err;
12619 }
12620 
tg3_get_sset_count(struct net_device * dev,int sset)12621 static int tg3_get_sset_count(struct net_device *dev, int sset)
12622 {
12623 	switch (sset) {
12624 	case ETH_SS_TEST:
12625 		return TG3_NUM_TEST;
12626 	case ETH_SS_STATS:
12627 		return TG3_NUM_STATS;
12628 	default:
12629 		return -EOPNOTSUPP;
12630 	}
12631 }
12632 
tg3_get_rxnfc(struct net_device * dev,struct ethtool_rxnfc * info,u32 * rules __always_unused)12633 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12634 			 u32 *rules __always_unused)
12635 {
12636 	struct tg3 *tp = netdev_priv(dev);
12637 
12638 	if (!tg3_flag(tp, SUPPORT_MSIX))
12639 		return -EOPNOTSUPP;
12640 
12641 	switch (info->cmd) {
12642 	case ETHTOOL_GRXRINGS:
12643 		if (netif_running(tp->dev))
12644 			info->data = tp->rxq_cnt;
12645 		else {
12646 			info->data = num_online_cpus();
12647 			if (info->data > TG3_RSS_MAX_NUM_QS)
12648 				info->data = TG3_RSS_MAX_NUM_QS;
12649 		}
12650 
12651 		return 0;
12652 
12653 	default:
12654 		return -EOPNOTSUPP;
12655 	}
12656 }
12657 
tg3_get_rxfh_indir_size(struct net_device * dev)12658 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12659 {
12660 	u32 size = 0;
12661 	struct tg3 *tp = netdev_priv(dev);
12662 
12663 	if (tg3_flag(tp, SUPPORT_MSIX))
12664 		size = TG3_RSS_INDIR_TBL_SIZE;
12665 
12666 	return size;
12667 }
12668 
tg3_get_rxfh(struct net_device * dev,u32 * indir,u8 * key,u8 * hfunc)12669 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12670 {
12671 	struct tg3 *tp = netdev_priv(dev);
12672 	int i;
12673 
12674 	if (hfunc)
12675 		*hfunc = ETH_RSS_HASH_TOP;
12676 	if (!indir)
12677 		return 0;
12678 
12679 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12680 		indir[i] = tp->rss_ind_tbl[i];
12681 
12682 	return 0;
12683 }
12684 
tg3_set_rxfh(struct net_device * dev,const u32 * indir,const u8 * key,const u8 hfunc)12685 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12686 			const u8 hfunc)
12687 {
12688 	struct tg3 *tp = netdev_priv(dev);
12689 	size_t i;
12690 
12691 	/* We require at least one supported parameter to be changed and no
12692 	 * change in any of the unsupported parameters
12693 	 */
12694 	if (key ||
12695 	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12696 		return -EOPNOTSUPP;
12697 
12698 	if (!indir)
12699 		return 0;
12700 
12701 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12702 		tp->rss_ind_tbl[i] = indir[i];
12703 
12704 	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12705 		return 0;
12706 
12707 	/* It is legal to write the indirection
12708 	 * table while the device is running.
12709 	 */
12710 	tg3_full_lock(tp, 0);
12711 	tg3_rss_write_indir_tbl(tp);
12712 	tg3_full_unlock(tp);
12713 
12714 	return 0;
12715 }
12716 
tg3_get_channels(struct net_device * dev,struct ethtool_channels * channel)12717 static void tg3_get_channels(struct net_device *dev,
12718 			     struct ethtool_channels *channel)
12719 {
12720 	struct tg3 *tp = netdev_priv(dev);
12721 	u32 deflt_qs = netif_get_num_default_rss_queues();
12722 
12723 	channel->max_rx = tp->rxq_max;
12724 	channel->max_tx = tp->txq_max;
12725 
12726 	if (netif_running(dev)) {
12727 		channel->rx_count = tp->rxq_cnt;
12728 		channel->tx_count = tp->txq_cnt;
12729 	} else {
12730 		if (tp->rxq_req)
12731 			channel->rx_count = tp->rxq_req;
12732 		else
12733 			channel->rx_count = min(deflt_qs, tp->rxq_max);
12734 
12735 		if (tp->txq_req)
12736 			channel->tx_count = tp->txq_req;
12737 		else
12738 			channel->tx_count = min(deflt_qs, tp->txq_max);
12739 	}
12740 }
12741 
tg3_set_channels(struct net_device * dev,struct ethtool_channels * channel)12742 static int tg3_set_channels(struct net_device *dev,
12743 			    struct ethtool_channels *channel)
12744 {
12745 	struct tg3 *tp = netdev_priv(dev);
12746 
12747 	if (!tg3_flag(tp, SUPPORT_MSIX))
12748 		return -EOPNOTSUPP;
12749 
12750 	if (channel->rx_count > tp->rxq_max ||
12751 	    channel->tx_count > tp->txq_max)
12752 		return -EINVAL;
12753 
12754 	tp->rxq_req = channel->rx_count;
12755 	tp->txq_req = channel->tx_count;
12756 
12757 	if (!netif_running(dev))
12758 		return 0;
12759 
12760 	tg3_stop(tp);
12761 
12762 	tg3_carrier_off(tp);
12763 
12764 	tg3_start(tp, true, false, false);
12765 
12766 	return 0;
12767 }
12768 
tg3_get_strings(struct net_device * dev,u32 stringset,u8 * buf)12769 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12770 {
12771 	switch (stringset) {
12772 	case ETH_SS_STATS:
12773 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12774 		break;
12775 	case ETH_SS_TEST:
12776 		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12777 		break;
12778 	default:
12779 		WARN_ON(1);	/* we need a WARN() */
12780 		break;
12781 	}
12782 }
12783 
tg3_set_phys_id(struct net_device * dev,enum ethtool_phys_id_state state)12784 static int tg3_set_phys_id(struct net_device *dev,
12785 			    enum ethtool_phys_id_state state)
12786 {
12787 	struct tg3 *tp = netdev_priv(dev);
12788 
12789 	switch (state) {
12790 	case ETHTOOL_ID_ACTIVE:
12791 		return 1;	/* cycle on/off once per second */
12792 
12793 	case ETHTOOL_ID_ON:
12794 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12795 		     LED_CTRL_1000MBPS_ON |
12796 		     LED_CTRL_100MBPS_ON |
12797 		     LED_CTRL_10MBPS_ON |
12798 		     LED_CTRL_TRAFFIC_OVERRIDE |
12799 		     LED_CTRL_TRAFFIC_BLINK |
12800 		     LED_CTRL_TRAFFIC_LED);
12801 		break;
12802 
12803 	case ETHTOOL_ID_OFF:
12804 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12805 		     LED_CTRL_TRAFFIC_OVERRIDE);
12806 		break;
12807 
12808 	case ETHTOOL_ID_INACTIVE:
12809 		tw32(MAC_LED_CTRL, tp->led_ctrl);
12810 		break;
12811 	}
12812 
12813 	return 0;
12814 }
12815 
tg3_get_ethtool_stats(struct net_device * dev,struct ethtool_stats * estats,u64 * tmp_stats)12816 static void tg3_get_ethtool_stats(struct net_device *dev,
12817 				   struct ethtool_stats *estats, u64 *tmp_stats)
12818 {
12819 	struct tg3 *tp = netdev_priv(dev);
12820 
12821 	if (tp->hw_stats)
12822 		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12823 	else
12824 		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12825 }
12826 
tg3_vpd_readblock(struct tg3 * tp,unsigned int * vpdlen)12827 static __be32 *tg3_vpd_readblock(struct tg3 *tp, unsigned int *vpdlen)
12828 {
12829 	int i;
12830 	__be32 *buf;
12831 	u32 offset = 0, len = 0;
12832 	u32 magic, val;
12833 
12834 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12835 		return NULL;
12836 
12837 	if (magic == TG3_EEPROM_MAGIC) {
12838 		for (offset = TG3_NVM_DIR_START;
12839 		     offset < TG3_NVM_DIR_END;
12840 		     offset += TG3_NVM_DIRENT_SIZE) {
12841 			if (tg3_nvram_read(tp, offset, &val))
12842 				return NULL;
12843 
12844 			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12845 			    TG3_NVM_DIRTYPE_EXTVPD)
12846 				break;
12847 		}
12848 
12849 		if (offset != TG3_NVM_DIR_END) {
12850 			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12851 			if (tg3_nvram_read(tp, offset + 4, &offset))
12852 				return NULL;
12853 
12854 			offset = tg3_nvram_logical_addr(tp, offset);
12855 		}
12856 
12857 		if (!offset || !len) {
12858 			offset = TG3_NVM_VPD_OFF;
12859 			len = TG3_NVM_VPD_LEN;
12860 		}
12861 
12862 		buf = kmalloc(len, GFP_KERNEL);
12863 		if (!buf)
12864 			return NULL;
12865 
12866 		for (i = 0; i < len; i += 4) {
12867 			/* The data is in little-endian format in NVRAM.
12868 			 * Use the big-endian read routines to preserve
12869 			 * the byte order as it exists in NVRAM.
12870 			 */
12871 			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12872 				goto error;
12873 		}
12874 		*vpdlen = len;
12875 	} else {
12876 		buf = pci_vpd_alloc(tp->pdev, vpdlen);
12877 		if (IS_ERR(buf))
12878 			return NULL;
12879 	}
12880 
12881 	return buf;
12882 
12883 error:
12884 	kfree(buf);
12885 	return NULL;
12886 }
12887 
12888 #define NVRAM_TEST_SIZE 0x100
12889 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
12890 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
12891 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
12892 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
12893 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
12894 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
12895 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12896 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12897 
tg3_test_nvram(struct tg3 * tp)12898 static int tg3_test_nvram(struct tg3 *tp)
12899 {
12900 	u32 csum, magic;
12901 	__be32 *buf;
12902 	int i, j, k, err = 0, size;
12903 	unsigned int len;
12904 
12905 	if (tg3_flag(tp, NO_NVRAM))
12906 		return 0;
12907 
12908 	if (tg3_nvram_read(tp, 0, &magic) != 0)
12909 		return -EIO;
12910 
12911 	if (magic == TG3_EEPROM_MAGIC)
12912 		size = NVRAM_TEST_SIZE;
12913 	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12914 		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12915 		    TG3_EEPROM_SB_FORMAT_1) {
12916 			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12917 			case TG3_EEPROM_SB_REVISION_0:
12918 				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12919 				break;
12920 			case TG3_EEPROM_SB_REVISION_2:
12921 				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12922 				break;
12923 			case TG3_EEPROM_SB_REVISION_3:
12924 				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12925 				break;
12926 			case TG3_EEPROM_SB_REVISION_4:
12927 				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12928 				break;
12929 			case TG3_EEPROM_SB_REVISION_5:
12930 				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12931 				break;
12932 			case TG3_EEPROM_SB_REVISION_6:
12933 				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12934 				break;
12935 			default:
12936 				return -EIO;
12937 			}
12938 		} else
12939 			return 0;
12940 	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12941 		size = NVRAM_SELFBOOT_HW_SIZE;
12942 	else
12943 		return -EIO;
12944 
12945 	buf = kmalloc(size, GFP_KERNEL);
12946 	if (buf == NULL)
12947 		return -ENOMEM;
12948 
12949 	err = -EIO;
12950 	for (i = 0, j = 0; i < size; i += 4, j++) {
12951 		err = tg3_nvram_read_be32(tp, i, &buf[j]);
12952 		if (err)
12953 			break;
12954 	}
12955 	if (i < size)
12956 		goto out;
12957 
12958 	/* Selfboot format */
12959 	magic = be32_to_cpu(buf[0]);
12960 	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12961 	    TG3_EEPROM_MAGIC_FW) {
12962 		u8 *buf8 = (u8 *) buf, csum8 = 0;
12963 
12964 		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12965 		    TG3_EEPROM_SB_REVISION_2) {
12966 			/* For rev 2, the csum doesn't include the MBA. */
12967 			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12968 				csum8 += buf8[i];
12969 			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12970 				csum8 += buf8[i];
12971 		} else {
12972 			for (i = 0; i < size; i++)
12973 				csum8 += buf8[i];
12974 		}
12975 
12976 		if (csum8 == 0) {
12977 			err = 0;
12978 			goto out;
12979 		}
12980 
12981 		err = -EIO;
12982 		goto out;
12983 	}
12984 
12985 	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12986 	    TG3_EEPROM_MAGIC_HW) {
12987 		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12988 		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12989 		u8 *buf8 = (u8 *) buf;
12990 
12991 		/* Separate the parity bits and the data bytes.  */
12992 		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12993 			if ((i == 0) || (i == 8)) {
12994 				int l;
12995 				u8 msk;
12996 
12997 				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12998 					parity[k++] = buf8[i] & msk;
12999 				i++;
13000 			} else if (i == 16) {
13001 				int l;
13002 				u8 msk;
13003 
13004 				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
13005 					parity[k++] = buf8[i] & msk;
13006 				i++;
13007 
13008 				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
13009 					parity[k++] = buf8[i] & msk;
13010 				i++;
13011 			}
13012 			data[j++] = buf8[i];
13013 		}
13014 
13015 		err = -EIO;
13016 		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
13017 			u8 hw8 = hweight8(data[i]);
13018 
13019 			if ((hw8 & 0x1) && parity[i])
13020 				goto out;
13021 			else if (!(hw8 & 0x1) && !parity[i])
13022 				goto out;
13023 		}
13024 		err = 0;
13025 		goto out;
13026 	}
13027 
13028 	err = -EIO;
13029 
13030 	/* Bootstrap checksum at offset 0x10 */
13031 	csum = calc_crc((unsigned char *) buf, 0x10);
13032 	if (csum != le32_to_cpu(buf[0x10/4]))
13033 		goto out;
13034 
13035 	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
13036 	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
13037 	if (csum != le32_to_cpu(buf[0xfc/4]))
13038 		goto out;
13039 
13040 	kfree(buf);
13041 
13042 	buf = tg3_vpd_readblock(tp, &len);
13043 	if (!buf)
13044 		return -ENOMEM;
13045 
13046 	err = pci_vpd_check_csum(buf, len);
13047 	/* go on if no checksum found */
13048 	if (err == 1)
13049 		err = 0;
13050 out:
13051 	kfree(buf);
13052 	return err;
13053 }
13054 
13055 #define TG3_SERDES_TIMEOUT_SEC	2
13056 #define TG3_COPPER_TIMEOUT_SEC	6
13057 
tg3_test_link(struct tg3 * tp)13058 static int tg3_test_link(struct tg3 *tp)
13059 {
13060 	int i, max;
13061 
13062 	if (!netif_running(tp->dev))
13063 		return -ENODEV;
13064 
13065 	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13066 		max = TG3_SERDES_TIMEOUT_SEC;
13067 	else
13068 		max = TG3_COPPER_TIMEOUT_SEC;
13069 
13070 	for (i = 0; i < max; i++) {
13071 		if (tp->link_up)
13072 			return 0;
13073 
13074 		if (msleep_interruptible(1000))
13075 			break;
13076 	}
13077 
13078 	return -EIO;
13079 }
13080 
13081 /* Only test the commonly used registers */
tg3_test_registers(struct tg3 * tp)13082 static int tg3_test_registers(struct tg3 *tp)
13083 {
13084 	int i, is_5705, is_5750;
13085 	u32 offset, read_mask, write_mask, val, save_val, read_val;
13086 	static struct {
13087 		u16 offset;
13088 		u16 flags;
13089 #define TG3_FL_5705	0x1
13090 #define TG3_FL_NOT_5705	0x2
13091 #define TG3_FL_NOT_5788	0x4
13092 #define TG3_FL_NOT_5750	0x8
13093 		u32 read_mask;
13094 		u32 write_mask;
13095 	} reg_tbl[] = {
13096 		/* MAC Control Registers */
13097 		{ MAC_MODE, TG3_FL_NOT_5705,
13098 			0x00000000, 0x00ef6f8c },
13099 		{ MAC_MODE, TG3_FL_5705,
13100 			0x00000000, 0x01ef6b8c },
13101 		{ MAC_STATUS, TG3_FL_NOT_5705,
13102 			0x03800107, 0x00000000 },
13103 		{ MAC_STATUS, TG3_FL_5705,
13104 			0x03800100, 0x00000000 },
13105 		{ MAC_ADDR_0_HIGH, 0x0000,
13106 			0x00000000, 0x0000ffff },
13107 		{ MAC_ADDR_0_LOW, 0x0000,
13108 			0x00000000, 0xffffffff },
13109 		{ MAC_RX_MTU_SIZE, 0x0000,
13110 			0x00000000, 0x0000ffff },
13111 		{ MAC_TX_MODE, 0x0000,
13112 			0x00000000, 0x00000070 },
13113 		{ MAC_TX_LENGTHS, 0x0000,
13114 			0x00000000, 0x00003fff },
13115 		{ MAC_RX_MODE, TG3_FL_NOT_5705,
13116 			0x00000000, 0x000007fc },
13117 		{ MAC_RX_MODE, TG3_FL_5705,
13118 			0x00000000, 0x000007dc },
13119 		{ MAC_HASH_REG_0, 0x0000,
13120 			0x00000000, 0xffffffff },
13121 		{ MAC_HASH_REG_1, 0x0000,
13122 			0x00000000, 0xffffffff },
13123 		{ MAC_HASH_REG_2, 0x0000,
13124 			0x00000000, 0xffffffff },
13125 		{ MAC_HASH_REG_3, 0x0000,
13126 			0x00000000, 0xffffffff },
13127 
13128 		/* Receive Data and Receive BD Initiator Control Registers. */
13129 		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13130 			0x00000000, 0xffffffff },
13131 		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13132 			0x00000000, 0xffffffff },
13133 		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13134 			0x00000000, 0x00000003 },
13135 		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13136 			0x00000000, 0xffffffff },
13137 		{ RCVDBDI_STD_BD+0, 0x0000,
13138 			0x00000000, 0xffffffff },
13139 		{ RCVDBDI_STD_BD+4, 0x0000,
13140 			0x00000000, 0xffffffff },
13141 		{ RCVDBDI_STD_BD+8, 0x0000,
13142 			0x00000000, 0xffff0002 },
13143 		{ RCVDBDI_STD_BD+0xc, 0x0000,
13144 			0x00000000, 0xffffffff },
13145 
13146 		/* Receive BD Initiator Control Registers. */
13147 		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13148 			0x00000000, 0xffffffff },
13149 		{ RCVBDI_STD_THRESH, TG3_FL_5705,
13150 			0x00000000, 0x000003ff },
13151 		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13152 			0x00000000, 0xffffffff },
13153 
13154 		/* Host Coalescing Control Registers. */
13155 		{ HOSTCC_MODE, TG3_FL_NOT_5705,
13156 			0x00000000, 0x00000004 },
13157 		{ HOSTCC_MODE, TG3_FL_5705,
13158 			0x00000000, 0x000000f6 },
13159 		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13160 			0x00000000, 0xffffffff },
13161 		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13162 			0x00000000, 0x000003ff },
13163 		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13164 			0x00000000, 0xffffffff },
13165 		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13166 			0x00000000, 0x000003ff },
13167 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13168 			0x00000000, 0xffffffff },
13169 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13170 			0x00000000, 0x000000ff },
13171 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13172 			0x00000000, 0xffffffff },
13173 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13174 			0x00000000, 0x000000ff },
13175 		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13176 			0x00000000, 0xffffffff },
13177 		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13178 			0x00000000, 0xffffffff },
13179 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13180 			0x00000000, 0xffffffff },
13181 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13182 			0x00000000, 0x000000ff },
13183 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13184 			0x00000000, 0xffffffff },
13185 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13186 			0x00000000, 0x000000ff },
13187 		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13188 			0x00000000, 0xffffffff },
13189 		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13190 			0x00000000, 0xffffffff },
13191 		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13192 			0x00000000, 0xffffffff },
13193 		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13194 			0x00000000, 0xffffffff },
13195 		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13196 			0x00000000, 0xffffffff },
13197 		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13198 			0xffffffff, 0x00000000 },
13199 		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13200 			0xffffffff, 0x00000000 },
13201 
13202 		/* Buffer Manager Control Registers. */
13203 		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13204 			0x00000000, 0x007fff80 },
13205 		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13206 			0x00000000, 0x007fffff },
13207 		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13208 			0x00000000, 0x0000003f },
13209 		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13210 			0x00000000, 0x000001ff },
13211 		{ BUFMGR_MB_HIGH_WATER, 0x0000,
13212 			0x00000000, 0x000001ff },
13213 		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13214 			0xffffffff, 0x00000000 },
13215 		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13216 			0xffffffff, 0x00000000 },
13217 
13218 		/* Mailbox Registers */
13219 		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13220 			0x00000000, 0x000001ff },
13221 		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13222 			0x00000000, 0x000001ff },
13223 		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13224 			0x00000000, 0x000007ff },
13225 		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13226 			0x00000000, 0x000001ff },
13227 
13228 		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
13229 	};
13230 
13231 	is_5705 = is_5750 = 0;
13232 	if (tg3_flag(tp, 5705_PLUS)) {
13233 		is_5705 = 1;
13234 		if (tg3_flag(tp, 5750_PLUS))
13235 			is_5750 = 1;
13236 	}
13237 
13238 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13239 		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13240 			continue;
13241 
13242 		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13243 			continue;
13244 
13245 		if (tg3_flag(tp, IS_5788) &&
13246 		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
13247 			continue;
13248 
13249 		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13250 			continue;
13251 
13252 		offset = (u32) reg_tbl[i].offset;
13253 		read_mask = reg_tbl[i].read_mask;
13254 		write_mask = reg_tbl[i].write_mask;
13255 
13256 		/* Save the original register content */
13257 		save_val = tr32(offset);
13258 
13259 		/* Determine the read-only value. */
13260 		read_val = save_val & read_mask;
13261 
13262 		/* Write zero to the register, then make sure the read-only bits
13263 		 * are not changed and the read/write bits are all zeros.
13264 		 */
13265 		tw32(offset, 0);
13266 
13267 		val = tr32(offset);
13268 
13269 		/* Test the read-only and read/write bits. */
13270 		if (((val & read_mask) != read_val) || (val & write_mask))
13271 			goto out;
13272 
13273 		/* Write ones to all the bits defined by RdMask and WrMask, then
13274 		 * make sure the read-only bits are not changed and the
13275 		 * read/write bits are all ones.
13276 		 */
13277 		tw32(offset, read_mask | write_mask);
13278 
13279 		val = tr32(offset);
13280 
13281 		/* Test the read-only bits. */
13282 		if ((val & read_mask) != read_val)
13283 			goto out;
13284 
13285 		/* Test the read/write bits. */
13286 		if ((val & write_mask) != write_mask)
13287 			goto out;
13288 
13289 		tw32(offset, save_val);
13290 	}
13291 
13292 	return 0;
13293 
13294 out:
13295 	if (netif_msg_hw(tp))
13296 		netdev_err(tp->dev,
13297 			   "Register test failed at offset %x\n", offset);
13298 	tw32(offset, save_val);
13299 	return -EIO;
13300 }
13301 
tg3_do_mem_test(struct tg3 * tp,u32 offset,u32 len)13302 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13303 {
13304 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13305 	int i;
13306 	u32 j;
13307 
13308 	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13309 		for (j = 0; j < len; j += 4) {
13310 			u32 val;
13311 
13312 			tg3_write_mem(tp, offset + j, test_pattern[i]);
13313 			tg3_read_mem(tp, offset + j, &val);
13314 			if (val != test_pattern[i])
13315 				return -EIO;
13316 		}
13317 	}
13318 	return 0;
13319 }
13320 
tg3_test_memory(struct tg3 * tp)13321 static int tg3_test_memory(struct tg3 *tp)
13322 {
13323 	static struct mem_entry {
13324 		u32 offset;
13325 		u32 len;
13326 	} mem_tbl_570x[] = {
13327 		{ 0x00000000, 0x00b50},
13328 		{ 0x00002000, 0x1c000},
13329 		{ 0xffffffff, 0x00000}
13330 	}, mem_tbl_5705[] = {
13331 		{ 0x00000100, 0x0000c},
13332 		{ 0x00000200, 0x00008},
13333 		{ 0x00004000, 0x00800},
13334 		{ 0x00006000, 0x01000},
13335 		{ 0x00008000, 0x02000},
13336 		{ 0x00010000, 0x0e000},
13337 		{ 0xffffffff, 0x00000}
13338 	}, mem_tbl_5755[] = {
13339 		{ 0x00000200, 0x00008},
13340 		{ 0x00004000, 0x00800},
13341 		{ 0x00006000, 0x00800},
13342 		{ 0x00008000, 0x02000},
13343 		{ 0x00010000, 0x0c000},
13344 		{ 0xffffffff, 0x00000}
13345 	}, mem_tbl_5906[] = {
13346 		{ 0x00000200, 0x00008},
13347 		{ 0x00004000, 0x00400},
13348 		{ 0x00006000, 0x00400},
13349 		{ 0x00008000, 0x01000},
13350 		{ 0x00010000, 0x01000},
13351 		{ 0xffffffff, 0x00000}
13352 	}, mem_tbl_5717[] = {
13353 		{ 0x00000200, 0x00008},
13354 		{ 0x00010000, 0x0a000},
13355 		{ 0x00020000, 0x13c00},
13356 		{ 0xffffffff, 0x00000}
13357 	}, mem_tbl_57765[] = {
13358 		{ 0x00000200, 0x00008},
13359 		{ 0x00004000, 0x00800},
13360 		{ 0x00006000, 0x09800},
13361 		{ 0x00010000, 0x0a000},
13362 		{ 0xffffffff, 0x00000}
13363 	};
13364 	struct mem_entry *mem_tbl;
13365 	int err = 0;
13366 	int i;
13367 
13368 	if (tg3_flag(tp, 5717_PLUS))
13369 		mem_tbl = mem_tbl_5717;
13370 	else if (tg3_flag(tp, 57765_CLASS) ||
13371 		 tg3_asic_rev(tp) == ASIC_REV_5762)
13372 		mem_tbl = mem_tbl_57765;
13373 	else if (tg3_flag(tp, 5755_PLUS))
13374 		mem_tbl = mem_tbl_5755;
13375 	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13376 		mem_tbl = mem_tbl_5906;
13377 	else if (tg3_flag(tp, 5705_PLUS))
13378 		mem_tbl = mem_tbl_5705;
13379 	else
13380 		mem_tbl = mem_tbl_570x;
13381 
13382 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13383 		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13384 		if (err)
13385 			break;
13386 	}
13387 
13388 	return err;
13389 }
13390 
13391 #define TG3_TSO_MSS		500
13392 
13393 #define TG3_TSO_IP_HDR_LEN	20
13394 #define TG3_TSO_TCP_HDR_LEN	20
13395 #define TG3_TSO_TCP_OPT_LEN	12
13396 
13397 static const u8 tg3_tso_header[] = {
13398 0x08, 0x00,
13399 0x45, 0x00, 0x00, 0x00,
13400 0x00, 0x00, 0x40, 0x00,
13401 0x40, 0x06, 0x00, 0x00,
13402 0x0a, 0x00, 0x00, 0x01,
13403 0x0a, 0x00, 0x00, 0x02,
13404 0x0d, 0x00, 0xe0, 0x00,
13405 0x00, 0x00, 0x01, 0x00,
13406 0x00, 0x00, 0x02, 0x00,
13407 0x80, 0x10, 0x10, 0x00,
13408 0x14, 0x09, 0x00, 0x00,
13409 0x01, 0x01, 0x08, 0x0a,
13410 0x11, 0x11, 0x11, 0x11,
13411 0x11, 0x11, 0x11, 0x11,
13412 };
13413 
tg3_run_loopback(struct tg3 * tp,u32 pktsz,bool tso_loopback)13414 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13415 {
13416 	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13417 	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13418 	u32 budget;
13419 	struct sk_buff *skb;
13420 	u8 *tx_data, *rx_data;
13421 	dma_addr_t map;
13422 	int num_pkts, tx_len, rx_len, i, err;
13423 	struct tg3_rx_buffer_desc *desc;
13424 	struct tg3_napi *tnapi, *rnapi;
13425 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13426 
13427 	tnapi = &tp->napi[0];
13428 	rnapi = &tp->napi[0];
13429 	if (tp->irq_cnt > 1) {
13430 		if (tg3_flag(tp, ENABLE_RSS))
13431 			rnapi = &tp->napi[1];
13432 		if (tg3_flag(tp, ENABLE_TSS))
13433 			tnapi = &tp->napi[1];
13434 	}
13435 	coal_now = tnapi->coal_now | rnapi->coal_now;
13436 
13437 	err = -EIO;
13438 
13439 	tx_len = pktsz;
13440 	skb = netdev_alloc_skb(tp->dev, tx_len);
13441 	if (!skb)
13442 		return -ENOMEM;
13443 
13444 	tx_data = skb_put(skb, tx_len);
13445 	memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13446 	memset(tx_data + ETH_ALEN, 0x0, 8);
13447 
13448 	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13449 
13450 	if (tso_loopback) {
13451 		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13452 
13453 		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13454 			      TG3_TSO_TCP_OPT_LEN;
13455 
13456 		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13457 		       sizeof(tg3_tso_header));
13458 		mss = TG3_TSO_MSS;
13459 
13460 		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13461 		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13462 
13463 		/* Set the total length field in the IP header */
13464 		iph->tot_len = htons((u16)(mss + hdr_len));
13465 
13466 		base_flags = (TXD_FLAG_CPU_PRE_DMA |
13467 			      TXD_FLAG_CPU_POST_DMA);
13468 
13469 		if (tg3_flag(tp, HW_TSO_1) ||
13470 		    tg3_flag(tp, HW_TSO_2) ||
13471 		    tg3_flag(tp, HW_TSO_3)) {
13472 			struct tcphdr *th;
13473 			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13474 			th = (struct tcphdr *)&tx_data[val];
13475 			th->check = 0;
13476 		} else
13477 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
13478 
13479 		if (tg3_flag(tp, HW_TSO_3)) {
13480 			mss |= (hdr_len & 0xc) << 12;
13481 			if (hdr_len & 0x10)
13482 				base_flags |= 0x00000010;
13483 			base_flags |= (hdr_len & 0x3e0) << 5;
13484 		} else if (tg3_flag(tp, HW_TSO_2))
13485 			mss |= hdr_len << 9;
13486 		else if (tg3_flag(tp, HW_TSO_1) ||
13487 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
13488 			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13489 		} else {
13490 			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13491 		}
13492 
13493 		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13494 	} else {
13495 		num_pkts = 1;
13496 		data_off = ETH_HLEN;
13497 
13498 		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13499 		    tx_len > VLAN_ETH_FRAME_LEN)
13500 			base_flags |= TXD_FLAG_JMB_PKT;
13501 	}
13502 
13503 	for (i = data_off; i < tx_len; i++)
13504 		tx_data[i] = (u8) (i & 0xff);
13505 
13506 	map = dma_map_single(&tp->pdev->dev, skb->data, tx_len, DMA_TO_DEVICE);
13507 	if (dma_mapping_error(&tp->pdev->dev, map)) {
13508 		dev_kfree_skb(skb);
13509 		return -EIO;
13510 	}
13511 
13512 	val = tnapi->tx_prod;
13513 	tnapi->tx_buffers[val].skb = skb;
13514 	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13515 
13516 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13517 	       rnapi->coal_now);
13518 
13519 	udelay(10);
13520 
13521 	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13522 
13523 	budget = tg3_tx_avail(tnapi);
13524 	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13525 			    base_flags | TXD_FLAG_END, mss, 0)) {
13526 		tnapi->tx_buffers[val].skb = NULL;
13527 		dev_kfree_skb(skb);
13528 		return -EIO;
13529 	}
13530 
13531 	tnapi->tx_prod++;
13532 
13533 	/* Sync BD data before updating mailbox */
13534 	wmb();
13535 
13536 	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13537 	tr32_mailbox(tnapi->prodmbox);
13538 
13539 	udelay(10);
13540 
13541 	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
13542 	for (i = 0; i < 35; i++) {
13543 		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13544 		       coal_now);
13545 
13546 		udelay(10);
13547 
13548 		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13549 		rx_idx = rnapi->hw_status->idx[0].rx_producer;
13550 		if ((tx_idx == tnapi->tx_prod) &&
13551 		    (rx_idx == (rx_start_idx + num_pkts)))
13552 			break;
13553 	}
13554 
13555 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13556 	dev_kfree_skb(skb);
13557 
13558 	if (tx_idx != tnapi->tx_prod)
13559 		goto out;
13560 
13561 	if (rx_idx != rx_start_idx + num_pkts)
13562 		goto out;
13563 
13564 	val = data_off;
13565 	while (rx_idx != rx_start_idx) {
13566 		desc = &rnapi->rx_rcb[rx_start_idx++];
13567 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13568 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13569 
13570 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13571 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13572 			goto out;
13573 
13574 		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13575 			 - ETH_FCS_LEN;
13576 
13577 		if (!tso_loopback) {
13578 			if (rx_len != tx_len)
13579 				goto out;
13580 
13581 			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13582 				if (opaque_key != RXD_OPAQUE_RING_STD)
13583 					goto out;
13584 			} else {
13585 				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13586 					goto out;
13587 			}
13588 		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13589 			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13590 			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
13591 			goto out;
13592 		}
13593 
13594 		if (opaque_key == RXD_OPAQUE_RING_STD) {
13595 			rx_data = tpr->rx_std_buffers[desc_idx].data;
13596 			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13597 					     mapping);
13598 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13599 			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13600 			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13601 					     mapping);
13602 		} else
13603 			goto out;
13604 
13605 		dma_sync_single_for_cpu(&tp->pdev->dev, map, rx_len,
13606 					DMA_FROM_DEVICE);
13607 
13608 		rx_data += TG3_RX_OFFSET(tp);
13609 		for (i = data_off; i < rx_len; i++, val++) {
13610 			if (*(rx_data + i) != (u8) (val & 0xff))
13611 				goto out;
13612 		}
13613 	}
13614 
13615 	err = 0;
13616 
13617 	/* tg3_free_rings will unmap and free the rx_data */
13618 out:
13619 	return err;
13620 }
13621 
13622 #define TG3_STD_LOOPBACK_FAILED		1
13623 #define TG3_JMB_LOOPBACK_FAILED		2
13624 #define TG3_TSO_LOOPBACK_FAILED		4
13625 #define TG3_LOOPBACK_FAILED \
13626 	(TG3_STD_LOOPBACK_FAILED | \
13627 	 TG3_JMB_LOOPBACK_FAILED | \
13628 	 TG3_TSO_LOOPBACK_FAILED)
13629 
tg3_test_loopback(struct tg3 * tp,u64 * data,bool do_extlpbk)13630 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13631 {
13632 	int err = -EIO;
13633 	u32 eee_cap;
13634 	u32 jmb_pkt_sz = 9000;
13635 
13636 	if (tp->dma_limit)
13637 		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13638 
13639 	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13640 	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13641 
13642 	if (!netif_running(tp->dev)) {
13643 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13644 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13645 		if (do_extlpbk)
13646 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13647 		goto done;
13648 	}
13649 
13650 	err = tg3_reset_hw(tp, true);
13651 	if (err) {
13652 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13653 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13654 		if (do_extlpbk)
13655 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13656 		goto done;
13657 	}
13658 
13659 	if (tg3_flag(tp, ENABLE_RSS)) {
13660 		int i;
13661 
13662 		/* Reroute all rx packets to the 1st queue */
13663 		for (i = MAC_RSS_INDIR_TBL_0;
13664 		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13665 			tw32(i, 0x0);
13666 	}
13667 
13668 	/* HW errata - mac loopback fails in some cases on 5780.
13669 	 * Normal traffic and PHY loopback are not affected by
13670 	 * errata.  Also, the MAC loopback test is deprecated for
13671 	 * all newer ASIC revisions.
13672 	 */
13673 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13674 	    !tg3_flag(tp, CPMU_PRESENT)) {
13675 		tg3_mac_loopback(tp, true);
13676 
13677 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13678 			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13679 
13680 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13681 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13682 			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13683 
13684 		tg3_mac_loopback(tp, false);
13685 	}
13686 
13687 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13688 	    !tg3_flag(tp, USE_PHYLIB)) {
13689 		int i;
13690 
13691 		tg3_phy_lpbk_set(tp, 0, false);
13692 
13693 		/* Wait for link */
13694 		for (i = 0; i < 100; i++) {
13695 			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13696 				break;
13697 			mdelay(1);
13698 		}
13699 
13700 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13701 			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13702 		if (tg3_flag(tp, TSO_CAPABLE) &&
13703 		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13704 			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13705 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13706 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13707 			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13708 
13709 		if (do_extlpbk) {
13710 			tg3_phy_lpbk_set(tp, 0, true);
13711 
13712 			/* All link indications report up, but the hardware
13713 			 * isn't really ready for about 20 msec.  Double it
13714 			 * to be sure.
13715 			 */
13716 			mdelay(40);
13717 
13718 			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13719 				data[TG3_EXT_LOOPB_TEST] |=
13720 							TG3_STD_LOOPBACK_FAILED;
13721 			if (tg3_flag(tp, TSO_CAPABLE) &&
13722 			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13723 				data[TG3_EXT_LOOPB_TEST] |=
13724 							TG3_TSO_LOOPBACK_FAILED;
13725 			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13726 			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13727 				data[TG3_EXT_LOOPB_TEST] |=
13728 							TG3_JMB_LOOPBACK_FAILED;
13729 		}
13730 
13731 		/* Re-enable gphy autopowerdown. */
13732 		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13733 			tg3_phy_toggle_apd(tp, true);
13734 	}
13735 
13736 	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13737 	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13738 
13739 done:
13740 	tp->phy_flags |= eee_cap;
13741 
13742 	return err;
13743 }
13744 
tg3_self_test(struct net_device * dev,struct ethtool_test * etest,u64 * data)13745 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13746 			  u64 *data)
13747 {
13748 	struct tg3 *tp = netdev_priv(dev);
13749 	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13750 
13751 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13752 		if (tg3_power_up(tp)) {
13753 			etest->flags |= ETH_TEST_FL_FAILED;
13754 			memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13755 			return;
13756 		}
13757 		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13758 	}
13759 
13760 	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13761 
13762 	if (tg3_test_nvram(tp) != 0) {
13763 		etest->flags |= ETH_TEST_FL_FAILED;
13764 		data[TG3_NVRAM_TEST] = 1;
13765 	}
13766 	if (!doextlpbk && tg3_test_link(tp)) {
13767 		etest->flags |= ETH_TEST_FL_FAILED;
13768 		data[TG3_LINK_TEST] = 1;
13769 	}
13770 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
13771 		int err, err2 = 0, irq_sync = 0;
13772 
13773 		if (netif_running(dev)) {
13774 			tg3_phy_stop(tp);
13775 			tg3_netif_stop(tp);
13776 			irq_sync = 1;
13777 		}
13778 
13779 		tg3_full_lock(tp, irq_sync);
13780 		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13781 		err = tg3_nvram_lock(tp);
13782 		tg3_halt_cpu(tp, RX_CPU_BASE);
13783 		if (!tg3_flag(tp, 5705_PLUS))
13784 			tg3_halt_cpu(tp, TX_CPU_BASE);
13785 		if (!err)
13786 			tg3_nvram_unlock(tp);
13787 
13788 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13789 			tg3_phy_reset(tp);
13790 
13791 		if (tg3_test_registers(tp) != 0) {
13792 			etest->flags |= ETH_TEST_FL_FAILED;
13793 			data[TG3_REGISTER_TEST] = 1;
13794 		}
13795 
13796 		if (tg3_test_memory(tp) != 0) {
13797 			etest->flags |= ETH_TEST_FL_FAILED;
13798 			data[TG3_MEMORY_TEST] = 1;
13799 		}
13800 
13801 		if (doextlpbk)
13802 			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13803 
13804 		if (tg3_test_loopback(tp, data, doextlpbk))
13805 			etest->flags |= ETH_TEST_FL_FAILED;
13806 
13807 		tg3_full_unlock(tp);
13808 
13809 		if (tg3_test_interrupt(tp) != 0) {
13810 			etest->flags |= ETH_TEST_FL_FAILED;
13811 			data[TG3_INTERRUPT_TEST] = 1;
13812 		}
13813 
13814 		tg3_full_lock(tp, 0);
13815 
13816 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13817 		if (netif_running(dev)) {
13818 			tg3_flag_set(tp, INIT_COMPLETE);
13819 			err2 = tg3_restart_hw(tp, true);
13820 			if (!err2)
13821 				tg3_netif_start(tp);
13822 		}
13823 
13824 		tg3_full_unlock(tp);
13825 
13826 		if (irq_sync && !err2)
13827 			tg3_phy_start(tp);
13828 	}
13829 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13830 		tg3_power_down_prepare(tp);
13831 
13832 }
13833 
tg3_hwtstamp_set(struct net_device * dev,struct ifreq * ifr)13834 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13835 {
13836 	struct tg3 *tp = netdev_priv(dev);
13837 	struct hwtstamp_config stmpconf;
13838 
13839 	if (!tg3_flag(tp, PTP_CAPABLE))
13840 		return -EOPNOTSUPP;
13841 
13842 	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13843 		return -EFAULT;
13844 
13845 	if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13846 	    stmpconf.tx_type != HWTSTAMP_TX_OFF)
13847 		return -ERANGE;
13848 
13849 	switch (stmpconf.rx_filter) {
13850 	case HWTSTAMP_FILTER_NONE:
13851 		tp->rxptpctl = 0;
13852 		break;
13853 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13854 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13855 			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13856 		break;
13857 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13858 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13859 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13860 		break;
13861 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13862 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13863 			       TG3_RX_PTP_CTL_DELAY_REQ;
13864 		break;
13865 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
13866 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13867 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13868 		break;
13869 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13870 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13871 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13872 		break;
13873 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13874 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13875 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13876 		break;
13877 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
13878 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13879 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13880 		break;
13881 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13882 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13883 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13884 		break;
13885 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13886 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13887 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13888 		break;
13889 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13890 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13891 			       TG3_RX_PTP_CTL_DELAY_REQ;
13892 		break;
13893 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13894 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13895 			       TG3_RX_PTP_CTL_DELAY_REQ;
13896 		break;
13897 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13898 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13899 			       TG3_RX_PTP_CTL_DELAY_REQ;
13900 		break;
13901 	default:
13902 		return -ERANGE;
13903 	}
13904 
13905 	if (netif_running(dev) && tp->rxptpctl)
13906 		tw32(TG3_RX_PTP_CTL,
13907 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13908 
13909 	if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13910 		tg3_flag_set(tp, TX_TSTAMP_EN);
13911 	else
13912 		tg3_flag_clear(tp, TX_TSTAMP_EN);
13913 
13914 	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13915 		-EFAULT : 0;
13916 }
13917 
tg3_hwtstamp_get(struct net_device * dev,struct ifreq * ifr)13918 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13919 {
13920 	struct tg3 *tp = netdev_priv(dev);
13921 	struct hwtstamp_config stmpconf;
13922 
13923 	if (!tg3_flag(tp, PTP_CAPABLE))
13924 		return -EOPNOTSUPP;
13925 
13926 	stmpconf.flags = 0;
13927 	stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13928 			    HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13929 
13930 	switch (tp->rxptpctl) {
13931 	case 0:
13932 		stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13933 		break;
13934 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13935 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13936 		break;
13937 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13938 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13939 		break;
13940 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13941 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13942 		break;
13943 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13944 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13945 		break;
13946 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13947 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13948 		break;
13949 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13950 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13951 		break;
13952 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13953 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13954 		break;
13955 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13956 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13957 		break;
13958 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13959 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13960 		break;
13961 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13962 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13963 		break;
13964 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13965 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13966 		break;
13967 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13968 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13969 		break;
13970 	default:
13971 		WARN_ON_ONCE(1);
13972 		return -ERANGE;
13973 	}
13974 
13975 	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13976 		-EFAULT : 0;
13977 }
13978 
tg3_ioctl(struct net_device * dev,struct ifreq * ifr,int cmd)13979 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13980 {
13981 	struct mii_ioctl_data *data = if_mii(ifr);
13982 	struct tg3 *tp = netdev_priv(dev);
13983 	int err;
13984 
13985 	if (tg3_flag(tp, USE_PHYLIB)) {
13986 		struct phy_device *phydev;
13987 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13988 			return -EAGAIN;
13989 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
13990 		return phy_mii_ioctl(phydev, ifr, cmd);
13991 	}
13992 
13993 	switch (cmd) {
13994 	case SIOCGMIIPHY:
13995 		data->phy_id = tp->phy_addr;
13996 
13997 		fallthrough;
13998 	case SIOCGMIIREG: {
13999 		u32 mii_regval;
14000 
14001 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14002 			break;			/* We have no PHY */
14003 
14004 		if (!netif_running(dev))
14005 			return -EAGAIN;
14006 
14007 		spin_lock_bh(&tp->lock);
14008 		err = __tg3_readphy(tp, data->phy_id & 0x1f,
14009 				    data->reg_num & 0x1f, &mii_regval);
14010 		spin_unlock_bh(&tp->lock);
14011 
14012 		data->val_out = mii_regval;
14013 
14014 		return err;
14015 	}
14016 
14017 	case SIOCSMIIREG:
14018 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14019 			break;			/* We have no PHY */
14020 
14021 		if (!netif_running(dev))
14022 			return -EAGAIN;
14023 
14024 		spin_lock_bh(&tp->lock);
14025 		err = __tg3_writephy(tp, data->phy_id & 0x1f,
14026 				     data->reg_num & 0x1f, data->val_in);
14027 		spin_unlock_bh(&tp->lock);
14028 
14029 		return err;
14030 
14031 	case SIOCSHWTSTAMP:
14032 		return tg3_hwtstamp_set(dev, ifr);
14033 
14034 	case SIOCGHWTSTAMP:
14035 		return tg3_hwtstamp_get(dev, ifr);
14036 
14037 	default:
14038 		/* do nothing */
14039 		break;
14040 	}
14041 	return -EOPNOTSUPP;
14042 }
14043 
tg3_get_coalesce(struct net_device * dev,struct ethtool_coalesce * ec,struct kernel_ethtool_coalesce * kernel_coal,struct netlink_ext_ack * extack)14044 static int tg3_get_coalesce(struct net_device *dev,
14045 			    struct ethtool_coalesce *ec,
14046 			    struct kernel_ethtool_coalesce *kernel_coal,
14047 			    struct netlink_ext_ack *extack)
14048 {
14049 	struct tg3 *tp = netdev_priv(dev);
14050 
14051 	memcpy(ec, &tp->coal, sizeof(*ec));
14052 	return 0;
14053 }
14054 
tg3_set_coalesce(struct net_device * dev,struct ethtool_coalesce * ec,struct kernel_ethtool_coalesce * kernel_coal,struct netlink_ext_ack * extack)14055 static int tg3_set_coalesce(struct net_device *dev,
14056 			    struct ethtool_coalesce *ec,
14057 			    struct kernel_ethtool_coalesce *kernel_coal,
14058 			    struct netlink_ext_ack *extack)
14059 {
14060 	struct tg3 *tp = netdev_priv(dev);
14061 	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14062 	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14063 
14064 	if (!tg3_flag(tp, 5705_PLUS)) {
14065 		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14066 		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14067 		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14068 		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14069 	}
14070 
14071 	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14072 	    (!ec->rx_coalesce_usecs) ||
14073 	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14074 	    (!ec->tx_coalesce_usecs) ||
14075 	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14076 	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14077 	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14078 	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14079 	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14080 	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14081 	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14082 	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14083 		return -EINVAL;
14084 
14085 	/* Only copy relevant parameters, ignore all others. */
14086 	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14087 	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14088 	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14089 	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14090 	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14091 	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14092 	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14093 	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14094 	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14095 
14096 	if (netif_running(dev)) {
14097 		tg3_full_lock(tp, 0);
14098 		__tg3_set_coalesce(tp, &tp->coal);
14099 		tg3_full_unlock(tp);
14100 	}
14101 	return 0;
14102 }
14103 
tg3_set_eee(struct net_device * dev,struct ethtool_eee * edata)14104 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14105 {
14106 	struct tg3 *tp = netdev_priv(dev);
14107 
14108 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14109 		netdev_warn(tp->dev, "Board does not support EEE!\n");
14110 		return -EOPNOTSUPP;
14111 	}
14112 
14113 	if (edata->advertised != tp->eee.advertised) {
14114 		netdev_warn(tp->dev,
14115 			    "Direct manipulation of EEE advertisement is not supported\n");
14116 		return -EINVAL;
14117 	}
14118 
14119 	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14120 		netdev_warn(tp->dev,
14121 			    "Maximal Tx Lpi timer supported is %#x(u)\n",
14122 			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14123 		return -EINVAL;
14124 	}
14125 
14126 	tp->eee = *edata;
14127 
14128 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14129 	tg3_warn_mgmt_link_flap(tp);
14130 
14131 	if (netif_running(tp->dev)) {
14132 		tg3_full_lock(tp, 0);
14133 		tg3_setup_eee(tp);
14134 		tg3_phy_reset(tp);
14135 		tg3_full_unlock(tp);
14136 	}
14137 
14138 	return 0;
14139 }
14140 
tg3_get_eee(struct net_device * dev,struct ethtool_eee * edata)14141 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14142 {
14143 	struct tg3 *tp = netdev_priv(dev);
14144 
14145 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14146 		netdev_warn(tp->dev,
14147 			    "Board does not support EEE!\n");
14148 		return -EOPNOTSUPP;
14149 	}
14150 
14151 	*edata = tp->eee;
14152 	return 0;
14153 }
14154 
14155 static const struct ethtool_ops tg3_ethtool_ops = {
14156 	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
14157 				     ETHTOOL_COALESCE_MAX_FRAMES |
14158 				     ETHTOOL_COALESCE_USECS_IRQ |
14159 				     ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
14160 				     ETHTOOL_COALESCE_STATS_BLOCK_USECS,
14161 	.get_drvinfo		= tg3_get_drvinfo,
14162 	.get_regs_len		= tg3_get_regs_len,
14163 	.get_regs		= tg3_get_regs,
14164 	.get_wol		= tg3_get_wol,
14165 	.set_wol		= tg3_set_wol,
14166 	.get_msglevel		= tg3_get_msglevel,
14167 	.set_msglevel		= tg3_set_msglevel,
14168 	.nway_reset		= tg3_nway_reset,
14169 	.get_link		= ethtool_op_get_link,
14170 	.get_eeprom_len		= tg3_get_eeprom_len,
14171 	.get_eeprom		= tg3_get_eeprom,
14172 	.set_eeprom		= tg3_set_eeprom,
14173 	.get_ringparam		= tg3_get_ringparam,
14174 	.set_ringparam		= tg3_set_ringparam,
14175 	.get_pauseparam		= tg3_get_pauseparam,
14176 	.set_pauseparam		= tg3_set_pauseparam,
14177 	.self_test		= tg3_self_test,
14178 	.get_strings		= tg3_get_strings,
14179 	.set_phys_id		= tg3_set_phys_id,
14180 	.get_ethtool_stats	= tg3_get_ethtool_stats,
14181 	.get_coalesce		= tg3_get_coalesce,
14182 	.set_coalesce		= tg3_set_coalesce,
14183 	.get_sset_count		= tg3_get_sset_count,
14184 	.get_rxnfc		= tg3_get_rxnfc,
14185 	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
14186 	.get_rxfh		= tg3_get_rxfh,
14187 	.set_rxfh		= tg3_set_rxfh,
14188 	.get_channels		= tg3_get_channels,
14189 	.set_channels		= tg3_set_channels,
14190 	.get_ts_info		= tg3_get_ts_info,
14191 	.get_eee		= tg3_get_eee,
14192 	.set_eee		= tg3_set_eee,
14193 	.get_link_ksettings	= tg3_get_link_ksettings,
14194 	.set_link_ksettings	= tg3_set_link_ksettings,
14195 };
14196 
tg3_get_stats64(struct net_device * dev,struct rtnl_link_stats64 * stats)14197 static void tg3_get_stats64(struct net_device *dev,
14198 			    struct rtnl_link_stats64 *stats)
14199 {
14200 	struct tg3 *tp = netdev_priv(dev);
14201 
14202 	spin_lock_bh(&tp->lock);
14203 	if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14204 		*stats = tp->net_stats_prev;
14205 		spin_unlock_bh(&tp->lock);
14206 		return;
14207 	}
14208 
14209 	tg3_get_nstats(tp, stats);
14210 	spin_unlock_bh(&tp->lock);
14211 }
14212 
tg3_set_rx_mode(struct net_device * dev)14213 static void tg3_set_rx_mode(struct net_device *dev)
14214 {
14215 	struct tg3 *tp = netdev_priv(dev);
14216 
14217 	if (!netif_running(dev))
14218 		return;
14219 
14220 	tg3_full_lock(tp, 0);
14221 	__tg3_set_rx_mode(dev);
14222 	tg3_full_unlock(tp);
14223 }
14224 
tg3_set_mtu(struct net_device * dev,struct tg3 * tp,int new_mtu)14225 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14226 			       int new_mtu)
14227 {
14228 	dev->mtu = new_mtu;
14229 
14230 	if (new_mtu > ETH_DATA_LEN) {
14231 		if (tg3_flag(tp, 5780_CLASS)) {
14232 			netdev_update_features(dev);
14233 			tg3_flag_clear(tp, TSO_CAPABLE);
14234 		} else {
14235 			tg3_flag_set(tp, JUMBO_RING_ENABLE);
14236 		}
14237 	} else {
14238 		if (tg3_flag(tp, 5780_CLASS)) {
14239 			tg3_flag_set(tp, TSO_CAPABLE);
14240 			netdev_update_features(dev);
14241 		}
14242 		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14243 	}
14244 }
14245 
tg3_change_mtu(struct net_device * dev,int new_mtu)14246 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14247 {
14248 	struct tg3 *tp = netdev_priv(dev);
14249 	int err;
14250 	bool reset_phy = false;
14251 
14252 	if (!netif_running(dev)) {
14253 		/* We'll just catch it later when the
14254 		 * device is up'd.
14255 		 */
14256 		tg3_set_mtu(dev, tp, new_mtu);
14257 		return 0;
14258 	}
14259 
14260 	tg3_phy_stop(tp);
14261 
14262 	tg3_netif_stop(tp);
14263 
14264 	tg3_set_mtu(dev, tp, new_mtu);
14265 
14266 	tg3_full_lock(tp, 1);
14267 
14268 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14269 
14270 	/* Reset PHY, otherwise the read DMA engine will be in a mode that
14271 	 * breaks all requests to 256 bytes.
14272 	 */
14273 	if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14274 	    tg3_asic_rev(tp) == ASIC_REV_5717 ||
14275 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
14276 	    tg3_asic_rev(tp) == ASIC_REV_5720)
14277 		reset_phy = true;
14278 
14279 	err = tg3_restart_hw(tp, reset_phy);
14280 
14281 	if (!err)
14282 		tg3_netif_start(tp);
14283 
14284 	tg3_full_unlock(tp);
14285 
14286 	if (!err)
14287 		tg3_phy_start(tp);
14288 
14289 	return err;
14290 }
14291 
14292 static const struct net_device_ops tg3_netdev_ops = {
14293 	.ndo_open		= tg3_open,
14294 	.ndo_stop		= tg3_close,
14295 	.ndo_start_xmit		= tg3_start_xmit,
14296 	.ndo_get_stats64	= tg3_get_stats64,
14297 	.ndo_validate_addr	= eth_validate_addr,
14298 	.ndo_set_rx_mode	= tg3_set_rx_mode,
14299 	.ndo_set_mac_address	= tg3_set_mac_addr,
14300 	.ndo_eth_ioctl		= tg3_ioctl,
14301 	.ndo_tx_timeout		= tg3_tx_timeout,
14302 	.ndo_change_mtu		= tg3_change_mtu,
14303 	.ndo_fix_features	= tg3_fix_features,
14304 	.ndo_set_features	= tg3_set_features,
14305 #ifdef CONFIG_NET_POLL_CONTROLLER
14306 	.ndo_poll_controller	= tg3_poll_controller,
14307 #endif
14308 };
14309 
tg3_get_eeprom_size(struct tg3 * tp)14310 static void tg3_get_eeprom_size(struct tg3 *tp)
14311 {
14312 	u32 cursize, val, magic;
14313 
14314 	tp->nvram_size = EEPROM_CHIP_SIZE;
14315 
14316 	if (tg3_nvram_read(tp, 0, &magic) != 0)
14317 		return;
14318 
14319 	if ((magic != TG3_EEPROM_MAGIC) &&
14320 	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14321 	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14322 		return;
14323 
14324 	/*
14325 	 * Size the chip by reading offsets at increasing powers of two.
14326 	 * When we encounter our validation signature, we know the addressing
14327 	 * has wrapped around, and thus have our chip size.
14328 	 */
14329 	cursize = 0x10;
14330 
14331 	while (cursize < tp->nvram_size) {
14332 		if (tg3_nvram_read(tp, cursize, &val) != 0)
14333 			return;
14334 
14335 		if (val == magic)
14336 			break;
14337 
14338 		cursize <<= 1;
14339 	}
14340 
14341 	tp->nvram_size = cursize;
14342 }
14343 
tg3_get_nvram_size(struct tg3 * tp)14344 static void tg3_get_nvram_size(struct tg3 *tp)
14345 {
14346 	u32 val;
14347 
14348 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14349 		return;
14350 
14351 	/* Selfboot format */
14352 	if (val != TG3_EEPROM_MAGIC) {
14353 		tg3_get_eeprom_size(tp);
14354 		return;
14355 	}
14356 
14357 	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14358 		if (val != 0) {
14359 			/* This is confusing.  We want to operate on the
14360 			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
14361 			 * call will read from NVRAM and byteswap the data
14362 			 * according to the byteswapping settings for all
14363 			 * other register accesses.  This ensures the data we
14364 			 * want will always reside in the lower 16-bits.
14365 			 * However, the data in NVRAM is in LE format, which
14366 			 * means the data from the NVRAM read will always be
14367 			 * opposite the endianness of the CPU.  The 16-bit
14368 			 * byteswap then brings the data to CPU endianness.
14369 			 */
14370 			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14371 			return;
14372 		}
14373 	}
14374 	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14375 }
14376 
tg3_get_nvram_info(struct tg3 * tp)14377 static void tg3_get_nvram_info(struct tg3 *tp)
14378 {
14379 	u32 nvcfg1;
14380 
14381 	nvcfg1 = tr32(NVRAM_CFG1);
14382 	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14383 		tg3_flag_set(tp, FLASH);
14384 	} else {
14385 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14386 		tw32(NVRAM_CFG1, nvcfg1);
14387 	}
14388 
14389 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14390 	    tg3_flag(tp, 5780_CLASS)) {
14391 		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14392 		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14393 			tp->nvram_jedecnum = JEDEC_ATMEL;
14394 			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14395 			tg3_flag_set(tp, NVRAM_BUFFERED);
14396 			break;
14397 		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14398 			tp->nvram_jedecnum = JEDEC_ATMEL;
14399 			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14400 			break;
14401 		case FLASH_VENDOR_ATMEL_EEPROM:
14402 			tp->nvram_jedecnum = JEDEC_ATMEL;
14403 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14404 			tg3_flag_set(tp, NVRAM_BUFFERED);
14405 			break;
14406 		case FLASH_VENDOR_ST:
14407 			tp->nvram_jedecnum = JEDEC_ST;
14408 			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14409 			tg3_flag_set(tp, NVRAM_BUFFERED);
14410 			break;
14411 		case FLASH_VENDOR_SAIFUN:
14412 			tp->nvram_jedecnum = JEDEC_SAIFUN;
14413 			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14414 			break;
14415 		case FLASH_VENDOR_SST_SMALL:
14416 		case FLASH_VENDOR_SST_LARGE:
14417 			tp->nvram_jedecnum = JEDEC_SST;
14418 			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14419 			break;
14420 		}
14421 	} else {
14422 		tp->nvram_jedecnum = JEDEC_ATMEL;
14423 		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14424 		tg3_flag_set(tp, NVRAM_BUFFERED);
14425 	}
14426 }
14427 
tg3_nvram_get_pagesize(struct tg3 * tp,u32 nvmcfg1)14428 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14429 {
14430 	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14431 	case FLASH_5752PAGE_SIZE_256:
14432 		tp->nvram_pagesize = 256;
14433 		break;
14434 	case FLASH_5752PAGE_SIZE_512:
14435 		tp->nvram_pagesize = 512;
14436 		break;
14437 	case FLASH_5752PAGE_SIZE_1K:
14438 		tp->nvram_pagesize = 1024;
14439 		break;
14440 	case FLASH_5752PAGE_SIZE_2K:
14441 		tp->nvram_pagesize = 2048;
14442 		break;
14443 	case FLASH_5752PAGE_SIZE_4K:
14444 		tp->nvram_pagesize = 4096;
14445 		break;
14446 	case FLASH_5752PAGE_SIZE_264:
14447 		tp->nvram_pagesize = 264;
14448 		break;
14449 	case FLASH_5752PAGE_SIZE_528:
14450 		tp->nvram_pagesize = 528;
14451 		break;
14452 	}
14453 }
14454 
tg3_get_5752_nvram_info(struct tg3 * tp)14455 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14456 {
14457 	u32 nvcfg1;
14458 
14459 	nvcfg1 = tr32(NVRAM_CFG1);
14460 
14461 	/* NVRAM protection for TPM */
14462 	if (nvcfg1 & (1 << 27))
14463 		tg3_flag_set(tp, PROTECTED_NVRAM);
14464 
14465 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14466 	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14467 	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14468 		tp->nvram_jedecnum = JEDEC_ATMEL;
14469 		tg3_flag_set(tp, NVRAM_BUFFERED);
14470 		break;
14471 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14472 		tp->nvram_jedecnum = JEDEC_ATMEL;
14473 		tg3_flag_set(tp, NVRAM_BUFFERED);
14474 		tg3_flag_set(tp, FLASH);
14475 		break;
14476 	case FLASH_5752VENDOR_ST_M45PE10:
14477 	case FLASH_5752VENDOR_ST_M45PE20:
14478 	case FLASH_5752VENDOR_ST_M45PE40:
14479 		tp->nvram_jedecnum = JEDEC_ST;
14480 		tg3_flag_set(tp, NVRAM_BUFFERED);
14481 		tg3_flag_set(tp, FLASH);
14482 		break;
14483 	}
14484 
14485 	if (tg3_flag(tp, FLASH)) {
14486 		tg3_nvram_get_pagesize(tp, nvcfg1);
14487 	} else {
14488 		/* For eeprom, set pagesize to maximum eeprom size */
14489 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14490 
14491 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14492 		tw32(NVRAM_CFG1, nvcfg1);
14493 	}
14494 }
14495 
tg3_get_5755_nvram_info(struct tg3 * tp)14496 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14497 {
14498 	u32 nvcfg1, protect = 0;
14499 
14500 	nvcfg1 = tr32(NVRAM_CFG1);
14501 
14502 	/* NVRAM protection for TPM */
14503 	if (nvcfg1 & (1 << 27)) {
14504 		tg3_flag_set(tp, PROTECTED_NVRAM);
14505 		protect = 1;
14506 	}
14507 
14508 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14509 	switch (nvcfg1) {
14510 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14511 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14512 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14513 	case FLASH_5755VENDOR_ATMEL_FLASH_5:
14514 		tp->nvram_jedecnum = JEDEC_ATMEL;
14515 		tg3_flag_set(tp, NVRAM_BUFFERED);
14516 		tg3_flag_set(tp, FLASH);
14517 		tp->nvram_pagesize = 264;
14518 		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14519 		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14520 			tp->nvram_size = (protect ? 0x3e200 :
14521 					  TG3_NVRAM_SIZE_512KB);
14522 		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14523 			tp->nvram_size = (protect ? 0x1f200 :
14524 					  TG3_NVRAM_SIZE_256KB);
14525 		else
14526 			tp->nvram_size = (protect ? 0x1f200 :
14527 					  TG3_NVRAM_SIZE_128KB);
14528 		break;
14529 	case FLASH_5752VENDOR_ST_M45PE10:
14530 	case FLASH_5752VENDOR_ST_M45PE20:
14531 	case FLASH_5752VENDOR_ST_M45PE40:
14532 		tp->nvram_jedecnum = JEDEC_ST;
14533 		tg3_flag_set(tp, NVRAM_BUFFERED);
14534 		tg3_flag_set(tp, FLASH);
14535 		tp->nvram_pagesize = 256;
14536 		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14537 			tp->nvram_size = (protect ?
14538 					  TG3_NVRAM_SIZE_64KB :
14539 					  TG3_NVRAM_SIZE_128KB);
14540 		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14541 			tp->nvram_size = (protect ?
14542 					  TG3_NVRAM_SIZE_64KB :
14543 					  TG3_NVRAM_SIZE_256KB);
14544 		else
14545 			tp->nvram_size = (protect ?
14546 					  TG3_NVRAM_SIZE_128KB :
14547 					  TG3_NVRAM_SIZE_512KB);
14548 		break;
14549 	}
14550 }
14551 
tg3_get_5787_nvram_info(struct tg3 * tp)14552 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14553 {
14554 	u32 nvcfg1;
14555 
14556 	nvcfg1 = tr32(NVRAM_CFG1);
14557 
14558 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14559 	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14560 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14561 	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14562 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14563 		tp->nvram_jedecnum = JEDEC_ATMEL;
14564 		tg3_flag_set(tp, NVRAM_BUFFERED);
14565 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14566 
14567 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14568 		tw32(NVRAM_CFG1, nvcfg1);
14569 		break;
14570 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14571 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14572 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14573 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14574 		tp->nvram_jedecnum = JEDEC_ATMEL;
14575 		tg3_flag_set(tp, NVRAM_BUFFERED);
14576 		tg3_flag_set(tp, FLASH);
14577 		tp->nvram_pagesize = 264;
14578 		break;
14579 	case FLASH_5752VENDOR_ST_M45PE10:
14580 	case FLASH_5752VENDOR_ST_M45PE20:
14581 	case FLASH_5752VENDOR_ST_M45PE40:
14582 		tp->nvram_jedecnum = JEDEC_ST;
14583 		tg3_flag_set(tp, NVRAM_BUFFERED);
14584 		tg3_flag_set(tp, FLASH);
14585 		tp->nvram_pagesize = 256;
14586 		break;
14587 	}
14588 }
14589 
tg3_get_5761_nvram_info(struct tg3 * tp)14590 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14591 {
14592 	u32 nvcfg1, protect = 0;
14593 
14594 	nvcfg1 = tr32(NVRAM_CFG1);
14595 
14596 	/* NVRAM protection for TPM */
14597 	if (nvcfg1 & (1 << 27)) {
14598 		tg3_flag_set(tp, PROTECTED_NVRAM);
14599 		protect = 1;
14600 	}
14601 
14602 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14603 	switch (nvcfg1) {
14604 	case FLASH_5761VENDOR_ATMEL_ADB021D:
14605 	case FLASH_5761VENDOR_ATMEL_ADB041D:
14606 	case FLASH_5761VENDOR_ATMEL_ADB081D:
14607 	case FLASH_5761VENDOR_ATMEL_ADB161D:
14608 	case FLASH_5761VENDOR_ATMEL_MDB021D:
14609 	case FLASH_5761VENDOR_ATMEL_MDB041D:
14610 	case FLASH_5761VENDOR_ATMEL_MDB081D:
14611 	case FLASH_5761VENDOR_ATMEL_MDB161D:
14612 		tp->nvram_jedecnum = JEDEC_ATMEL;
14613 		tg3_flag_set(tp, NVRAM_BUFFERED);
14614 		tg3_flag_set(tp, FLASH);
14615 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14616 		tp->nvram_pagesize = 256;
14617 		break;
14618 	case FLASH_5761VENDOR_ST_A_M45PE20:
14619 	case FLASH_5761VENDOR_ST_A_M45PE40:
14620 	case FLASH_5761VENDOR_ST_A_M45PE80:
14621 	case FLASH_5761VENDOR_ST_A_M45PE16:
14622 	case FLASH_5761VENDOR_ST_M_M45PE20:
14623 	case FLASH_5761VENDOR_ST_M_M45PE40:
14624 	case FLASH_5761VENDOR_ST_M_M45PE80:
14625 	case FLASH_5761VENDOR_ST_M_M45PE16:
14626 		tp->nvram_jedecnum = JEDEC_ST;
14627 		tg3_flag_set(tp, NVRAM_BUFFERED);
14628 		tg3_flag_set(tp, FLASH);
14629 		tp->nvram_pagesize = 256;
14630 		break;
14631 	}
14632 
14633 	if (protect) {
14634 		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14635 	} else {
14636 		switch (nvcfg1) {
14637 		case FLASH_5761VENDOR_ATMEL_ADB161D:
14638 		case FLASH_5761VENDOR_ATMEL_MDB161D:
14639 		case FLASH_5761VENDOR_ST_A_M45PE16:
14640 		case FLASH_5761VENDOR_ST_M_M45PE16:
14641 			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14642 			break;
14643 		case FLASH_5761VENDOR_ATMEL_ADB081D:
14644 		case FLASH_5761VENDOR_ATMEL_MDB081D:
14645 		case FLASH_5761VENDOR_ST_A_M45PE80:
14646 		case FLASH_5761VENDOR_ST_M_M45PE80:
14647 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14648 			break;
14649 		case FLASH_5761VENDOR_ATMEL_ADB041D:
14650 		case FLASH_5761VENDOR_ATMEL_MDB041D:
14651 		case FLASH_5761VENDOR_ST_A_M45PE40:
14652 		case FLASH_5761VENDOR_ST_M_M45PE40:
14653 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14654 			break;
14655 		case FLASH_5761VENDOR_ATMEL_ADB021D:
14656 		case FLASH_5761VENDOR_ATMEL_MDB021D:
14657 		case FLASH_5761VENDOR_ST_A_M45PE20:
14658 		case FLASH_5761VENDOR_ST_M_M45PE20:
14659 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14660 			break;
14661 		}
14662 	}
14663 }
14664 
tg3_get_5906_nvram_info(struct tg3 * tp)14665 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14666 {
14667 	tp->nvram_jedecnum = JEDEC_ATMEL;
14668 	tg3_flag_set(tp, NVRAM_BUFFERED);
14669 	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14670 }
14671 
tg3_get_57780_nvram_info(struct tg3 * tp)14672 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14673 {
14674 	u32 nvcfg1;
14675 
14676 	nvcfg1 = tr32(NVRAM_CFG1);
14677 
14678 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14679 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14680 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14681 		tp->nvram_jedecnum = JEDEC_ATMEL;
14682 		tg3_flag_set(tp, NVRAM_BUFFERED);
14683 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14684 
14685 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14686 		tw32(NVRAM_CFG1, nvcfg1);
14687 		return;
14688 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14689 	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14690 	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14691 	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14692 	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14693 	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14694 	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14695 		tp->nvram_jedecnum = JEDEC_ATMEL;
14696 		tg3_flag_set(tp, NVRAM_BUFFERED);
14697 		tg3_flag_set(tp, FLASH);
14698 
14699 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14700 		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14701 		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14702 		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14703 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14704 			break;
14705 		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14706 		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14707 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14708 			break;
14709 		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14710 		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14711 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14712 			break;
14713 		}
14714 		break;
14715 	case FLASH_5752VENDOR_ST_M45PE10:
14716 	case FLASH_5752VENDOR_ST_M45PE20:
14717 	case FLASH_5752VENDOR_ST_M45PE40:
14718 		tp->nvram_jedecnum = JEDEC_ST;
14719 		tg3_flag_set(tp, NVRAM_BUFFERED);
14720 		tg3_flag_set(tp, FLASH);
14721 
14722 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14723 		case FLASH_5752VENDOR_ST_M45PE10:
14724 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14725 			break;
14726 		case FLASH_5752VENDOR_ST_M45PE20:
14727 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14728 			break;
14729 		case FLASH_5752VENDOR_ST_M45PE40:
14730 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14731 			break;
14732 		}
14733 		break;
14734 	default:
14735 		tg3_flag_set(tp, NO_NVRAM);
14736 		return;
14737 	}
14738 
14739 	tg3_nvram_get_pagesize(tp, nvcfg1);
14740 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14741 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14742 }
14743 
14744 
tg3_get_5717_nvram_info(struct tg3 * tp)14745 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14746 {
14747 	u32 nvcfg1;
14748 
14749 	nvcfg1 = tr32(NVRAM_CFG1);
14750 
14751 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14752 	case FLASH_5717VENDOR_ATMEL_EEPROM:
14753 	case FLASH_5717VENDOR_MICRO_EEPROM:
14754 		tp->nvram_jedecnum = JEDEC_ATMEL;
14755 		tg3_flag_set(tp, NVRAM_BUFFERED);
14756 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14757 
14758 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14759 		tw32(NVRAM_CFG1, nvcfg1);
14760 		return;
14761 	case FLASH_5717VENDOR_ATMEL_MDB011D:
14762 	case FLASH_5717VENDOR_ATMEL_ADB011B:
14763 	case FLASH_5717VENDOR_ATMEL_ADB011D:
14764 	case FLASH_5717VENDOR_ATMEL_MDB021D:
14765 	case FLASH_5717VENDOR_ATMEL_ADB021B:
14766 	case FLASH_5717VENDOR_ATMEL_ADB021D:
14767 	case FLASH_5717VENDOR_ATMEL_45USPT:
14768 		tp->nvram_jedecnum = JEDEC_ATMEL;
14769 		tg3_flag_set(tp, NVRAM_BUFFERED);
14770 		tg3_flag_set(tp, FLASH);
14771 
14772 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14773 		case FLASH_5717VENDOR_ATMEL_MDB021D:
14774 			/* Detect size with tg3_nvram_get_size() */
14775 			break;
14776 		case FLASH_5717VENDOR_ATMEL_ADB021B:
14777 		case FLASH_5717VENDOR_ATMEL_ADB021D:
14778 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14779 			break;
14780 		default:
14781 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14782 			break;
14783 		}
14784 		break;
14785 	case FLASH_5717VENDOR_ST_M_M25PE10:
14786 	case FLASH_5717VENDOR_ST_A_M25PE10:
14787 	case FLASH_5717VENDOR_ST_M_M45PE10:
14788 	case FLASH_5717VENDOR_ST_A_M45PE10:
14789 	case FLASH_5717VENDOR_ST_M_M25PE20:
14790 	case FLASH_5717VENDOR_ST_A_M25PE20:
14791 	case FLASH_5717VENDOR_ST_M_M45PE20:
14792 	case FLASH_5717VENDOR_ST_A_M45PE20:
14793 	case FLASH_5717VENDOR_ST_25USPT:
14794 	case FLASH_5717VENDOR_ST_45USPT:
14795 		tp->nvram_jedecnum = JEDEC_ST;
14796 		tg3_flag_set(tp, NVRAM_BUFFERED);
14797 		tg3_flag_set(tp, FLASH);
14798 
14799 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14800 		case FLASH_5717VENDOR_ST_M_M25PE20:
14801 		case FLASH_5717VENDOR_ST_M_M45PE20:
14802 			/* Detect size with tg3_nvram_get_size() */
14803 			break;
14804 		case FLASH_5717VENDOR_ST_A_M25PE20:
14805 		case FLASH_5717VENDOR_ST_A_M45PE20:
14806 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14807 			break;
14808 		default:
14809 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14810 			break;
14811 		}
14812 		break;
14813 	default:
14814 		tg3_flag_set(tp, NO_NVRAM);
14815 		return;
14816 	}
14817 
14818 	tg3_nvram_get_pagesize(tp, nvcfg1);
14819 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14820 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14821 }
14822 
tg3_get_5720_nvram_info(struct tg3 * tp)14823 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14824 {
14825 	u32 nvcfg1, nvmpinstrp, nv_status;
14826 
14827 	nvcfg1 = tr32(NVRAM_CFG1);
14828 	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14829 
14830 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14831 		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14832 			tg3_flag_set(tp, NO_NVRAM);
14833 			return;
14834 		}
14835 
14836 		switch (nvmpinstrp) {
14837 		case FLASH_5762_MX25L_100:
14838 		case FLASH_5762_MX25L_200:
14839 		case FLASH_5762_MX25L_400:
14840 		case FLASH_5762_MX25L_800:
14841 		case FLASH_5762_MX25L_160_320:
14842 			tp->nvram_pagesize = 4096;
14843 			tp->nvram_jedecnum = JEDEC_MACRONIX;
14844 			tg3_flag_set(tp, NVRAM_BUFFERED);
14845 			tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14846 			tg3_flag_set(tp, FLASH);
14847 			nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
14848 			tp->nvram_size =
14849 				(1 << (nv_status >> AUTOSENSE_DEVID &
14850 						AUTOSENSE_DEVID_MASK)
14851 					<< AUTOSENSE_SIZE_IN_MB);
14852 			return;
14853 
14854 		case FLASH_5762_EEPROM_HD:
14855 			nvmpinstrp = FLASH_5720_EEPROM_HD;
14856 			break;
14857 		case FLASH_5762_EEPROM_LD:
14858 			nvmpinstrp = FLASH_5720_EEPROM_LD;
14859 			break;
14860 		case FLASH_5720VENDOR_M_ST_M45PE20:
14861 			/* This pinstrap supports multiple sizes, so force it
14862 			 * to read the actual size from location 0xf0.
14863 			 */
14864 			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14865 			break;
14866 		}
14867 	}
14868 
14869 	switch (nvmpinstrp) {
14870 	case FLASH_5720_EEPROM_HD:
14871 	case FLASH_5720_EEPROM_LD:
14872 		tp->nvram_jedecnum = JEDEC_ATMEL;
14873 		tg3_flag_set(tp, NVRAM_BUFFERED);
14874 
14875 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14876 		tw32(NVRAM_CFG1, nvcfg1);
14877 		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14878 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14879 		else
14880 			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14881 		return;
14882 	case FLASH_5720VENDOR_M_ATMEL_DB011D:
14883 	case FLASH_5720VENDOR_A_ATMEL_DB011B:
14884 	case FLASH_5720VENDOR_A_ATMEL_DB011D:
14885 	case FLASH_5720VENDOR_M_ATMEL_DB021D:
14886 	case FLASH_5720VENDOR_A_ATMEL_DB021B:
14887 	case FLASH_5720VENDOR_A_ATMEL_DB021D:
14888 	case FLASH_5720VENDOR_M_ATMEL_DB041D:
14889 	case FLASH_5720VENDOR_A_ATMEL_DB041B:
14890 	case FLASH_5720VENDOR_A_ATMEL_DB041D:
14891 	case FLASH_5720VENDOR_M_ATMEL_DB081D:
14892 	case FLASH_5720VENDOR_A_ATMEL_DB081D:
14893 	case FLASH_5720VENDOR_ATMEL_45USPT:
14894 		tp->nvram_jedecnum = JEDEC_ATMEL;
14895 		tg3_flag_set(tp, NVRAM_BUFFERED);
14896 		tg3_flag_set(tp, FLASH);
14897 
14898 		switch (nvmpinstrp) {
14899 		case FLASH_5720VENDOR_M_ATMEL_DB021D:
14900 		case FLASH_5720VENDOR_A_ATMEL_DB021B:
14901 		case FLASH_5720VENDOR_A_ATMEL_DB021D:
14902 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14903 			break;
14904 		case FLASH_5720VENDOR_M_ATMEL_DB041D:
14905 		case FLASH_5720VENDOR_A_ATMEL_DB041B:
14906 		case FLASH_5720VENDOR_A_ATMEL_DB041D:
14907 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14908 			break;
14909 		case FLASH_5720VENDOR_M_ATMEL_DB081D:
14910 		case FLASH_5720VENDOR_A_ATMEL_DB081D:
14911 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14912 			break;
14913 		default:
14914 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
14915 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14916 			break;
14917 		}
14918 		break;
14919 	case FLASH_5720VENDOR_M_ST_M25PE10:
14920 	case FLASH_5720VENDOR_M_ST_M45PE10:
14921 	case FLASH_5720VENDOR_A_ST_M25PE10:
14922 	case FLASH_5720VENDOR_A_ST_M45PE10:
14923 	case FLASH_5720VENDOR_M_ST_M25PE20:
14924 	case FLASH_5720VENDOR_M_ST_M45PE20:
14925 	case FLASH_5720VENDOR_A_ST_M25PE20:
14926 	case FLASH_5720VENDOR_A_ST_M45PE20:
14927 	case FLASH_5720VENDOR_M_ST_M25PE40:
14928 	case FLASH_5720VENDOR_M_ST_M45PE40:
14929 	case FLASH_5720VENDOR_A_ST_M25PE40:
14930 	case FLASH_5720VENDOR_A_ST_M45PE40:
14931 	case FLASH_5720VENDOR_M_ST_M25PE80:
14932 	case FLASH_5720VENDOR_M_ST_M45PE80:
14933 	case FLASH_5720VENDOR_A_ST_M25PE80:
14934 	case FLASH_5720VENDOR_A_ST_M45PE80:
14935 	case FLASH_5720VENDOR_ST_25USPT:
14936 	case FLASH_5720VENDOR_ST_45USPT:
14937 		tp->nvram_jedecnum = JEDEC_ST;
14938 		tg3_flag_set(tp, NVRAM_BUFFERED);
14939 		tg3_flag_set(tp, FLASH);
14940 
14941 		switch (nvmpinstrp) {
14942 		case FLASH_5720VENDOR_M_ST_M25PE20:
14943 		case FLASH_5720VENDOR_M_ST_M45PE20:
14944 		case FLASH_5720VENDOR_A_ST_M25PE20:
14945 		case FLASH_5720VENDOR_A_ST_M45PE20:
14946 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14947 			break;
14948 		case FLASH_5720VENDOR_M_ST_M25PE40:
14949 		case FLASH_5720VENDOR_M_ST_M45PE40:
14950 		case FLASH_5720VENDOR_A_ST_M25PE40:
14951 		case FLASH_5720VENDOR_A_ST_M45PE40:
14952 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14953 			break;
14954 		case FLASH_5720VENDOR_M_ST_M25PE80:
14955 		case FLASH_5720VENDOR_M_ST_M45PE80:
14956 		case FLASH_5720VENDOR_A_ST_M25PE80:
14957 		case FLASH_5720VENDOR_A_ST_M45PE80:
14958 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14959 			break;
14960 		default:
14961 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
14962 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14963 			break;
14964 		}
14965 		break;
14966 	default:
14967 		tg3_flag_set(tp, NO_NVRAM);
14968 		return;
14969 	}
14970 
14971 	tg3_nvram_get_pagesize(tp, nvcfg1);
14972 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14973 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14974 
14975 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14976 		u32 val;
14977 
14978 		if (tg3_nvram_read(tp, 0, &val))
14979 			return;
14980 
14981 		if (val != TG3_EEPROM_MAGIC &&
14982 		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14983 			tg3_flag_set(tp, NO_NVRAM);
14984 	}
14985 }
14986 
14987 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
tg3_nvram_init(struct tg3 * tp)14988 static void tg3_nvram_init(struct tg3 *tp)
14989 {
14990 	if (tg3_flag(tp, IS_SSB_CORE)) {
14991 		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14992 		tg3_flag_clear(tp, NVRAM);
14993 		tg3_flag_clear(tp, NVRAM_BUFFERED);
14994 		tg3_flag_set(tp, NO_NVRAM);
14995 		return;
14996 	}
14997 
14998 	tw32_f(GRC_EEPROM_ADDR,
14999 	     (EEPROM_ADDR_FSM_RESET |
15000 	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
15001 	       EEPROM_ADDR_CLKPERD_SHIFT)));
15002 
15003 	msleep(1);
15004 
15005 	/* Enable seeprom accesses. */
15006 	tw32_f(GRC_LOCAL_CTRL,
15007 	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
15008 	udelay(100);
15009 
15010 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15011 	    tg3_asic_rev(tp) != ASIC_REV_5701) {
15012 		tg3_flag_set(tp, NVRAM);
15013 
15014 		if (tg3_nvram_lock(tp)) {
15015 			netdev_warn(tp->dev,
15016 				    "Cannot get nvram lock, %s failed\n",
15017 				    __func__);
15018 			return;
15019 		}
15020 		tg3_enable_nvram_access(tp);
15021 
15022 		tp->nvram_size = 0;
15023 
15024 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
15025 			tg3_get_5752_nvram_info(tp);
15026 		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
15027 			tg3_get_5755_nvram_info(tp);
15028 		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
15029 			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15030 			 tg3_asic_rev(tp) == ASIC_REV_5785)
15031 			tg3_get_5787_nvram_info(tp);
15032 		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
15033 			tg3_get_5761_nvram_info(tp);
15034 		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
15035 			tg3_get_5906_nvram_info(tp);
15036 		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
15037 			 tg3_flag(tp, 57765_CLASS))
15038 			tg3_get_57780_nvram_info(tp);
15039 		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15040 			 tg3_asic_rev(tp) == ASIC_REV_5719)
15041 			tg3_get_5717_nvram_info(tp);
15042 		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15043 			 tg3_asic_rev(tp) == ASIC_REV_5762)
15044 			tg3_get_5720_nvram_info(tp);
15045 		else
15046 			tg3_get_nvram_info(tp);
15047 
15048 		if (tp->nvram_size == 0)
15049 			tg3_get_nvram_size(tp);
15050 
15051 		tg3_disable_nvram_access(tp);
15052 		tg3_nvram_unlock(tp);
15053 
15054 	} else {
15055 		tg3_flag_clear(tp, NVRAM);
15056 		tg3_flag_clear(tp, NVRAM_BUFFERED);
15057 
15058 		tg3_get_eeprom_size(tp);
15059 	}
15060 }
15061 
15062 struct subsys_tbl_ent {
15063 	u16 subsys_vendor, subsys_devid;
15064 	u32 phy_id;
15065 };
15066 
15067 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15068 	/* Broadcom boards. */
15069 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15070 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15071 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15072 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15073 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15074 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15075 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15076 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15077 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15078 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15079 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15080 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15081 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15082 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15083 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15084 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15085 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15086 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15087 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15088 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15089 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15090 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15091 
15092 	/* 3com boards. */
15093 	{ TG3PCI_SUBVENDOR_ID_3COM,
15094 	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15095 	{ TG3PCI_SUBVENDOR_ID_3COM,
15096 	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15097 	{ TG3PCI_SUBVENDOR_ID_3COM,
15098 	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15099 	{ TG3PCI_SUBVENDOR_ID_3COM,
15100 	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15101 	{ TG3PCI_SUBVENDOR_ID_3COM,
15102 	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15103 
15104 	/* DELL boards. */
15105 	{ TG3PCI_SUBVENDOR_ID_DELL,
15106 	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15107 	{ TG3PCI_SUBVENDOR_ID_DELL,
15108 	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15109 	{ TG3PCI_SUBVENDOR_ID_DELL,
15110 	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15111 	{ TG3PCI_SUBVENDOR_ID_DELL,
15112 	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15113 
15114 	/* Compaq boards. */
15115 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15116 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15117 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15118 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15119 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15120 	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15121 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15122 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15123 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15124 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15125 
15126 	/* IBM boards. */
15127 	{ TG3PCI_SUBVENDOR_ID_IBM,
15128 	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15129 };
15130 
tg3_lookup_by_subsys(struct tg3 * tp)15131 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15132 {
15133 	int i;
15134 
15135 	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15136 		if ((subsys_id_to_phy_id[i].subsys_vendor ==
15137 		     tp->pdev->subsystem_vendor) &&
15138 		    (subsys_id_to_phy_id[i].subsys_devid ==
15139 		     tp->pdev->subsystem_device))
15140 			return &subsys_id_to_phy_id[i];
15141 	}
15142 	return NULL;
15143 }
15144 
tg3_get_eeprom_hw_cfg(struct tg3 * tp)15145 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15146 {
15147 	u32 val;
15148 
15149 	tp->phy_id = TG3_PHY_ID_INVALID;
15150 	tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15151 
15152 	/* Assume an onboard device and WOL capable by default.  */
15153 	tg3_flag_set(tp, EEPROM_WRITE_PROT);
15154 	tg3_flag_set(tp, WOL_CAP);
15155 
15156 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15157 		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15158 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15159 			tg3_flag_set(tp, IS_NIC);
15160 		}
15161 		val = tr32(VCPU_CFGSHDW);
15162 		if (val & VCPU_CFGSHDW_ASPM_DBNC)
15163 			tg3_flag_set(tp, ASPM_WORKAROUND);
15164 		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15165 		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15166 			tg3_flag_set(tp, WOL_ENABLE);
15167 			device_set_wakeup_enable(&tp->pdev->dev, true);
15168 		}
15169 		goto done;
15170 	}
15171 
15172 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15173 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15174 		u32 nic_cfg, led_cfg;
15175 		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15176 		u32 nic_phy_id, ver, eeprom_phy_id;
15177 		int eeprom_phy_serdes = 0;
15178 
15179 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15180 		tp->nic_sram_data_cfg = nic_cfg;
15181 
15182 		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15183 		ver >>= NIC_SRAM_DATA_VER_SHIFT;
15184 		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15185 		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
15186 		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
15187 		    (ver > 0) && (ver < 0x100))
15188 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15189 
15190 		if (tg3_asic_rev(tp) == ASIC_REV_5785)
15191 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15192 
15193 		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15194 		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
15195 		    tg3_asic_rev(tp) == ASIC_REV_5720)
15196 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15197 
15198 		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15199 		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15200 			eeprom_phy_serdes = 1;
15201 
15202 		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15203 		if (nic_phy_id != 0) {
15204 			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15205 			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15206 
15207 			eeprom_phy_id  = (id1 >> 16) << 10;
15208 			eeprom_phy_id |= (id2 & 0xfc00) << 16;
15209 			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
15210 		} else
15211 			eeprom_phy_id = 0;
15212 
15213 		tp->phy_id = eeprom_phy_id;
15214 		if (eeprom_phy_serdes) {
15215 			if (!tg3_flag(tp, 5705_PLUS))
15216 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15217 			else
15218 				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15219 		}
15220 
15221 		if (tg3_flag(tp, 5750_PLUS))
15222 			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15223 				    SHASTA_EXT_LED_MODE_MASK);
15224 		else
15225 			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15226 
15227 		switch (led_cfg) {
15228 		default:
15229 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15230 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15231 			break;
15232 
15233 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15234 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15235 			break;
15236 
15237 		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15238 			tp->led_ctrl = LED_CTRL_MODE_MAC;
15239 
15240 			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
15241 			 * read on some older 5700/5701 bootcode.
15242 			 */
15243 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15244 			    tg3_asic_rev(tp) == ASIC_REV_5701)
15245 				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15246 
15247 			break;
15248 
15249 		case SHASTA_EXT_LED_SHARED:
15250 			tp->led_ctrl = LED_CTRL_MODE_SHARED;
15251 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15252 			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15253 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15254 						 LED_CTRL_MODE_PHY_2);
15255 
15256 			if (tg3_flag(tp, 5717_PLUS) ||
15257 			    tg3_asic_rev(tp) == ASIC_REV_5762)
15258 				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15259 						LED_CTRL_BLINK_RATE_MASK;
15260 
15261 			break;
15262 
15263 		case SHASTA_EXT_LED_MAC:
15264 			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15265 			break;
15266 
15267 		case SHASTA_EXT_LED_COMBO:
15268 			tp->led_ctrl = LED_CTRL_MODE_COMBO;
15269 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15270 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15271 						 LED_CTRL_MODE_PHY_2);
15272 			break;
15273 
15274 		}
15275 
15276 		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15277 		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
15278 		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15279 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15280 
15281 		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15282 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15283 
15284 		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15285 			tg3_flag_set(tp, EEPROM_WRITE_PROT);
15286 			if ((tp->pdev->subsystem_vendor ==
15287 			     PCI_VENDOR_ID_ARIMA) &&
15288 			    (tp->pdev->subsystem_device == 0x205a ||
15289 			     tp->pdev->subsystem_device == 0x2063))
15290 				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15291 		} else {
15292 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15293 			tg3_flag_set(tp, IS_NIC);
15294 		}
15295 
15296 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15297 			tg3_flag_set(tp, ENABLE_ASF);
15298 			if (tg3_flag(tp, 5750_PLUS))
15299 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15300 		}
15301 
15302 		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15303 		    tg3_flag(tp, 5750_PLUS))
15304 			tg3_flag_set(tp, ENABLE_APE);
15305 
15306 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15307 		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15308 			tg3_flag_clear(tp, WOL_CAP);
15309 
15310 		if (tg3_flag(tp, WOL_CAP) &&
15311 		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15312 			tg3_flag_set(tp, WOL_ENABLE);
15313 			device_set_wakeup_enable(&tp->pdev->dev, true);
15314 		}
15315 
15316 		if (cfg2 & (1 << 17))
15317 			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15318 
15319 		/* serdes signal pre-emphasis in register 0x590 set by */
15320 		/* bootcode if bit 18 is set */
15321 		if (cfg2 & (1 << 18))
15322 			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15323 
15324 		if ((tg3_flag(tp, 57765_PLUS) ||
15325 		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15326 		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15327 		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15328 			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15329 
15330 		if (tg3_flag(tp, PCI_EXPRESS)) {
15331 			u32 cfg3;
15332 
15333 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15334 			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15335 			    !tg3_flag(tp, 57765_PLUS) &&
15336 			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15337 				tg3_flag_set(tp, ASPM_WORKAROUND);
15338 			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15339 				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15340 			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15341 				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15342 		}
15343 
15344 		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15345 			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15346 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15347 			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15348 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15349 			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15350 
15351 		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15352 			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15353 	}
15354 done:
15355 	if (tg3_flag(tp, WOL_CAP))
15356 		device_set_wakeup_enable(&tp->pdev->dev,
15357 					 tg3_flag(tp, WOL_ENABLE));
15358 	else
15359 		device_set_wakeup_capable(&tp->pdev->dev, false);
15360 }
15361 
tg3_ape_otp_read(struct tg3 * tp,u32 offset,u32 * val)15362 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15363 {
15364 	int i, err;
15365 	u32 val2, off = offset * 8;
15366 
15367 	err = tg3_nvram_lock(tp);
15368 	if (err)
15369 		return err;
15370 
15371 	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15372 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15373 			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15374 	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15375 	udelay(10);
15376 
15377 	for (i = 0; i < 100; i++) {
15378 		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15379 		if (val2 & APE_OTP_STATUS_CMD_DONE) {
15380 			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15381 			break;
15382 		}
15383 		udelay(10);
15384 	}
15385 
15386 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15387 
15388 	tg3_nvram_unlock(tp);
15389 	if (val2 & APE_OTP_STATUS_CMD_DONE)
15390 		return 0;
15391 
15392 	return -EBUSY;
15393 }
15394 
tg3_issue_otp_command(struct tg3 * tp,u32 cmd)15395 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15396 {
15397 	int i;
15398 	u32 val;
15399 
15400 	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15401 	tw32(OTP_CTRL, cmd);
15402 
15403 	/* Wait for up to 1 ms for command to execute. */
15404 	for (i = 0; i < 100; i++) {
15405 		val = tr32(OTP_STATUS);
15406 		if (val & OTP_STATUS_CMD_DONE)
15407 			break;
15408 		udelay(10);
15409 	}
15410 
15411 	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15412 }
15413 
15414 /* Read the gphy configuration from the OTP region of the chip.  The gphy
15415  * configuration is a 32-bit value that straddles the alignment boundary.
15416  * We do two 32-bit reads and then shift and merge the results.
15417  */
tg3_read_otp_phycfg(struct tg3 * tp)15418 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15419 {
15420 	u32 bhalf_otp, thalf_otp;
15421 
15422 	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15423 
15424 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15425 		return 0;
15426 
15427 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15428 
15429 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15430 		return 0;
15431 
15432 	thalf_otp = tr32(OTP_READ_DATA);
15433 
15434 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15435 
15436 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15437 		return 0;
15438 
15439 	bhalf_otp = tr32(OTP_READ_DATA);
15440 
15441 	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15442 }
15443 
tg3_phy_init_link_config(struct tg3 * tp)15444 static void tg3_phy_init_link_config(struct tg3 *tp)
15445 {
15446 	u32 adv = ADVERTISED_Autoneg;
15447 
15448 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15449 		if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15450 			adv |= ADVERTISED_1000baseT_Half;
15451 		adv |= ADVERTISED_1000baseT_Full;
15452 	}
15453 
15454 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15455 		adv |= ADVERTISED_100baseT_Half |
15456 		       ADVERTISED_100baseT_Full |
15457 		       ADVERTISED_10baseT_Half |
15458 		       ADVERTISED_10baseT_Full |
15459 		       ADVERTISED_TP;
15460 	else
15461 		adv |= ADVERTISED_FIBRE;
15462 
15463 	tp->link_config.advertising = adv;
15464 	tp->link_config.speed = SPEED_UNKNOWN;
15465 	tp->link_config.duplex = DUPLEX_UNKNOWN;
15466 	tp->link_config.autoneg = AUTONEG_ENABLE;
15467 	tp->link_config.active_speed = SPEED_UNKNOWN;
15468 	tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15469 
15470 	tp->old_link = -1;
15471 }
15472 
tg3_phy_probe(struct tg3 * tp)15473 static int tg3_phy_probe(struct tg3 *tp)
15474 {
15475 	u32 hw_phy_id_1, hw_phy_id_2;
15476 	u32 hw_phy_id, hw_phy_id_masked;
15477 	int err;
15478 
15479 	/* flow control autonegotiation is default behavior */
15480 	tg3_flag_set(tp, PAUSE_AUTONEG);
15481 	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15482 
15483 	if (tg3_flag(tp, ENABLE_APE)) {
15484 		switch (tp->pci_fn) {
15485 		case 0:
15486 			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15487 			break;
15488 		case 1:
15489 			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15490 			break;
15491 		case 2:
15492 			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15493 			break;
15494 		case 3:
15495 			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15496 			break;
15497 		}
15498 	}
15499 
15500 	if (!tg3_flag(tp, ENABLE_ASF) &&
15501 	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15502 	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15503 		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15504 				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15505 
15506 	if (tg3_flag(tp, USE_PHYLIB))
15507 		return tg3_phy_init(tp);
15508 
15509 	/* Reading the PHY ID register can conflict with ASF
15510 	 * firmware access to the PHY hardware.
15511 	 */
15512 	err = 0;
15513 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15514 		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15515 	} else {
15516 		/* Now read the physical PHY_ID from the chip and verify
15517 		 * that it is sane.  If it doesn't look good, we fall back
15518 		 * to either the hard-coded table based PHY_ID and failing
15519 		 * that the value found in the eeprom area.
15520 		 */
15521 		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15522 		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15523 
15524 		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
15525 		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15526 		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
15527 
15528 		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15529 	}
15530 
15531 	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15532 		tp->phy_id = hw_phy_id;
15533 		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15534 			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15535 		else
15536 			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15537 	} else {
15538 		if (tp->phy_id != TG3_PHY_ID_INVALID) {
15539 			/* Do nothing, phy ID already set up in
15540 			 * tg3_get_eeprom_hw_cfg().
15541 			 */
15542 		} else {
15543 			struct subsys_tbl_ent *p;
15544 
15545 			/* No eeprom signature?  Try the hardcoded
15546 			 * subsys device table.
15547 			 */
15548 			p = tg3_lookup_by_subsys(tp);
15549 			if (p) {
15550 				tp->phy_id = p->phy_id;
15551 			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
15552 				/* For now we saw the IDs 0xbc050cd0,
15553 				 * 0xbc050f80 and 0xbc050c30 on devices
15554 				 * connected to an BCM4785 and there are
15555 				 * probably more. Just assume that the phy is
15556 				 * supported when it is connected to a SSB core
15557 				 * for now.
15558 				 */
15559 				return -ENODEV;
15560 			}
15561 
15562 			if (!tp->phy_id ||
15563 			    tp->phy_id == TG3_PHY_ID_BCM8002)
15564 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15565 		}
15566 	}
15567 
15568 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15569 	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15570 	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
15571 	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
15572 	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
15573 	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15574 	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15575 	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15576 	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15577 		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15578 
15579 		tp->eee.supported = SUPPORTED_100baseT_Full |
15580 				    SUPPORTED_1000baseT_Full;
15581 		tp->eee.advertised = ADVERTISED_100baseT_Full |
15582 				     ADVERTISED_1000baseT_Full;
15583 		tp->eee.eee_enabled = 1;
15584 		tp->eee.tx_lpi_enabled = 1;
15585 		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15586 	}
15587 
15588 	tg3_phy_init_link_config(tp);
15589 
15590 	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15591 	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15592 	    !tg3_flag(tp, ENABLE_APE) &&
15593 	    !tg3_flag(tp, ENABLE_ASF)) {
15594 		u32 bmsr, dummy;
15595 
15596 		tg3_readphy(tp, MII_BMSR, &bmsr);
15597 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15598 		    (bmsr & BMSR_LSTATUS))
15599 			goto skip_phy_reset;
15600 
15601 		err = tg3_phy_reset(tp);
15602 		if (err)
15603 			return err;
15604 
15605 		tg3_phy_set_wirespeed(tp);
15606 
15607 		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15608 			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15609 					    tp->link_config.flowctrl);
15610 
15611 			tg3_writephy(tp, MII_BMCR,
15612 				     BMCR_ANENABLE | BMCR_ANRESTART);
15613 		}
15614 	}
15615 
15616 skip_phy_reset:
15617 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15618 		err = tg3_init_5401phy_dsp(tp);
15619 		if (err)
15620 			return err;
15621 
15622 		err = tg3_init_5401phy_dsp(tp);
15623 	}
15624 
15625 	return err;
15626 }
15627 
tg3_read_vpd(struct tg3 * tp)15628 static void tg3_read_vpd(struct tg3 *tp)
15629 {
15630 	u8 *vpd_data;
15631 	unsigned int len, vpdlen;
15632 	int i;
15633 
15634 	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15635 	if (!vpd_data)
15636 		goto out_no_vpd;
15637 
15638 	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15639 					 PCI_VPD_RO_KEYWORD_MFR_ID, &len);
15640 	if (i < 0)
15641 		goto partno;
15642 
15643 	if (len != 4 || memcmp(vpd_data + i, "1028", 4))
15644 		goto partno;
15645 
15646 	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15647 					 PCI_VPD_RO_KEYWORD_VENDOR0, &len);
15648 	if (i < 0)
15649 		goto partno;
15650 
15651 	memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15652 	snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, vpd_data + i);
15653 
15654 partno:
15655 	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15656 					 PCI_VPD_RO_KEYWORD_PARTNO, &len);
15657 	if (i < 0)
15658 		goto out_not_found;
15659 
15660 	if (len > TG3_BPN_SIZE)
15661 		goto out_not_found;
15662 
15663 	memcpy(tp->board_part_number, &vpd_data[i], len);
15664 
15665 out_not_found:
15666 	kfree(vpd_data);
15667 	if (tp->board_part_number[0])
15668 		return;
15669 
15670 out_no_vpd:
15671 	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15672 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15673 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15674 			strcpy(tp->board_part_number, "BCM5717");
15675 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15676 			strcpy(tp->board_part_number, "BCM5718");
15677 		else
15678 			goto nomatch;
15679 	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15680 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15681 			strcpy(tp->board_part_number, "BCM57780");
15682 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15683 			strcpy(tp->board_part_number, "BCM57760");
15684 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15685 			strcpy(tp->board_part_number, "BCM57790");
15686 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15687 			strcpy(tp->board_part_number, "BCM57788");
15688 		else
15689 			goto nomatch;
15690 	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15691 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15692 			strcpy(tp->board_part_number, "BCM57761");
15693 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15694 			strcpy(tp->board_part_number, "BCM57765");
15695 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15696 			strcpy(tp->board_part_number, "BCM57781");
15697 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15698 			strcpy(tp->board_part_number, "BCM57785");
15699 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15700 			strcpy(tp->board_part_number, "BCM57791");
15701 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15702 			strcpy(tp->board_part_number, "BCM57795");
15703 		else
15704 			goto nomatch;
15705 	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15706 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15707 			strcpy(tp->board_part_number, "BCM57762");
15708 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15709 			strcpy(tp->board_part_number, "BCM57766");
15710 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15711 			strcpy(tp->board_part_number, "BCM57782");
15712 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15713 			strcpy(tp->board_part_number, "BCM57786");
15714 		else
15715 			goto nomatch;
15716 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15717 		strcpy(tp->board_part_number, "BCM95906");
15718 	} else {
15719 nomatch:
15720 		strcpy(tp->board_part_number, "none");
15721 	}
15722 }
15723 
tg3_fw_img_is_valid(struct tg3 * tp,u32 offset)15724 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15725 {
15726 	u32 val;
15727 
15728 	if (tg3_nvram_read(tp, offset, &val) ||
15729 	    (val & 0xfc000000) != 0x0c000000 ||
15730 	    tg3_nvram_read(tp, offset + 4, &val) ||
15731 	    val != 0)
15732 		return 0;
15733 
15734 	return 1;
15735 }
15736 
tg3_read_bc_ver(struct tg3 * tp)15737 static void tg3_read_bc_ver(struct tg3 *tp)
15738 {
15739 	u32 val, offset, start, ver_offset;
15740 	int i, dst_off;
15741 	bool newver = false;
15742 
15743 	if (tg3_nvram_read(tp, 0xc, &offset) ||
15744 	    tg3_nvram_read(tp, 0x4, &start))
15745 		return;
15746 
15747 	offset = tg3_nvram_logical_addr(tp, offset);
15748 
15749 	if (tg3_nvram_read(tp, offset, &val))
15750 		return;
15751 
15752 	if ((val & 0xfc000000) == 0x0c000000) {
15753 		if (tg3_nvram_read(tp, offset + 4, &val))
15754 			return;
15755 
15756 		if (val == 0)
15757 			newver = true;
15758 	}
15759 
15760 	dst_off = strlen(tp->fw_ver);
15761 
15762 	if (newver) {
15763 		if (TG3_VER_SIZE - dst_off < 16 ||
15764 		    tg3_nvram_read(tp, offset + 8, &ver_offset))
15765 			return;
15766 
15767 		offset = offset + ver_offset - start;
15768 		for (i = 0; i < 16; i += 4) {
15769 			__be32 v;
15770 			if (tg3_nvram_read_be32(tp, offset + i, &v))
15771 				return;
15772 
15773 			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15774 		}
15775 	} else {
15776 		u32 major, minor;
15777 
15778 		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15779 			return;
15780 
15781 		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15782 			TG3_NVM_BCVER_MAJSFT;
15783 		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15784 		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15785 			 "v%d.%02d", major, minor);
15786 	}
15787 }
15788 
tg3_read_hwsb_ver(struct tg3 * tp)15789 static void tg3_read_hwsb_ver(struct tg3 *tp)
15790 {
15791 	u32 val, major, minor;
15792 
15793 	/* Use native endian representation */
15794 	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15795 		return;
15796 
15797 	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15798 		TG3_NVM_HWSB_CFG1_MAJSFT;
15799 	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15800 		TG3_NVM_HWSB_CFG1_MINSFT;
15801 
15802 	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15803 }
15804 
tg3_read_sb_ver(struct tg3 * tp,u32 val)15805 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15806 {
15807 	u32 offset, major, minor, build;
15808 
15809 	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15810 
15811 	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15812 		return;
15813 
15814 	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15815 	case TG3_EEPROM_SB_REVISION_0:
15816 		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15817 		break;
15818 	case TG3_EEPROM_SB_REVISION_2:
15819 		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15820 		break;
15821 	case TG3_EEPROM_SB_REVISION_3:
15822 		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15823 		break;
15824 	case TG3_EEPROM_SB_REVISION_4:
15825 		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15826 		break;
15827 	case TG3_EEPROM_SB_REVISION_5:
15828 		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15829 		break;
15830 	case TG3_EEPROM_SB_REVISION_6:
15831 		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15832 		break;
15833 	default:
15834 		return;
15835 	}
15836 
15837 	if (tg3_nvram_read(tp, offset, &val))
15838 		return;
15839 
15840 	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15841 		TG3_EEPROM_SB_EDH_BLD_SHFT;
15842 	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15843 		TG3_EEPROM_SB_EDH_MAJ_SHFT;
15844 	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15845 
15846 	if (minor > 99 || build > 26)
15847 		return;
15848 
15849 	offset = strlen(tp->fw_ver);
15850 	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15851 		 " v%d.%02d", major, minor);
15852 
15853 	if (build > 0) {
15854 		offset = strlen(tp->fw_ver);
15855 		if (offset < TG3_VER_SIZE - 1)
15856 			tp->fw_ver[offset] = 'a' + build - 1;
15857 	}
15858 }
15859 
tg3_read_mgmtfw_ver(struct tg3 * tp)15860 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15861 {
15862 	u32 val, offset, start;
15863 	int i, vlen;
15864 
15865 	for (offset = TG3_NVM_DIR_START;
15866 	     offset < TG3_NVM_DIR_END;
15867 	     offset += TG3_NVM_DIRENT_SIZE) {
15868 		if (tg3_nvram_read(tp, offset, &val))
15869 			return;
15870 
15871 		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15872 			break;
15873 	}
15874 
15875 	if (offset == TG3_NVM_DIR_END)
15876 		return;
15877 
15878 	if (!tg3_flag(tp, 5705_PLUS))
15879 		start = 0x08000000;
15880 	else if (tg3_nvram_read(tp, offset - 4, &start))
15881 		return;
15882 
15883 	if (tg3_nvram_read(tp, offset + 4, &offset) ||
15884 	    !tg3_fw_img_is_valid(tp, offset) ||
15885 	    tg3_nvram_read(tp, offset + 8, &val))
15886 		return;
15887 
15888 	offset += val - start;
15889 
15890 	vlen = strlen(tp->fw_ver);
15891 
15892 	tp->fw_ver[vlen++] = ',';
15893 	tp->fw_ver[vlen++] = ' ';
15894 
15895 	for (i = 0; i < 4; i++) {
15896 		__be32 v;
15897 		if (tg3_nvram_read_be32(tp, offset, &v))
15898 			return;
15899 
15900 		offset += sizeof(v);
15901 
15902 		if (vlen > TG3_VER_SIZE - sizeof(v)) {
15903 			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15904 			break;
15905 		}
15906 
15907 		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15908 		vlen += sizeof(v);
15909 	}
15910 }
15911 
tg3_probe_ncsi(struct tg3 * tp)15912 static void tg3_probe_ncsi(struct tg3 *tp)
15913 {
15914 	u32 apedata;
15915 
15916 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15917 	if (apedata != APE_SEG_SIG_MAGIC)
15918 		return;
15919 
15920 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15921 	if (!(apedata & APE_FW_STATUS_READY))
15922 		return;
15923 
15924 	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15925 		tg3_flag_set(tp, APE_HAS_NCSI);
15926 }
15927 
tg3_read_dash_ver(struct tg3 * tp)15928 static void tg3_read_dash_ver(struct tg3 *tp)
15929 {
15930 	int vlen;
15931 	u32 apedata;
15932 	char *fwtype;
15933 
15934 	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15935 
15936 	if (tg3_flag(tp, APE_HAS_NCSI))
15937 		fwtype = "NCSI";
15938 	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15939 		fwtype = "SMASH";
15940 	else
15941 		fwtype = "DASH";
15942 
15943 	vlen = strlen(tp->fw_ver);
15944 
15945 	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15946 		 fwtype,
15947 		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15948 		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15949 		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15950 		 (apedata & APE_FW_VERSION_BLDMSK));
15951 }
15952 
tg3_read_otp_ver(struct tg3 * tp)15953 static void tg3_read_otp_ver(struct tg3 *tp)
15954 {
15955 	u32 val, val2;
15956 
15957 	if (tg3_asic_rev(tp) != ASIC_REV_5762)
15958 		return;
15959 
15960 	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15961 	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15962 	    TG3_OTP_MAGIC0_VALID(val)) {
15963 		u64 val64 = (u64) val << 32 | val2;
15964 		u32 ver = 0;
15965 		int i, vlen;
15966 
15967 		for (i = 0; i < 7; i++) {
15968 			if ((val64 & 0xff) == 0)
15969 				break;
15970 			ver = val64 & 0xff;
15971 			val64 >>= 8;
15972 		}
15973 		vlen = strlen(tp->fw_ver);
15974 		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15975 	}
15976 }
15977 
tg3_read_fw_ver(struct tg3 * tp)15978 static void tg3_read_fw_ver(struct tg3 *tp)
15979 {
15980 	u32 val;
15981 	bool vpd_vers = false;
15982 
15983 	if (tp->fw_ver[0] != 0)
15984 		vpd_vers = true;
15985 
15986 	if (tg3_flag(tp, NO_NVRAM)) {
15987 		strcat(tp->fw_ver, "sb");
15988 		tg3_read_otp_ver(tp);
15989 		return;
15990 	}
15991 
15992 	if (tg3_nvram_read(tp, 0, &val))
15993 		return;
15994 
15995 	if (val == TG3_EEPROM_MAGIC)
15996 		tg3_read_bc_ver(tp);
15997 	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15998 		tg3_read_sb_ver(tp, val);
15999 	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
16000 		tg3_read_hwsb_ver(tp);
16001 
16002 	if (tg3_flag(tp, ENABLE_ASF)) {
16003 		if (tg3_flag(tp, ENABLE_APE)) {
16004 			tg3_probe_ncsi(tp);
16005 			if (!vpd_vers)
16006 				tg3_read_dash_ver(tp);
16007 		} else if (!vpd_vers) {
16008 			tg3_read_mgmtfw_ver(tp);
16009 		}
16010 	}
16011 
16012 	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
16013 }
16014 
tg3_rx_ret_ring_size(struct tg3 * tp)16015 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
16016 {
16017 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
16018 		return TG3_RX_RET_MAX_SIZE_5717;
16019 	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
16020 		return TG3_RX_RET_MAX_SIZE_5700;
16021 	else
16022 		return TG3_RX_RET_MAX_SIZE_5705;
16023 }
16024 
16025 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
16026 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
16027 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
16028 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
16029 	{ },
16030 };
16031 
tg3_find_peer(struct tg3 * tp)16032 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
16033 {
16034 	struct pci_dev *peer;
16035 	unsigned int func, devnr = tp->pdev->devfn & ~7;
16036 
16037 	for (func = 0; func < 8; func++) {
16038 		peer = pci_get_slot(tp->pdev->bus, devnr | func);
16039 		if (peer && peer != tp->pdev)
16040 			break;
16041 		pci_dev_put(peer);
16042 	}
16043 	/* 5704 can be configured in single-port mode, set peer to
16044 	 * tp->pdev in that case.
16045 	 */
16046 	if (!peer) {
16047 		peer = tp->pdev;
16048 		return peer;
16049 	}
16050 
16051 	/*
16052 	 * We don't need to keep the refcount elevated; there's no way
16053 	 * to remove one half of this device without removing the other
16054 	 */
16055 	pci_dev_put(peer);
16056 
16057 	return peer;
16058 }
16059 
tg3_detect_asic_rev(struct tg3 * tp,u32 misc_ctrl_reg)16060 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16061 {
16062 	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16063 	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16064 		u32 reg;
16065 
16066 		/* All devices that use the alternate
16067 		 * ASIC REV location have a CPMU.
16068 		 */
16069 		tg3_flag_set(tp, CPMU_PRESENT);
16070 
16071 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16072 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16073 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16074 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16075 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16076 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16077 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16078 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16079 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16080 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16081 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16082 			reg = TG3PCI_GEN2_PRODID_ASICREV;
16083 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16084 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16085 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16086 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16087 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16088 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16089 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16090 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16091 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16092 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16093 			reg = TG3PCI_GEN15_PRODID_ASICREV;
16094 		else
16095 			reg = TG3PCI_PRODID_ASICREV;
16096 
16097 		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16098 	}
16099 
16100 	/* Wrong chip ID in 5752 A0. This code can be removed later
16101 	 * as A0 is not in production.
16102 	 */
16103 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16104 		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16105 
16106 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16107 		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16108 
16109 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16110 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16111 	    tg3_asic_rev(tp) == ASIC_REV_5720)
16112 		tg3_flag_set(tp, 5717_PLUS);
16113 
16114 	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16115 	    tg3_asic_rev(tp) == ASIC_REV_57766)
16116 		tg3_flag_set(tp, 57765_CLASS);
16117 
16118 	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16119 	     tg3_asic_rev(tp) == ASIC_REV_5762)
16120 		tg3_flag_set(tp, 57765_PLUS);
16121 
16122 	/* Intentionally exclude ASIC_REV_5906 */
16123 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16124 	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
16125 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
16126 	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
16127 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
16128 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
16129 	    tg3_flag(tp, 57765_PLUS))
16130 		tg3_flag_set(tp, 5755_PLUS);
16131 
16132 	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16133 	    tg3_asic_rev(tp) == ASIC_REV_5714)
16134 		tg3_flag_set(tp, 5780_CLASS);
16135 
16136 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16137 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
16138 	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
16139 	    tg3_flag(tp, 5755_PLUS) ||
16140 	    tg3_flag(tp, 5780_CLASS))
16141 		tg3_flag_set(tp, 5750_PLUS);
16142 
16143 	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16144 	    tg3_flag(tp, 5750_PLUS))
16145 		tg3_flag_set(tp, 5705_PLUS);
16146 }
16147 
tg3_10_100_only_device(struct tg3 * tp,const struct pci_device_id * ent)16148 static bool tg3_10_100_only_device(struct tg3 *tp,
16149 				   const struct pci_device_id *ent)
16150 {
16151 	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16152 
16153 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16154 	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16155 	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
16156 		return true;
16157 
16158 	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16159 		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16160 			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16161 				return true;
16162 		} else {
16163 			return true;
16164 		}
16165 	}
16166 
16167 	return false;
16168 }
16169 
tg3_get_invariants(struct tg3 * tp,const struct pci_device_id * ent)16170 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16171 {
16172 	u32 misc_ctrl_reg;
16173 	u32 pci_state_reg, grc_misc_cfg;
16174 	u32 val;
16175 	u16 pci_cmd;
16176 	int err;
16177 
16178 	/* Force memory write invalidate off.  If we leave it on,
16179 	 * then on 5700_BX chips we have to enable a workaround.
16180 	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16181 	 * to match the cacheline size.  The Broadcom driver have this
16182 	 * workaround but turns MWI off all the times so never uses
16183 	 * it.  This seems to suggest that the workaround is insufficient.
16184 	 */
16185 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16186 	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16187 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16188 
16189 	/* Important! -- Make sure register accesses are byteswapped
16190 	 * correctly.  Also, for those chips that require it, make
16191 	 * sure that indirect register accesses are enabled before
16192 	 * the first operation.
16193 	 */
16194 	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16195 			      &misc_ctrl_reg);
16196 	tp->misc_host_ctrl |= (misc_ctrl_reg &
16197 			       MISC_HOST_CTRL_CHIPREV);
16198 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16199 			       tp->misc_host_ctrl);
16200 
16201 	tg3_detect_asic_rev(tp, misc_ctrl_reg);
16202 
16203 	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16204 	 * we need to disable memory and use config. cycles
16205 	 * only to access all registers. The 5702/03 chips
16206 	 * can mistakenly decode the special cycles from the
16207 	 * ICH chipsets as memory write cycles, causing corruption
16208 	 * of register and memory space. Only certain ICH bridges
16209 	 * will drive special cycles with non-zero data during the
16210 	 * address phase which can fall within the 5703's address
16211 	 * range. This is not an ICH bug as the PCI spec allows
16212 	 * non-zero address during special cycles. However, only
16213 	 * these ICH bridges are known to drive non-zero addresses
16214 	 * during special cycles.
16215 	 *
16216 	 * Since special cycles do not cross PCI bridges, we only
16217 	 * enable this workaround if the 5703 is on the secondary
16218 	 * bus of these ICH bridges.
16219 	 */
16220 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16221 	    (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16222 		static struct tg3_dev_id {
16223 			u32	vendor;
16224 			u32	device;
16225 			u32	rev;
16226 		} ich_chipsets[] = {
16227 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16228 			  PCI_ANY_ID },
16229 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16230 			  PCI_ANY_ID },
16231 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16232 			  0xa },
16233 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16234 			  PCI_ANY_ID },
16235 			{ },
16236 		};
16237 		struct tg3_dev_id *pci_id = &ich_chipsets[0];
16238 		struct pci_dev *bridge = NULL;
16239 
16240 		while (pci_id->vendor != 0) {
16241 			bridge = pci_get_device(pci_id->vendor, pci_id->device,
16242 						bridge);
16243 			if (!bridge) {
16244 				pci_id++;
16245 				continue;
16246 			}
16247 			if (pci_id->rev != PCI_ANY_ID) {
16248 				if (bridge->revision > pci_id->rev)
16249 					continue;
16250 			}
16251 			if (bridge->subordinate &&
16252 			    (bridge->subordinate->number ==
16253 			     tp->pdev->bus->number)) {
16254 				tg3_flag_set(tp, ICH_WORKAROUND);
16255 				pci_dev_put(bridge);
16256 				break;
16257 			}
16258 		}
16259 	}
16260 
16261 	if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16262 		static struct tg3_dev_id {
16263 			u32	vendor;
16264 			u32	device;
16265 		} bridge_chipsets[] = {
16266 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16267 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16268 			{ },
16269 		};
16270 		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16271 		struct pci_dev *bridge = NULL;
16272 
16273 		while (pci_id->vendor != 0) {
16274 			bridge = pci_get_device(pci_id->vendor,
16275 						pci_id->device,
16276 						bridge);
16277 			if (!bridge) {
16278 				pci_id++;
16279 				continue;
16280 			}
16281 			if (bridge->subordinate &&
16282 			    (bridge->subordinate->number <=
16283 			     tp->pdev->bus->number) &&
16284 			    (bridge->subordinate->busn_res.end >=
16285 			     tp->pdev->bus->number)) {
16286 				tg3_flag_set(tp, 5701_DMA_BUG);
16287 				pci_dev_put(bridge);
16288 				break;
16289 			}
16290 		}
16291 	}
16292 
16293 	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
16294 	 * DMA addresses > 40-bit. This bridge may have other additional
16295 	 * 57xx devices behind it in some 4-port NIC designs for example.
16296 	 * Any tg3 device found behind the bridge will also need the 40-bit
16297 	 * DMA workaround.
16298 	 */
16299 	if (tg3_flag(tp, 5780_CLASS)) {
16300 		tg3_flag_set(tp, 40BIT_DMA_BUG);
16301 		tp->msi_cap = tp->pdev->msi_cap;
16302 	} else {
16303 		struct pci_dev *bridge = NULL;
16304 
16305 		do {
16306 			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16307 						PCI_DEVICE_ID_SERVERWORKS_EPB,
16308 						bridge);
16309 			if (bridge && bridge->subordinate &&
16310 			    (bridge->subordinate->number <=
16311 			     tp->pdev->bus->number) &&
16312 			    (bridge->subordinate->busn_res.end >=
16313 			     tp->pdev->bus->number)) {
16314 				tg3_flag_set(tp, 40BIT_DMA_BUG);
16315 				pci_dev_put(bridge);
16316 				break;
16317 			}
16318 		} while (bridge);
16319 	}
16320 
16321 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16322 	    tg3_asic_rev(tp) == ASIC_REV_5714)
16323 		tp->pdev_peer = tg3_find_peer(tp);
16324 
16325 	/* Determine TSO capabilities */
16326 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16327 		; /* Do nothing. HW bug. */
16328 	else if (tg3_flag(tp, 57765_PLUS))
16329 		tg3_flag_set(tp, HW_TSO_3);
16330 	else if (tg3_flag(tp, 5755_PLUS) ||
16331 		 tg3_asic_rev(tp) == ASIC_REV_5906)
16332 		tg3_flag_set(tp, HW_TSO_2);
16333 	else if (tg3_flag(tp, 5750_PLUS)) {
16334 		tg3_flag_set(tp, HW_TSO_1);
16335 		tg3_flag_set(tp, TSO_BUG);
16336 		if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16337 		    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16338 			tg3_flag_clear(tp, TSO_BUG);
16339 	} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16340 		   tg3_asic_rev(tp) != ASIC_REV_5701 &&
16341 		   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16342 		tg3_flag_set(tp, FW_TSO);
16343 		tg3_flag_set(tp, TSO_BUG);
16344 		if (tg3_asic_rev(tp) == ASIC_REV_5705)
16345 			tp->fw_needed = FIRMWARE_TG3TSO5;
16346 		else
16347 			tp->fw_needed = FIRMWARE_TG3TSO;
16348 	}
16349 
16350 	/* Selectively allow TSO based on operating conditions */
16351 	if (tg3_flag(tp, HW_TSO_1) ||
16352 	    tg3_flag(tp, HW_TSO_2) ||
16353 	    tg3_flag(tp, HW_TSO_3) ||
16354 	    tg3_flag(tp, FW_TSO)) {
16355 		/* For firmware TSO, assume ASF is disabled.
16356 		 * We'll disable TSO later if we discover ASF
16357 		 * is enabled in tg3_get_eeprom_hw_cfg().
16358 		 */
16359 		tg3_flag_set(tp, TSO_CAPABLE);
16360 	} else {
16361 		tg3_flag_clear(tp, TSO_CAPABLE);
16362 		tg3_flag_clear(tp, TSO_BUG);
16363 		tp->fw_needed = NULL;
16364 	}
16365 
16366 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16367 		tp->fw_needed = FIRMWARE_TG3;
16368 
16369 	if (tg3_asic_rev(tp) == ASIC_REV_57766)
16370 		tp->fw_needed = FIRMWARE_TG357766;
16371 
16372 	tp->irq_max = 1;
16373 
16374 	if (tg3_flag(tp, 5750_PLUS)) {
16375 		tg3_flag_set(tp, SUPPORT_MSI);
16376 		if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16377 		    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16378 		    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16379 		     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16380 		     tp->pdev_peer == tp->pdev))
16381 			tg3_flag_clear(tp, SUPPORT_MSI);
16382 
16383 		if (tg3_flag(tp, 5755_PLUS) ||
16384 		    tg3_asic_rev(tp) == ASIC_REV_5906) {
16385 			tg3_flag_set(tp, 1SHOT_MSI);
16386 		}
16387 
16388 		if (tg3_flag(tp, 57765_PLUS)) {
16389 			tg3_flag_set(tp, SUPPORT_MSIX);
16390 			tp->irq_max = TG3_IRQ_MAX_VECS;
16391 		}
16392 	}
16393 
16394 	tp->txq_max = 1;
16395 	tp->rxq_max = 1;
16396 	if (tp->irq_max > 1) {
16397 		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16398 		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16399 
16400 		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16401 		    tg3_asic_rev(tp) == ASIC_REV_5720)
16402 			tp->txq_max = tp->irq_max - 1;
16403 	}
16404 
16405 	if (tg3_flag(tp, 5755_PLUS) ||
16406 	    tg3_asic_rev(tp) == ASIC_REV_5906)
16407 		tg3_flag_set(tp, SHORT_DMA_BUG);
16408 
16409 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
16410 		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16411 
16412 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16413 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16414 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
16415 	    tg3_asic_rev(tp) == ASIC_REV_5762)
16416 		tg3_flag_set(tp, LRG_PROD_RING_CAP);
16417 
16418 	if (tg3_flag(tp, 57765_PLUS) &&
16419 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16420 		tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16421 
16422 	if (!tg3_flag(tp, 5705_PLUS) ||
16423 	    tg3_flag(tp, 5780_CLASS) ||
16424 	    tg3_flag(tp, USE_JUMBO_BDFLAG))
16425 		tg3_flag_set(tp, JUMBO_CAPABLE);
16426 
16427 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16428 			      &pci_state_reg);
16429 
16430 	if (pci_is_pcie(tp->pdev)) {
16431 		u16 lnkctl;
16432 
16433 		tg3_flag_set(tp, PCI_EXPRESS);
16434 
16435 		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16436 		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16437 			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16438 				tg3_flag_clear(tp, HW_TSO_2);
16439 				tg3_flag_clear(tp, TSO_CAPABLE);
16440 			}
16441 			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16442 			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
16443 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16444 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16445 				tg3_flag_set(tp, CLKREQ_BUG);
16446 		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16447 			tg3_flag_set(tp, L1PLLPD_EN);
16448 		}
16449 	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16450 		/* BCM5785 devices are effectively PCIe devices, and should
16451 		 * follow PCIe codepaths, but do not have a PCIe capabilities
16452 		 * section.
16453 		 */
16454 		tg3_flag_set(tp, PCI_EXPRESS);
16455 	} else if (!tg3_flag(tp, 5705_PLUS) ||
16456 		   tg3_flag(tp, 5780_CLASS)) {
16457 		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16458 		if (!tp->pcix_cap) {
16459 			dev_err(&tp->pdev->dev,
16460 				"Cannot find PCI-X capability, aborting\n");
16461 			return -EIO;
16462 		}
16463 
16464 		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16465 			tg3_flag_set(tp, PCIX_MODE);
16466 	}
16467 
16468 	/* If we have an AMD 762 or VIA K8T800 chipset, write
16469 	 * reordering to the mailbox registers done by the host
16470 	 * controller can cause major troubles.  We read back from
16471 	 * every mailbox register write to force the writes to be
16472 	 * posted to the chip in order.
16473 	 */
16474 	if (pci_dev_present(tg3_write_reorder_chipsets) &&
16475 	    !tg3_flag(tp, PCI_EXPRESS))
16476 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
16477 
16478 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16479 			     &tp->pci_cacheline_sz);
16480 	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16481 			     &tp->pci_lat_timer);
16482 	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16483 	    tp->pci_lat_timer < 64) {
16484 		tp->pci_lat_timer = 64;
16485 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16486 				      tp->pci_lat_timer);
16487 	}
16488 
16489 	/* Important! -- It is critical that the PCI-X hw workaround
16490 	 * situation is decided before the first MMIO register access.
16491 	 */
16492 	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16493 		/* 5700 BX chips need to have their TX producer index
16494 		 * mailboxes written twice to workaround a bug.
16495 		 */
16496 		tg3_flag_set(tp, TXD_MBOX_HWBUG);
16497 
16498 		/* If we are in PCI-X mode, enable register write workaround.
16499 		 *
16500 		 * The workaround is to use indirect register accesses
16501 		 * for all chip writes not to mailbox registers.
16502 		 */
16503 		if (tg3_flag(tp, PCIX_MODE)) {
16504 			u32 pm_reg;
16505 
16506 			tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16507 
16508 			/* The chip can have it's power management PCI config
16509 			 * space registers clobbered due to this bug.
16510 			 * So explicitly force the chip into D0 here.
16511 			 */
16512 			pci_read_config_dword(tp->pdev,
16513 					      tp->pdev->pm_cap + PCI_PM_CTRL,
16514 					      &pm_reg);
16515 			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16516 			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16517 			pci_write_config_dword(tp->pdev,
16518 					       tp->pdev->pm_cap + PCI_PM_CTRL,
16519 					       pm_reg);
16520 
16521 			/* Also, force SERR#/PERR# in PCI command. */
16522 			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16523 			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16524 			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16525 		}
16526 	}
16527 
16528 	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16529 		tg3_flag_set(tp, PCI_HIGH_SPEED);
16530 	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16531 		tg3_flag_set(tp, PCI_32BIT);
16532 
16533 	/* Chip-specific fixup from Broadcom driver */
16534 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16535 	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16536 		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16537 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16538 	}
16539 
16540 	/* Default fast path register access methods */
16541 	tp->read32 = tg3_read32;
16542 	tp->write32 = tg3_write32;
16543 	tp->read32_mbox = tg3_read32;
16544 	tp->write32_mbox = tg3_write32;
16545 	tp->write32_tx_mbox = tg3_write32;
16546 	tp->write32_rx_mbox = tg3_write32;
16547 
16548 	/* Various workaround register access methods */
16549 	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16550 		tp->write32 = tg3_write_indirect_reg32;
16551 	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16552 		 (tg3_flag(tp, PCI_EXPRESS) &&
16553 		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16554 		/*
16555 		 * Back to back register writes can cause problems on these
16556 		 * chips, the workaround is to read back all reg writes
16557 		 * except those to mailbox regs.
16558 		 *
16559 		 * See tg3_write_indirect_reg32().
16560 		 */
16561 		tp->write32 = tg3_write_flush_reg32;
16562 	}
16563 
16564 	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16565 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
16566 		if (tg3_flag(tp, MBOX_WRITE_REORDER))
16567 			tp->write32_rx_mbox = tg3_write_flush_reg32;
16568 	}
16569 
16570 	if (tg3_flag(tp, ICH_WORKAROUND)) {
16571 		tp->read32 = tg3_read_indirect_reg32;
16572 		tp->write32 = tg3_write_indirect_reg32;
16573 		tp->read32_mbox = tg3_read_indirect_mbox;
16574 		tp->write32_mbox = tg3_write_indirect_mbox;
16575 		tp->write32_tx_mbox = tg3_write_indirect_mbox;
16576 		tp->write32_rx_mbox = tg3_write_indirect_mbox;
16577 
16578 		iounmap(tp->regs);
16579 		tp->regs = NULL;
16580 
16581 		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16582 		pci_cmd &= ~PCI_COMMAND_MEMORY;
16583 		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16584 	}
16585 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16586 		tp->read32_mbox = tg3_read32_mbox_5906;
16587 		tp->write32_mbox = tg3_write32_mbox_5906;
16588 		tp->write32_tx_mbox = tg3_write32_mbox_5906;
16589 		tp->write32_rx_mbox = tg3_write32_mbox_5906;
16590 	}
16591 
16592 	if (tp->write32 == tg3_write_indirect_reg32 ||
16593 	    (tg3_flag(tp, PCIX_MODE) &&
16594 	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16595 	      tg3_asic_rev(tp) == ASIC_REV_5701)))
16596 		tg3_flag_set(tp, SRAM_USE_CONFIG);
16597 
16598 	/* The memory arbiter has to be enabled in order for SRAM accesses
16599 	 * to succeed.  Normally on powerup the tg3 chip firmware will make
16600 	 * sure it is enabled, but other entities such as system netboot
16601 	 * code might disable it.
16602 	 */
16603 	val = tr32(MEMARB_MODE);
16604 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16605 
16606 	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16607 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16608 	    tg3_flag(tp, 5780_CLASS)) {
16609 		if (tg3_flag(tp, PCIX_MODE)) {
16610 			pci_read_config_dword(tp->pdev,
16611 					      tp->pcix_cap + PCI_X_STATUS,
16612 					      &val);
16613 			tp->pci_fn = val & 0x7;
16614 		}
16615 	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16616 		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
16617 		   tg3_asic_rev(tp) == ASIC_REV_5720) {
16618 		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16619 		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16620 			val = tr32(TG3_CPMU_STATUS);
16621 
16622 		if (tg3_asic_rev(tp) == ASIC_REV_5717)
16623 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16624 		else
16625 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16626 				     TG3_CPMU_STATUS_FSHFT_5719;
16627 	}
16628 
16629 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16630 		tp->write32_tx_mbox = tg3_write_flush_reg32;
16631 		tp->write32_rx_mbox = tg3_write_flush_reg32;
16632 	}
16633 
16634 	/* Get eeprom hw config before calling tg3_set_power_state().
16635 	 * In particular, the TG3_FLAG_IS_NIC flag must be
16636 	 * determined before calling tg3_set_power_state() so that
16637 	 * we know whether or not to switch out of Vaux power.
16638 	 * When the flag is set, it means that GPIO1 is used for eeprom
16639 	 * write protect and also implies that it is a LOM where GPIOs
16640 	 * are not used to switch power.
16641 	 */
16642 	tg3_get_eeprom_hw_cfg(tp);
16643 
16644 	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16645 		tg3_flag_clear(tp, TSO_CAPABLE);
16646 		tg3_flag_clear(tp, TSO_BUG);
16647 		tp->fw_needed = NULL;
16648 	}
16649 
16650 	if (tg3_flag(tp, ENABLE_APE)) {
16651 		/* Allow reads and writes to the
16652 		 * APE register and memory space.
16653 		 */
16654 		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16655 				 PCISTATE_ALLOW_APE_SHMEM_WR |
16656 				 PCISTATE_ALLOW_APE_PSPACE_WR;
16657 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16658 				       pci_state_reg);
16659 
16660 		tg3_ape_lock_init(tp);
16661 		tp->ape_hb_interval =
16662 			msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16663 	}
16664 
16665 	/* Set up tp->grc_local_ctrl before calling
16666 	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16667 	 * will bring 5700's external PHY out of reset.
16668 	 * It is also used as eeprom write protect on LOMs.
16669 	 */
16670 	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16671 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16672 	    tg3_flag(tp, EEPROM_WRITE_PROT))
16673 		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16674 				       GRC_LCLCTRL_GPIO_OUTPUT1);
16675 	/* Unused GPIO3 must be driven as output on 5752 because there
16676 	 * are no pull-up resistors on unused GPIO pins.
16677 	 */
16678 	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16679 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16680 
16681 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16682 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
16683 	    tg3_flag(tp, 57765_CLASS))
16684 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16685 
16686 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16687 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16688 		/* Turn off the debug UART. */
16689 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16690 		if (tg3_flag(tp, IS_NIC))
16691 			/* Keep VMain power. */
16692 			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16693 					      GRC_LCLCTRL_GPIO_OUTPUT0;
16694 	}
16695 
16696 	if (tg3_asic_rev(tp) == ASIC_REV_5762)
16697 		tp->grc_local_ctrl |=
16698 			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16699 
16700 	/* Switch out of Vaux if it is a NIC */
16701 	tg3_pwrsrc_switch_to_vmain(tp);
16702 
16703 	/* Derive initial jumbo mode from MTU assigned in
16704 	 * ether_setup() via the alloc_etherdev() call
16705 	 */
16706 	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16707 		tg3_flag_set(tp, JUMBO_RING_ENABLE);
16708 
16709 	/* Determine WakeOnLan speed to use. */
16710 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16711 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16712 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16713 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16714 		tg3_flag_clear(tp, WOL_SPEED_100MB);
16715 	} else {
16716 		tg3_flag_set(tp, WOL_SPEED_100MB);
16717 	}
16718 
16719 	if (tg3_asic_rev(tp) == ASIC_REV_5906)
16720 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
16721 
16722 	/* A few boards don't want Ethernet@WireSpeed phy feature */
16723 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16724 	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16725 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16726 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16727 	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16728 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16729 		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16730 
16731 	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16732 	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
16733 		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16734 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16735 		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16736 
16737 	if (tg3_flag(tp, 5705_PLUS) &&
16738 	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16739 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
16740 	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
16741 	    !tg3_flag(tp, 57765_PLUS)) {
16742 		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16743 		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
16744 		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
16745 		    tg3_asic_rev(tp) == ASIC_REV_5761) {
16746 			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16747 			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16748 				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16749 			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16750 				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16751 		} else
16752 			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16753 	}
16754 
16755 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16756 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16757 		tp->phy_otp = tg3_read_otp_phycfg(tp);
16758 		if (tp->phy_otp == 0)
16759 			tp->phy_otp = TG3_OTP_DEFAULT;
16760 	}
16761 
16762 	if (tg3_flag(tp, CPMU_PRESENT))
16763 		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16764 	else
16765 		tp->mi_mode = MAC_MI_MODE_BASE;
16766 
16767 	tp->coalesce_mode = 0;
16768 	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16769 	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
16770 		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16771 
16772 	/* Set these bits to enable statistics workaround. */
16773 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16774 	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
16775 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16776 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16777 		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16778 		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16779 	}
16780 
16781 	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16782 	    tg3_asic_rev(tp) == ASIC_REV_57780)
16783 		tg3_flag_set(tp, USE_PHYLIB);
16784 
16785 	err = tg3_mdio_init(tp);
16786 	if (err)
16787 		return err;
16788 
16789 	/* Initialize data/descriptor byte/word swapping. */
16790 	val = tr32(GRC_MODE);
16791 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16792 	    tg3_asic_rev(tp) == ASIC_REV_5762)
16793 		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16794 			GRC_MODE_WORD_SWAP_B2HRX_DATA |
16795 			GRC_MODE_B2HRX_ENABLE |
16796 			GRC_MODE_HTX2B_ENABLE |
16797 			GRC_MODE_HOST_STACKUP);
16798 	else
16799 		val &= GRC_MODE_HOST_STACKUP;
16800 
16801 	tw32(GRC_MODE, val | tp->grc_mode);
16802 
16803 	tg3_switch_clocks(tp);
16804 
16805 	/* Clear this out for sanity. */
16806 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16807 
16808 	/* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16809 	tw32(TG3PCI_REG_BASE_ADDR, 0);
16810 
16811 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16812 			      &pci_state_reg);
16813 	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16814 	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16815 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16816 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16817 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16818 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16819 			void __iomem *sram_base;
16820 
16821 			/* Write some dummy words into the SRAM status block
16822 			 * area, see if it reads back correctly.  If the return
16823 			 * value is bad, force enable the PCIX workaround.
16824 			 */
16825 			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16826 
16827 			writel(0x00000000, sram_base);
16828 			writel(0x00000000, sram_base + 4);
16829 			writel(0xffffffff, sram_base + 4);
16830 			if (readl(sram_base) != 0x00000000)
16831 				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16832 		}
16833 	}
16834 
16835 	udelay(50);
16836 	tg3_nvram_init(tp);
16837 
16838 	/* If the device has an NVRAM, no need to load patch firmware */
16839 	if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16840 	    !tg3_flag(tp, NO_NVRAM))
16841 		tp->fw_needed = NULL;
16842 
16843 	grc_misc_cfg = tr32(GRC_MISC_CFG);
16844 	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16845 
16846 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16847 	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16848 	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16849 		tg3_flag_set(tp, IS_5788);
16850 
16851 	if (!tg3_flag(tp, IS_5788) &&
16852 	    tg3_asic_rev(tp) != ASIC_REV_5700)
16853 		tg3_flag_set(tp, TAGGED_STATUS);
16854 	if (tg3_flag(tp, TAGGED_STATUS)) {
16855 		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16856 				      HOSTCC_MODE_CLRTICK_TXBD);
16857 
16858 		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16859 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16860 				       tp->misc_host_ctrl);
16861 	}
16862 
16863 	/* Preserve the APE MAC_MODE bits */
16864 	if (tg3_flag(tp, ENABLE_APE))
16865 		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16866 	else
16867 		tp->mac_mode = 0;
16868 
16869 	if (tg3_10_100_only_device(tp, ent))
16870 		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16871 
16872 	err = tg3_phy_probe(tp);
16873 	if (err) {
16874 		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16875 		/* ... but do not return immediately ... */
16876 		tg3_mdio_fini(tp);
16877 	}
16878 
16879 	tg3_read_vpd(tp);
16880 	tg3_read_fw_ver(tp);
16881 
16882 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16883 		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16884 	} else {
16885 		if (tg3_asic_rev(tp) == ASIC_REV_5700)
16886 			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16887 		else
16888 			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16889 	}
16890 
16891 	/* 5700 {AX,BX} chips have a broken status block link
16892 	 * change bit implementation, so we must use the
16893 	 * status register in those cases.
16894 	 */
16895 	if (tg3_asic_rev(tp) == ASIC_REV_5700)
16896 		tg3_flag_set(tp, USE_LINKCHG_REG);
16897 	else
16898 		tg3_flag_clear(tp, USE_LINKCHG_REG);
16899 
16900 	/* The led_ctrl is set during tg3_phy_probe, here we might
16901 	 * have to force the link status polling mechanism based
16902 	 * upon subsystem IDs.
16903 	 */
16904 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16905 	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
16906 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16907 		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16908 		tg3_flag_set(tp, USE_LINKCHG_REG);
16909 	}
16910 
16911 	/* For all SERDES we poll the MAC status register. */
16912 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16913 		tg3_flag_set(tp, POLL_SERDES);
16914 	else
16915 		tg3_flag_clear(tp, POLL_SERDES);
16916 
16917 	if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16918 		tg3_flag_set(tp, POLL_CPMU_LINK);
16919 
16920 	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16921 	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16922 	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16923 	    tg3_flag(tp, PCIX_MODE)) {
16924 		tp->rx_offset = NET_SKB_PAD;
16925 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16926 		tp->rx_copy_thresh = ~(u16)0;
16927 #endif
16928 	}
16929 
16930 	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16931 	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16932 	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16933 
16934 	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16935 
16936 	/* Increment the rx prod index on the rx std ring by at most
16937 	 * 8 for these chips to workaround hw errata.
16938 	 */
16939 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16940 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
16941 	    tg3_asic_rev(tp) == ASIC_REV_5755)
16942 		tp->rx_std_max_post = 8;
16943 
16944 	if (tg3_flag(tp, ASPM_WORKAROUND))
16945 		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16946 				     PCIE_PWR_MGMT_L1_THRESH_MSK;
16947 
16948 	return err;
16949 }
16950 
tg3_get_device_address(struct tg3 * tp,u8 * addr)16951 static int tg3_get_device_address(struct tg3 *tp, u8 *addr)
16952 {
16953 	u32 hi, lo, mac_offset;
16954 	int addr_ok = 0;
16955 	int err;
16956 
16957 	if (!eth_platform_get_mac_address(&tp->pdev->dev, addr))
16958 		return 0;
16959 
16960 	if (tg3_flag(tp, IS_SSB_CORE)) {
16961 		err = ssb_gige_get_macaddr(tp->pdev, addr);
16962 		if (!err && is_valid_ether_addr(addr))
16963 			return 0;
16964 	}
16965 
16966 	mac_offset = 0x7c;
16967 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16968 	    tg3_flag(tp, 5780_CLASS)) {
16969 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16970 			mac_offset = 0xcc;
16971 		if (tg3_nvram_lock(tp))
16972 			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16973 		else
16974 			tg3_nvram_unlock(tp);
16975 	} else if (tg3_flag(tp, 5717_PLUS)) {
16976 		if (tp->pci_fn & 1)
16977 			mac_offset = 0xcc;
16978 		if (tp->pci_fn > 1)
16979 			mac_offset += 0x18c;
16980 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16981 		mac_offset = 0x10;
16982 
16983 	/* First try to get it from MAC address mailbox. */
16984 	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
16985 	if ((hi >> 16) == 0x484b) {
16986 		addr[0] = (hi >>  8) & 0xff;
16987 		addr[1] = (hi >>  0) & 0xff;
16988 
16989 		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16990 		addr[2] = (lo >> 24) & 0xff;
16991 		addr[3] = (lo >> 16) & 0xff;
16992 		addr[4] = (lo >>  8) & 0xff;
16993 		addr[5] = (lo >>  0) & 0xff;
16994 
16995 		/* Some old bootcode may report a 0 MAC address in SRAM */
16996 		addr_ok = is_valid_ether_addr(addr);
16997 	}
16998 	if (!addr_ok) {
16999 		/* Next, try NVRAM. */
17000 		if (!tg3_flag(tp, NO_NVRAM) &&
17001 		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
17002 		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
17003 			memcpy(&addr[0], ((char *)&hi) + 2, 2);
17004 			memcpy(&addr[2], (char *)&lo, sizeof(lo));
17005 		}
17006 		/* Finally just fetch it out of the MAC control regs. */
17007 		else {
17008 			hi = tr32(MAC_ADDR_0_HIGH);
17009 			lo = tr32(MAC_ADDR_0_LOW);
17010 
17011 			addr[5] = lo & 0xff;
17012 			addr[4] = (lo >> 8) & 0xff;
17013 			addr[3] = (lo >> 16) & 0xff;
17014 			addr[2] = (lo >> 24) & 0xff;
17015 			addr[1] = hi & 0xff;
17016 			addr[0] = (hi >> 8) & 0xff;
17017 		}
17018 	}
17019 
17020 	if (!is_valid_ether_addr(addr))
17021 		return -EINVAL;
17022 	return 0;
17023 }
17024 
17025 #define BOUNDARY_SINGLE_CACHELINE	1
17026 #define BOUNDARY_MULTI_CACHELINE	2
17027 
tg3_calc_dma_bndry(struct tg3 * tp,u32 val)17028 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17029 {
17030 	int cacheline_size;
17031 	u8 byte;
17032 	int goal;
17033 
17034 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17035 	if (byte == 0)
17036 		cacheline_size = 1024;
17037 	else
17038 		cacheline_size = (int) byte * 4;
17039 
17040 	/* On 5703 and later chips, the boundary bits have no
17041 	 * effect.
17042 	 */
17043 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17044 	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
17045 	    !tg3_flag(tp, PCI_EXPRESS))
17046 		goto out;
17047 
17048 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17049 	goal = BOUNDARY_MULTI_CACHELINE;
17050 #else
17051 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17052 	goal = BOUNDARY_SINGLE_CACHELINE;
17053 #else
17054 	goal = 0;
17055 #endif
17056 #endif
17057 
17058 	if (tg3_flag(tp, 57765_PLUS)) {
17059 		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17060 		goto out;
17061 	}
17062 
17063 	if (!goal)
17064 		goto out;
17065 
17066 	/* PCI controllers on most RISC systems tend to disconnect
17067 	 * when a device tries to burst across a cache-line boundary.
17068 	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
17069 	 *
17070 	 * Unfortunately, for PCI-E there are only limited
17071 	 * write-side controls for this, and thus for reads
17072 	 * we will still get the disconnects.  We'll also waste
17073 	 * these PCI cycles for both read and write for chips
17074 	 * other than 5700 and 5701 which do not implement the
17075 	 * boundary bits.
17076 	 */
17077 	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17078 		switch (cacheline_size) {
17079 		case 16:
17080 		case 32:
17081 		case 64:
17082 		case 128:
17083 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17084 				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17085 					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17086 			} else {
17087 				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17088 					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17089 			}
17090 			break;
17091 
17092 		case 256:
17093 			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17094 				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17095 			break;
17096 
17097 		default:
17098 			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17099 				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17100 			break;
17101 		}
17102 	} else if (tg3_flag(tp, PCI_EXPRESS)) {
17103 		switch (cacheline_size) {
17104 		case 16:
17105 		case 32:
17106 		case 64:
17107 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17108 				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17109 				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17110 				break;
17111 			}
17112 			fallthrough;
17113 		case 128:
17114 		default:
17115 			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17116 			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17117 			break;
17118 		}
17119 	} else {
17120 		switch (cacheline_size) {
17121 		case 16:
17122 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17123 				val |= (DMA_RWCTRL_READ_BNDRY_16 |
17124 					DMA_RWCTRL_WRITE_BNDRY_16);
17125 				break;
17126 			}
17127 			fallthrough;
17128 		case 32:
17129 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17130 				val |= (DMA_RWCTRL_READ_BNDRY_32 |
17131 					DMA_RWCTRL_WRITE_BNDRY_32);
17132 				break;
17133 			}
17134 			fallthrough;
17135 		case 64:
17136 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17137 				val |= (DMA_RWCTRL_READ_BNDRY_64 |
17138 					DMA_RWCTRL_WRITE_BNDRY_64);
17139 				break;
17140 			}
17141 			fallthrough;
17142 		case 128:
17143 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17144 				val |= (DMA_RWCTRL_READ_BNDRY_128 |
17145 					DMA_RWCTRL_WRITE_BNDRY_128);
17146 				break;
17147 			}
17148 			fallthrough;
17149 		case 256:
17150 			val |= (DMA_RWCTRL_READ_BNDRY_256 |
17151 				DMA_RWCTRL_WRITE_BNDRY_256);
17152 			break;
17153 		case 512:
17154 			val |= (DMA_RWCTRL_READ_BNDRY_512 |
17155 				DMA_RWCTRL_WRITE_BNDRY_512);
17156 			break;
17157 		case 1024:
17158 		default:
17159 			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17160 				DMA_RWCTRL_WRITE_BNDRY_1024);
17161 			break;
17162 		}
17163 	}
17164 
17165 out:
17166 	return val;
17167 }
17168 
tg3_do_test_dma(struct tg3 * tp,u32 * buf,dma_addr_t buf_dma,int size,bool to_device)17169 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17170 			   int size, bool to_device)
17171 {
17172 	struct tg3_internal_buffer_desc test_desc;
17173 	u32 sram_dma_descs;
17174 	int i, ret;
17175 
17176 	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17177 
17178 	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17179 	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17180 	tw32(RDMAC_STATUS, 0);
17181 	tw32(WDMAC_STATUS, 0);
17182 
17183 	tw32(BUFMGR_MODE, 0);
17184 	tw32(FTQ_RESET, 0);
17185 
17186 	test_desc.addr_hi = ((u64) buf_dma) >> 32;
17187 	test_desc.addr_lo = buf_dma & 0xffffffff;
17188 	test_desc.nic_mbuf = 0x00002100;
17189 	test_desc.len = size;
17190 
17191 	/*
17192 	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
17193 	 * the *second* time the tg3 driver was getting loaded after an
17194 	 * initial scan.
17195 	 *
17196 	 * Broadcom tells me:
17197 	 *   ...the DMA engine is connected to the GRC block and a DMA
17198 	 *   reset may affect the GRC block in some unpredictable way...
17199 	 *   The behavior of resets to individual blocks has not been tested.
17200 	 *
17201 	 * Broadcom noted the GRC reset will also reset all sub-components.
17202 	 */
17203 	if (to_device) {
17204 		test_desc.cqid_sqid = (13 << 8) | 2;
17205 
17206 		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17207 		udelay(40);
17208 	} else {
17209 		test_desc.cqid_sqid = (16 << 8) | 7;
17210 
17211 		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17212 		udelay(40);
17213 	}
17214 	test_desc.flags = 0x00000005;
17215 
17216 	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17217 		u32 val;
17218 
17219 		val = *(((u32 *)&test_desc) + i);
17220 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17221 				       sram_dma_descs + (i * sizeof(u32)));
17222 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17223 	}
17224 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17225 
17226 	if (to_device)
17227 		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17228 	else
17229 		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17230 
17231 	ret = -ENODEV;
17232 	for (i = 0; i < 40; i++) {
17233 		u32 val;
17234 
17235 		if (to_device)
17236 			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17237 		else
17238 			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17239 		if ((val & 0xffff) == sram_dma_descs) {
17240 			ret = 0;
17241 			break;
17242 		}
17243 
17244 		udelay(100);
17245 	}
17246 
17247 	return ret;
17248 }
17249 
17250 #define TEST_BUFFER_SIZE	0x2000
17251 
17252 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17253 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17254 	{ },
17255 };
17256 
tg3_test_dma(struct tg3 * tp)17257 static int tg3_test_dma(struct tg3 *tp)
17258 {
17259 	dma_addr_t buf_dma;
17260 	u32 *buf, saved_dma_rwctrl;
17261 	int ret = 0;
17262 
17263 	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17264 				 &buf_dma, GFP_KERNEL);
17265 	if (!buf) {
17266 		ret = -ENOMEM;
17267 		goto out_nofree;
17268 	}
17269 
17270 	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17271 			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17272 
17273 	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17274 
17275 	if (tg3_flag(tp, 57765_PLUS))
17276 		goto out;
17277 
17278 	if (tg3_flag(tp, PCI_EXPRESS)) {
17279 		/* DMA read watermark not used on PCIE */
17280 		tp->dma_rwctrl |= 0x00180000;
17281 	} else if (!tg3_flag(tp, PCIX_MODE)) {
17282 		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17283 		    tg3_asic_rev(tp) == ASIC_REV_5750)
17284 			tp->dma_rwctrl |= 0x003f0000;
17285 		else
17286 			tp->dma_rwctrl |= 0x003f000f;
17287 	} else {
17288 		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17289 		    tg3_asic_rev(tp) == ASIC_REV_5704) {
17290 			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17291 			u32 read_water = 0x7;
17292 
17293 			/* If the 5704 is behind the EPB bridge, we can
17294 			 * do the less restrictive ONE_DMA workaround for
17295 			 * better performance.
17296 			 */
17297 			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17298 			    tg3_asic_rev(tp) == ASIC_REV_5704)
17299 				tp->dma_rwctrl |= 0x8000;
17300 			else if (ccval == 0x6 || ccval == 0x7)
17301 				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17302 
17303 			if (tg3_asic_rev(tp) == ASIC_REV_5703)
17304 				read_water = 4;
17305 			/* Set bit 23 to enable PCIX hw bug fix */
17306 			tp->dma_rwctrl |=
17307 				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17308 				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17309 				(1 << 23);
17310 		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17311 			/* 5780 always in PCIX mode */
17312 			tp->dma_rwctrl |= 0x00144000;
17313 		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17314 			/* 5714 always in PCIX mode */
17315 			tp->dma_rwctrl |= 0x00148000;
17316 		} else {
17317 			tp->dma_rwctrl |= 0x001b000f;
17318 		}
17319 	}
17320 	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17321 		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17322 
17323 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17324 	    tg3_asic_rev(tp) == ASIC_REV_5704)
17325 		tp->dma_rwctrl &= 0xfffffff0;
17326 
17327 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17328 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
17329 		/* Remove this if it causes problems for some boards. */
17330 		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17331 
17332 		/* On 5700/5701 chips, we need to set this bit.
17333 		 * Otherwise the chip will issue cacheline transactions
17334 		 * to streamable DMA memory with not all the byte
17335 		 * enables turned on.  This is an error on several
17336 		 * RISC PCI controllers, in particular sparc64.
17337 		 *
17338 		 * On 5703/5704 chips, this bit has been reassigned
17339 		 * a different meaning.  In particular, it is used
17340 		 * on those chips to enable a PCI-X workaround.
17341 		 */
17342 		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17343 	}
17344 
17345 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17346 
17347 
17348 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17349 	    tg3_asic_rev(tp) != ASIC_REV_5701)
17350 		goto out;
17351 
17352 	/* It is best to perform DMA test with maximum write burst size
17353 	 * to expose the 5700/5701 write DMA bug.
17354 	 */
17355 	saved_dma_rwctrl = tp->dma_rwctrl;
17356 	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17357 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17358 
17359 	while (1) {
17360 		u32 *p = buf, i;
17361 
17362 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17363 			p[i] = i;
17364 
17365 		/* Send the buffer to the chip. */
17366 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17367 		if (ret) {
17368 			dev_err(&tp->pdev->dev,
17369 				"%s: Buffer write failed. err = %d\n",
17370 				__func__, ret);
17371 			break;
17372 		}
17373 
17374 		/* Now read it back. */
17375 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17376 		if (ret) {
17377 			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17378 				"err = %d\n", __func__, ret);
17379 			break;
17380 		}
17381 
17382 		/* Verify it. */
17383 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17384 			if (p[i] == i)
17385 				continue;
17386 
17387 			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17388 			    DMA_RWCTRL_WRITE_BNDRY_16) {
17389 				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17390 				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17391 				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17392 				break;
17393 			} else {
17394 				dev_err(&tp->pdev->dev,
17395 					"%s: Buffer corrupted on read back! "
17396 					"(%d != %d)\n", __func__, p[i], i);
17397 				ret = -ENODEV;
17398 				goto out;
17399 			}
17400 		}
17401 
17402 		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17403 			/* Success. */
17404 			ret = 0;
17405 			break;
17406 		}
17407 	}
17408 	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17409 	    DMA_RWCTRL_WRITE_BNDRY_16) {
17410 		/* DMA test passed without adjusting DMA boundary,
17411 		 * now look for chipsets that are known to expose the
17412 		 * DMA bug without failing the test.
17413 		 */
17414 		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17415 			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17416 			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17417 		} else {
17418 			/* Safe to use the calculated DMA boundary. */
17419 			tp->dma_rwctrl = saved_dma_rwctrl;
17420 		}
17421 
17422 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17423 	}
17424 
17425 out:
17426 	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17427 out_nofree:
17428 	return ret;
17429 }
17430 
tg3_init_bufmgr_config(struct tg3 * tp)17431 static void tg3_init_bufmgr_config(struct tg3 *tp)
17432 {
17433 	if (tg3_flag(tp, 57765_PLUS)) {
17434 		tp->bufmgr_config.mbuf_read_dma_low_water =
17435 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17436 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17437 			DEFAULT_MB_MACRX_LOW_WATER_57765;
17438 		tp->bufmgr_config.mbuf_high_water =
17439 			DEFAULT_MB_HIGH_WATER_57765;
17440 
17441 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17442 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17443 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17444 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17445 		tp->bufmgr_config.mbuf_high_water_jumbo =
17446 			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17447 	} else if (tg3_flag(tp, 5705_PLUS)) {
17448 		tp->bufmgr_config.mbuf_read_dma_low_water =
17449 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17450 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17451 			DEFAULT_MB_MACRX_LOW_WATER_5705;
17452 		tp->bufmgr_config.mbuf_high_water =
17453 			DEFAULT_MB_HIGH_WATER_5705;
17454 		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17455 			tp->bufmgr_config.mbuf_mac_rx_low_water =
17456 				DEFAULT_MB_MACRX_LOW_WATER_5906;
17457 			tp->bufmgr_config.mbuf_high_water =
17458 				DEFAULT_MB_HIGH_WATER_5906;
17459 		}
17460 
17461 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17462 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17463 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17464 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17465 		tp->bufmgr_config.mbuf_high_water_jumbo =
17466 			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17467 	} else {
17468 		tp->bufmgr_config.mbuf_read_dma_low_water =
17469 			DEFAULT_MB_RDMA_LOW_WATER;
17470 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17471 			DEFAULT_MB_MACRX_LOW_WATER;
17472 		tp->bufmgr_config.mbuf_high_water =
17473 			DEFAULT_MB_HIGH_WATER;
17474 
17475 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17476 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17477 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17478 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17479 		tp->bufmgr_config.mbuf_high_water_jumbo =
17480 			DEFAULT_MB_HIGH_WATER_JUMBO;
17481 	}
17482 
17483 	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17484 	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17485 }
17486 
tg3_phy_string(struct tg3 * tp)17487 static char *tg3_phy_string(struct tg3 *tp)
17488 {
17489 	switch (tp->phy_id & TG3_PHY_ID_MASK) {
17490 	case TG3_PHY_ID_BCM5400:	return "5400";
17491 	case TG3_PHY_ID_BCM5401:	return "5401";
17492 	case TG3_PHY_ID_BCM5411:	return "5411";
17493 	case TG3_PHY_ID_BCM5701:	return "5701";
17494 	case TG3_PHY_ID_BCM5703:	return "5703";
17495 	case TG3_PHY_ID_BCM5704:	return "5704";
17496 	case TG3_PHY_ID_BCM5705:	return "5705";
17497 	case TG3_PHY_ID_BCM5750:	return "5750";
17498 	case TG3_PHY_ID_BCM5752:	return "5752";
17499 	case TG3_PHY_ID_BCM5714:	return "5714";
17500 	case TG3_PHY_ID_BCM5780:	return "5780";
17501 	case TG3_PHY_ID_BCM5755:	return "5755";
17502 	case TG3_PHY_ID_BCM5787:	return "5787";
17503 	case TG3_PHY_ID_BCM5784:	return "5784";
17504 	case TG3_PHY_ID_BCM5756:	return "5722/5756";
17505 	case TG3_PHY_ID_BCM5906:	return "5906";
17506 	case TG3_PHY_ID_BCM5761:	return "5761";
17507 	case TG3_PHY_ID_BCM5718C:	return "5718C";
17508 	case TG3_PHY_ID_BCM5718S:	return "5718S";
17509 	case TG3_PHY_ID_BCM57765:	return "57765";
17510 	case TG3_PHY_ID_BCM5719C:	return "5719C";
17511 	case TG3_PHY_ID_BCM5720C:	return "5720C";
17512 	case TG3_PHY_ID_BCM5762:	return "5762C";
17513 	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
17514 	case 0:			return "serdes";
17515 	default:		return "unknown";
17516 	}
17517 }
17518 
tg3_bus_string(struct tg3 * tp,char * str)17519 static char *tg3_bus_string(struct tg3 *tp, char *str)
17520 {
17521 	if (tg3_flag(tp, PCI_EXPRESS)) {
17522 		strcpy(str, "PCI Express");
17523 		return str;
17524 	} else if (tg3_flag(tp, PCIX_MODE)) {
17525 		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17526 
17527 		strcpy(str, "PCIX:");
17528 
17529 		if ((clock_ctrl == 7) ||
17530 		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17531 		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17532 			strcat(str, "133MHz");
17533 		else if (clock_ctrl == 0)
17534 			strcat(str, "33MHz");
17535 		else if (clock_ctrl == 2)
17536 			strcat(str, "50MHz");
17537 		else if (clock_ctrl == 4)
17538 			strcat(str, "66MHz");
17539 		else if (clock_ctrl == 6)
17540 			strcat(str, "100MHz");
17541 	} else {
17542 		strcpy(str, "PCI:");
17543 		if (tg3_flag(tp, PCI_HIGH_SPEED))
17544 			strcat(str, "66MHz");
17545 		else
17546 			strcat(str, "33MHz");
17547 	}
17548 	if (tg3_flag(tp, PCI_32BIT))
17549 		strcat(str, ":32-bit");
17550 	else
17551 		strcat(str, ":64-bit");
17552 	return str;
17553 }
17554 
tg3_init_coal(struct tg3 * tp)17555 static void tg3_init_coal(struct tg3 *tp)
17556 {
17557 	struct ethtool_coalesce *ec = &tp->coal;
17558 
17559 	memset(ec, 0, sizeof(*ec));
17560 	ec->cmd = ETHTOOL_GCOALESCE;
17561 	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17562 	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17563 	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17564 	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17565 	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17566 	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17567 	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17568 	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17569 	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17570 
17571 	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17572 				 HOSTCC_MODE_CLRTICK_TXBD)) {
17573 		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17574 		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17575 		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17576 		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17577 	}
17578 
17579 	if (tg3_flag(tp, 5705_PLUS)) {
17580 		ec->rx_coalesce_usecs_irq = 0;
17581 		ec->tx_coalesce_usecs_irq = 0;
17582 		ec->stats_block_coalesce_usecs = 0;
17583 	}
17584 }
17585 
tg3_init_one(struct pci_dev * pdev,const struct pci_device_id * ent)17586 static int tg3_init_one(struct pci_dev *pdev,
17587 				  const struct pci_device_id *ent)
17588 {
17589 	struct net_device *dev;
17590 	struct tg3 *tp;
17591 	int i, err;
17592 	u32 sndmbx, rcvmbx, intmbx;
17593 	char str[40];
17594 	u64 dma_mask, persist_dma_mask;
17595 	netdev_features_t features = 0;
17596 	u8 addr[ETH_ALEN] __aligned(2);
17597 
17598 	err = pci_enable_device(pdev);
17599 	if (err) {
17600 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17601 		return err;
17602 	}
17603 
17604 	err = pci_request_regions(pdev, DRV_MODULE_NAME);
17605 	if (err) {
17606 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17607 		goto err_out_disable_pdev;
17608 	}
17609 
17610 	pci_set_master(pdev);
17611 
17612 	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17613 	if (!dev) {
17614 		err = -ENOMEM;
17615 		goto err_out_free_res;
17616 	}
17617 
17618 	SET_NETDEV_DEV(dev, &pdev->dev);
17619 
17620 	tp = netdev_priv(dev);
17621 	tp->pdev = pdev;
17622 	tp->dev = dev;
17623 	tp->rx_mode = TG3_DEF_RX_MODE;
17624 	tp->tx_mode = TG3_DEF_TX_MODE;
17625 	tp->irq_sync = 1;
17626 	tp->pcierr_recovery = false;
17627 
17628 	if (tg3_debug > 0)
17629 		tp->msg_enable = tg3_debug;
17630 	else
17631 		tp->msg_enable = TG3_DEF_MSG_ENABLE;
17632 
17633 	if (pdev_is_ssb_gige_core(pdev)) {
17634 		tg3_flag_set(tp, IS_SSB_CORE);
17635 		if (ssb_gige_must_flush_posted_writes(pdev))
17636 			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17637 		if (ssb_gige_one_dma_at_once(pdev))
17638 			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17639 		if (ssb_gige_have_roboswitch(pdev)) {
17640 			tg3_flag_set(tp, USE_PHYLIB);
17641 			tg3_flag_set(tp, ROBOSWITCH);
17642 		}
17643 		if (ssb_gige_is_rgmii(pdev))
17644 			tg3_flag_set(tp, RGMII_MODE);
17645 	}
17646 
17647 	/* The word/byte swap controls here control register access byte
17648 	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17649 	 * setting below.
17650 	 */
17651 	tp->misc_host_ctrl =
17652 		MISC_HOST_CTRL_MASK_PCI_INT |
17653 		MISC_HOST_CTRL_WORD_SWAP |
17654 		MISC_HOST_CTRL_INDIR_ACCESS |
17655 		MISC_HOST_CTRL_PCISTATE_RW;
17656 
17657 	/* The NONFRM (non-frame) byte/word swap controls take effect
17658 	 * on descriptor entries, anything which isn't packet data.
17659 	 *
17660 	 * The StrongARM chips on the board (one for tx, one for rx)
17661 	 * are running in big-endian mode.
17662 	 */
17663 	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17664 			GRC_MODE_WSWAP_NONFRM_DATA);
17665 #ifdef __BIG_ENDIAN
17666 	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17667 #endif
17668 	spin_lock_init(&tp->lock);
17669 	spin_lock_init(&tp->indirect_lock);
17670 	INIT_WORK(&tp->reset_task, tg3_reset_task);
17671 
17672 	tp->regs = pci_ioremap_bar(pdev, BAR_0);
17673 	if (!tp->regs) {
17674 		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17675 		err = -ENOMEM;
17676 		goto err_out_free_dev;
17677 	}
17678 
17679 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17680 	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17681 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17682 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17683 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17684 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17685 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17686 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17687 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17688 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17689 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17690 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17691 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17692 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17693 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17694 		tg3_flag_set(tp, ENABLE_APE);
17695 		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17696 		if (!tp->aperegs) {
17697 			dev_err(&pdev->dev,
17698 				"Cannot map APE registers, aborting\n");
17699 			err = -ENOMEM;
17700 			goto err_out_iounmap;
17701 		}
17702 	}
17703 
17704 	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17705 	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17706 
17707 	dev->ethtool_ops = &tg3_ethtool_ops;
17708 	dev->watchdog_timeo = TG3_TX_TIMEOUT;
17709 	dev->netdev_ops = &tg3_netdev_ops;
17710 	dev->irq = pdev->irq;
17711 
17712 	err = tg3_get_invariants(tp, ent);
17713 	if (err) {
17714 		dev_err(&pdev->dev,
17715 			"Problem fetching invariants of chip, aborting\n");
17716 		goto err_out_apeunmap;
17717 	}
17718 
17719 	/* The EPB bridge inside 5714, 5715, and 5780 and any
17720 	 * device behind the EPB cannot support DMA addresses > 40-bit.
17721 	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17722 	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17723 	 * do DMA address check in tg3_start_xmit().
17724 	 */
17725 	if (tg3_flag(tp, IS_5788))
17726 		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17727 	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17728 		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17729 #ifdef CONFIG_HIGHMEM
17730 		dma_mask = DMA_BIT_MASK(64);
17731 #endif
17732 	} else
17733 		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17734 
17735 	if (tg3_asic_rev(tp) == ASIC_REV_57766)
17736 		persist_dma_mask = DMA_BIT_MASK(31);
17737 
17738 	/* Configure DMA attributes. */
17739 	if (dma_mask > DMA_BIT_MASK(32)) {
17740 		err = dma_set_mask(&pdev->dev, dma_mask);
17741 		if (!err) {
17742 			features |= NETIF_F_HIGHDMA;
17743 			err = dma_set_coherent_mask(&pdev->dev,
17744 						    persist_dma_mask);
17745 			if (err < 0) {
17746 				dev_err(&pdev->dev, "Unable to obtain 64 bit "
17747 					"DMA for consistent allocations\n");
17748 				goto err_out_apeunmap;
17749 			}
17750 		}
17751 	}
17752 	if (err || dma_mask == DMA_BIT_MASK(32)) {
17753 		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
17754 		if (err) {
17755 			dev_err(&pdev->dev,
17756 				"No usable DMA configuration, aborting\n");
17757 			goto err_out_apeunmap;
17758 		}
17759 	}
17760 
17761 	tg3_init_bufmgr_config(tp);
17762 
17763 	/* 5700 B0 chips do not support checksumming correctly due
17764 	 * to hardware bugs.
17765 	 */
17766 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17767 		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17768 
17769 		if (tg3_flag(tp, 5755_PLUS))
17770 			features |= NETIF_F_IPV6_CSUM;
17771 	}
17772 
17773 	/* TSO is on by default on chips that support hardware TSO.
17774 	 * Firmware TSO on older chips gives lower performance, so it
17775 	 * is off by default, but can be enabled using ethtool.
17776 	 */
17777 	if ((tg3_flag(tp, HW_TSO_1) ||
17778 	     tg3_flag(tp, HW_TSO_2) ||
17779 	     tg3_flag(tp, HW_TSO_3)) &&
17780 	    (features & NETIF_F_IP_CSUM))
17781 		features |= NETIF_F_TSO;
17782 	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17783 		if (features & NETIF_F_IPV6_CSUM)
17784 			features |= NETIF_F_TSO6;
17785 		if (tg3_flag(tp, HW_TSO_3) ||
17786 		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
17787 		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17788 		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17789 		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
17790 		    tg3_asic_rev(tp) == ASIC_REV_57780)
17791 			features |= NETIF_F_TSO_ECN;
17792 	}
17793 
17794 	dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17795 			 NETIF_F_HW_VLAN_CTAG_RX;
17796 	dev->vlan_features |= features;
17797 
17798 	/*
17799 	 * Add loopback capability only for a subset of devices that support
17800 	 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
17801 	 * loopback for the remaining devices.
17802 	 */
17803 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17804 	    !tg3_flag(tp, CPMU_PRESENT))
17805 		/* Add the loopback capability */
17806 		features |= NETIF_F_LOOPBACK;
17807 
17808 	dev->hw_features |= features;
17809 	dev->priv_flags |= IFF_UNICAST_FLT;
17810 
17811 	/* MTU range: 60 - 9000 or 1500, depending on hardware */
17812 	dev->min_mtu = TG3_MIN_MTU;
17813 	dev->max_mtu = TG3_MAX_MTU(tp);
17814 
17815 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17816 	    !tg3_flag(tp, TSO_CAPABLE) &&
17817 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17818 		tg3_flag_set(tp, MAX_RXPEND_64);
17819 		tp->rx_pending = 63;
17820 	}
17821 
17822 	err = tg3_get_device_address(tp, addr);
17823 	if (err) {
17824 		dev_err(&pdev->dev,
17825 			"Could not obtain valid ethernet address, aborting\n");
17826 		goto err_out_apeunmap;
17827 	}
17828 	eth_hw_addr_set(dev, addr);
17829 
17830 	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17831 	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17832 	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17833 	for (i = 0; i < tp->irq_max; i++) {
17834 		struct tg3_napi *tnapi = &tp->napi[i];
17835 
17836 		tnapi->tp = tp;
17837 		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17838 
17839 		tnapi->int_mbox = intmbx;
17840 		intmbx += 0x8;
17841 
17842 		tnapi->consmbox = rcvmbx;
17843 		tnapi->prodmbox = sndmbx;
17844 
17845 		if (i)
17846 			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17847 		else
17848 			tnapi->coal_now = HOSTCC_MODE_NOW;
17849 
17850 		if (!tg3_flag(tp, SUPPORT_MSIX))
17851 			break;
17852 
17853 		/*
17854 		 * If we support MSIX, we'll be using RSS.  If we're using
17855 		 * RSS, the first vector only handles link interrupts and the
17856 		 * remaining vectors handle rx and tx interrupts.  Reuse the
17857 		 * mailbox values for the next iteration.  The values we setup
17858 		 * above are still useful for the single vectored mode.
17859 		 */
17860 		if (!i)
17861 			continue;
17862 
17863 		rcvmbx += 0x8;
17864 
17865 		if (sndmbx & 0x4)
17866 			sndmbx -= 0x4;
17867 		else
17868 			sndmbx += 0xc;
17869 	}
17870 
17871 	/*
17872 	 * Reset chip in case UNDI or EFI driver did not shutdown
17873 	 * DMA self test will enable WDMAC and we'll see (spurious)
17874 	 * pending DMA on the PCI bus at that point.
17875 	 */
17876 	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17877 	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17878 		tg3_full_lock(tp, 0);
17879 		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17880 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17881 		tg3_full_unlock(tp);
17882 	}
17883 
17884 	err = tg3_test_dma(tp);
17885 	if (err) {
17886 		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17887 		goto err_out_apeunmap;
17888 	}
17889 
17890 	tg3_init_coal(tp);
17891 
17892 	pci_set_drvdata(pdev, dev);
17893 
17894 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17895 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
17896 	    tg3_asic_rev(tp) == ASIC_REV_5762)
17897 		tg3_flag_set(tp, PTP_CAPABLE);
17898 
17899 	tg3_timer_init(tp);
17900 
17901 	tg3_carrier_off(tp);
17902 
17903 	err = register_netdev(dev);
17904 	if (err) {
17905 		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17906 		goto err_out_apeunmap;
17907 	}
17908 
17909 	if (tg3_flag(tp, PTP_CAPABLE)) {
17910 		tg3_ptp_init(tp);
17911 		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17912 						   &tp->pdev->dev);
17913 		if (IS_ERR(tp->ptp_clock))
17914 			tp->ptp_clock = NULL;
17915 	}
17916 
17917 	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17918 		    tp->board_part_number,
17919 		    tg3_chip_rev_id(tp),
17920 		    tg3_bus_string(tp, str),
17921 		    dev->dev_addr);
17922 
17923 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17924 		char *ethtype;
17925 
17926 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17927 			ethtype = "10/100Base-TX";
17928 		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17929 			ethtype = "1000Base-SX";
17930 		else
17931 			ethtype = "10/100/1000Base-T";
17932 
17933 		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17934 			    "(WireSpeed[%d], EEE[%d])\n",
17935 			    tg3_phy_string(tp), ethtype,
17936 			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17937 			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17938 	}
17939 
17940 	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17941 		    (dev->features & NETIF_F_RXCSUM) != 0,
17942 		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
17943 		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17944 		    tg3_flag(tp, ENABLE_ASF) != 0,
17945 		    tg3_flag(tp, TSO_CAPABLE) != 0);
17946 	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17947 		    tp->dma_rwctrl,
17948 		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17949 		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17950 
17951 	pci_save_state(pdev);
17952 
17953 	return 0;
17954 
17955 err_out_apeunmap:
17956 	if (tp->aperegs) {
17957 		iounmap(tp->aperegs);
17958 		tp->aperegs = NULL;
17959 	}
17960 
17961 err_out_iounmap:
17962 	if (tp->regs) {
17963 		iounmap(tp->regs);
17964 		tp->regs = NULL;
17965 	}
17966 
17967 err_out_free_dev:
17968 	free_netdev(dev);
17969 
17970 err_out_free_res:
17971 	pci_release_regions(pdev);
17972 
17973 err_out_disable_pdev:
17974 	if (pci_is_enabled(pdev))
17975 		pci_disable_device(pdev);
17976 	return err;
17977 }
17978 
tg3_remove_one(struct pci_dev * pdev)17979 static void tg3_remove_one(struct pci_dev *pdev)
17980 {
17981 	struct net_device *dev = pci_get_drvdata(pdev);
17982 
17983 	if (dev) {
17984 		struct tg3 *tp = netdev_priv(dev);
17985 
17986 		tg3_ptp_fini(tp);
17987 
17988 		release_firmware(tp->fw);
17989 
17990 		tg3_reset_task_cancel(tp);
17991 
17992 		if (tg3_flag(tp, USE_PHYLIB)) {
17993 			tg3_phy_fini(tp);
17994 			tg3_mdio_fini(tp);
17995 		}
17996 
17997 		unregister_netdev(dev);
17998 		if (tp->aperegs) {
17999 			iounmap(tp->aperegs);
18000 			tp->aperegs = NULL;
18001 		}
18002 		if (tp->regs) {
18003 			iounmap(tp->regs);
18004 			tp->regs = NULL;
18005 		}
18006 		free_netdev(dev);
18007 		pci_release_regions(pdev);
18008 		pci_disable_device(pdev);
18009 	}
18010 }
18011 
18012 #ifdef CONFIG_PM_SLEEP
tg3_suspend(struct device * device)18013 static int tg3_suspend(struct device *device)
18014 {
18015 	struct net_device *dev = dev_get_drvdata(device);
18016 	struct tg3 *tp = netdev_priv(dev);
18017 	int err = 0;
18018 
18019 	rtnl_lock();
18020 
18021 	if (!netif_running(dev))
18022 		goto unlock;
18023 
18024 	tg3_reset_task_cancel(tp);
18025 	tg3_phy_stop(tp);
18026 	tg3_netif_stop(tp);
18027 
18028 	tg3_timer_stop(tp);
18029 
18030 	tg3_full_lock(tp, 1);
18031 	tg3_disable_ints(tp);
18032 	tg3_full_unlock(tp);
18033 
18034 	netif_device_detach(dev);
18035 
18036 	tg3_full_lock(tp, 0);
18037 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
18038 	tg3_flag_clear(tp, INIT_COMPLETE);
18039 	tg3_full_unlock(tp);
18040 
18041 	err = tg3_power_down_prepare(tp);
18042 	if (err) {
18043 		int err2;
18044 
18045 		tg3_full_lock(tp, 0);
18046 
18047 		tg3_flag_set(tp, INIT_COMPLETE);
18048 		err2 = tg3_restart_hw(tp, true);
18049 		if (err2)
18050 			goto out;
18051 
18052 		tg3_timer_start(tp);
18053 
18054 		netif_device_attach(dev);
18055 		tg3_netif_start(tp);
18056 
18057 out:
18058 		tg3_full_unlock(tp);
18059 
18060 		if (!err2)
18061 			tg3_phy_start(tp);
18062 	}
18063 
18064 unlock:
18065 	rtnl_unlock();
18066 	return err;
18067 }
18068 
tg3_resume(struct device * device)18069 static int tg3_resume(struct device *device)
18070 {
18071 	struct net_device *dev = dev_get_drvdata(device);
18072 	struct tg3 *tp = netdev_priv(dev);
18073 	int err = 0;
18074 
18075 	rtnl_lock();
18076 
18077 	if (!netif_running(dev))
18078 		goto unlock;
18079 
18080 	netif_device_attach(dev);
18081 
18082 	tg3_full_lock(tp, 0);
18083 
18084 	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18085 
18086 	tg3_flag_set(tp, INIT_COMPLETE);
18087 	err = tg3_restart_hw(tp,
18088 			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18089 	if (err)
18090 		goto out;
18091 
18092 	tg3_timer_start(tp);
18093 
18094 	tg3_netif_start(tp);
18095 
18096 out:
18097 	tg3_full_unlock(tp);
18098 
18099 	if (!err)
18100 		tg3_phy_start(tp);
18101 
18102 unlock:
18103 	rtnl_unlock();
18104 	return err;
18105 }
18106 #endif /* CONFIG_PM_SLEEP */
18107 
18108 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18109 
18110 /* Systems where ACPI _PTS (Prepare To Sleep) S5 will result in a fatal
18111  * PCIe AER event on the tg3 device if the tg3 device is not, or cannot
18112  * be, powered down.
18113  */
18114 static const struct dmi_system_id tg3_restart_aer_quirk_table[] = {
18115 	{
18116 		.matches = {
18117 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
18118 			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R440"),
18119 		},
18120 	},
18121 	{
18122 		.matches = {
18123 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
18124 			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R540"),
18125 		},
18126 	},
18127 	{
18128 		.matches = {
18129 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
18130 			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R640"),
18131 		},
18132 	},
18133 	{
18134 		.matches = {
18135 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
18136 			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R650"),
18137 		},
18138 	},
18139 	{
18140 		.matches = {
18141 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
18142 			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R740"),
18143 		},
18144 	},
18145 	{
18146 		.matches = {
18147 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
18148 			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R750"),
18149 		},
18150 	},
18151 	{}
18152 };
18153 
tg3_shutdown(struct pci_dev * pdev)18154 static void tg3_shutdown(struct pci_dev *pdev)
18155 {
18156 	struct net_device *dev = pci_get_drvdata(pdev);
18157 	struct tg3 *tp = netdev_priv(dev);
18158 
18159 	tg3_reset_task_cancel(tp);
18160 
18161 	rtnl_lock();
18162 
18163 	netif_device_detach(dev);
18164 
18165 	if (netif_running(dev))
18166 		dev_close(dev);
18167 
18168 	if (system_state == SYSTEM_POWER_OFF)
18169 		tg3_power_down(tp);
18170 	else if (system_state == SYSTEM_RESTART &&
18171 		 dmi_first_match(tg3_restart_aer_quirk_table) &&
18172 		 pdev->current_state != PCI_D3cold &&
18173 		 pdev->current_state != PCI_UNKNOWN) {
18174 		/* Disable PCIe AER on the tg3 to avoid a fatal
18175 		 * error during this system restart.
18176 		 */
18177 		pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL,
18178 					   PCI_EXP_DEVCTL_CERE |
18179 					   PCI_EXP_DEVCTL_NFERE |
18180 					   PCI_EXP_DEVCTL_FERE |
18181 					   PCI_EXP_DEVCTL_URRE);
18182 	}
18183 
18184 	rtnl_unlock();
18185 
18186 	pci_disable_device(pdev);
18187 }
18188 
18189 /**
18190  * tg3_io_error_detected - called when PCI error is detected
18191  * @pdev: Pointer to PCI device
18192  * @state: The current pci connection state
18193  *
18194  * This function is called after a PCI bus error affecting
18195  * this device has been detected.
18196  */
tg3_io_error_detected(struct pci_dev * pdev,pci_channel_state_t state)18197 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18198 					      pci_channel_state_t state)
18199 {
18200 	struct net_device *netdev = pci_get_drvdata(pdev);
18201 	struct tg3 *tp = netdev_priv(netdev);
18202 	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18203 
18204 	netdev_info(netdev, "PCI I/O error detected\n");
18205 
18206 	/* Want to make sure that the reset task doesn't run */
18207 	tg3_reset_task_cancel(tp);
18208 
18209 	rtnl_lock();
18210 
18211 	/* Could be second call or maybe we don't have netdev yet */
18212 	if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
18213 		goto done;
18214 
18215 	/* We needn't recover from permanent error */
18216 	if (state == pci_channel_io_frozen)
18217 		tp->pcierr_recovery = true;
18218 
18219 	tg3_phy_stop(tp);
18220 
18221 	tg3_netif_stop(tp);
18222 
18223 	tg3_timer_stop(tp);
18224 
18225 	netif_device_detach(netdev);
18226 
18227 	/* Clean up software state, even if MMIO is blocked */
18228 	tg3_full_lock(tp, 0);
18229 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18230 	tg3_full_unlock(tp);
18231 
18232 done:
18233 	if (state == pci_channel_io_perm_failure) {
18234 		if (netdev) {
18235 			tg3_napi_enable(tp);
18236 			dev_close(netdev);
18237 		}
18238 		err = PCI_ERS_RESULT_DISCONNECT;
18239 	} else {
18240 		pci_disable_device(pdev);
18241 	}
18242 
18243 	rtnl_unlock();
18244 
18245 	return err;
18246 }
18247 
18248 /**
18249  * tg3_io_slot_reset - called after the pci bus has been reset.
18250  * @pdev: Pointer to PCI device
18251  *
18252  * Restart the card from scratch, as if from a cold-boot.
18253  * At this point, the card has exprienced a hard reset,
18254  * followed by fixups by BIOS, and has its config space
18255  * set up identically to what it was at cold boot.
18256  */
tg3_io_slot_reset(struct pci_dev * pdev)18257 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18258 {
18259 	struct net_device *netdev = pci_get_drvdata(pdev);
18260 	struct tg3 *tp = netdev_priv(netdev);
18261 	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18262 	int err;
18263 
18264 	rtnl_lock();
18265 
18266 	if (pci_enable_device(pdev)) {
18267 		dev_err(&pdev->dev,
18268 			"Cannot re-enable PCI device after reset.\n");
18269 		goto done;
18270 	}
18271 
18272 	pci_set_master(pdev);
18273 	pci_restore_state(pdev);
18274 	pci_save_state(pdev);
18275 
18276 	if (!netdev || !netif_running(netdev)) {
18277 		rc = PCI_ERS_RESULT_RECOVERED;
18278 		goto done;
18279 	}
18280 
18281 	err = tg3_power_up(tp);
18282 	if (err)
18283 		goto done;
18284 
18285 	rc = PCI_ERS_RESULT_RECOVERED;
18286 
18287 done:
18288 	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18289 		tg3_napi_enable(tp);
18290 		dev_close(netdev);
18291 	}
18292 	rtnl_unlock();
18293 
18294 	return rc;
18295 }
18296 
18297 /**
18298  * tg3_io_resume - called when traffic can start flowing again.
18299  * @pdev: Pointer to PCI device
18300  *
18301  * This callback is called when the error recovery driver tells
18302  * us that its OK to resume normal operation.
18303  */
tg3_io_resume(struct pci_dev * pdev)18304 static void tg3_io_resume(struct pci_dev *pdev)
18305 {
18306 	struct net_device *netdev = pci_get_drvdata(pdev);
18307 	struct tg3 *tp = netdev_priv(netdev);
18308 	int err;
18309 
18310 	rtnl_lock();
18311 
18312 	if (!netdev || !netif_running(netdev))
18313 		goto done;
18314 
18315 	tg3_full_lock(tp, 0);
18316 	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18317 	tg3_flag_set(tp, INIT_COMPLETE);
18318 	err = tg3_restart_hw(tp, true);
18319 	if (err) {
18320 		tg3_full_unlock(tp);
18321 		netdev_err(netdev, "Cannot restart hardware after reset.\n");
18322 		goto done;
18323 	}
18324 
18325 	netif_device_attach(netdev);
18326 
18327 	tg3_timer_start(tp);
18328 
18329 	tg3_netif_start(tp);
18330 
18331 	tg3_full_unlock(tp);
18332 
18333 	tg3_phy_start(tp);
18334 
18335 done:
18336 	tp->pcierr_recovery = false;
18337 	rtnl_unlock();
18338 }
18339 
18340 static const struct pci_error_handlers tg3_err_handler = {
18341 	.error_detected	= tg3_io_error_detected,
18342 	.slot_reset	= tg3_io_slot_reset,
18343 	.resume		= tg3_io_resume
18344 };
18345 
18346 static struct pci_driver tg3_driver = {
18347 	.name		= DRV_MODULE_NAME,
18348 	.id_table	= tg3_pci_tbl,
18349 	.probe		= tg3_init_one,
18350 	.remove		= tg3_remove_one,
18351 	.err_handler	= &tg3_err_handler,
18352 	.driver.pm	= &tg3_pm_ops,
18353 	.shutdown	= tg3_shutdown,
18354 };
18355 
18356 module_pci_driver(tg3_driver);
18357