/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2016 Broadcom Corporation.
 * Copyright (C) 2016-2017 Broadcom Limited.
 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 * refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2016 Broadcom Corporation.
 *	Copyright (C) 2016-2017 Broadcom Ltd.
 *	Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 *	refers to Broadcom Inc. and/or its subsidiaries.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/crc32poly.h>

#include <net/checksum.h>
#include <net/gso.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)	\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)	\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
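
/* Example: tg3_flag(tp, ENABLE_APE) expands to
 * _tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags), so every feature
 * flag test, set and clear goes through the atomic bitops above.
 */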

#define DRV_MODULE_NAME		"tg3"
/* DO NOT UPDATE TG3_*_NUM defines */
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		137

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			ETH_ZLEN
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself;
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
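
/* TG3_TX_RING_SIZE is a power of two, so NEXT_TX() wraps with a mask
 * rather than a modulo, e.g. NEXT_TX(511) == 0 on the 512-entry ring,
 * which is the shift-and-mask codegen the comment above asks for.
 */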

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode. The driver
 * works around this bug by double copying the packet. This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient. For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path. Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG357766);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS		ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test (online) " },
	[TG3_LINK_TEST]		= { "link test (online) " },
	[TG3_REGISTER_TEST]	= { "register test (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

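/* Indirect register access: the target offset is staged through the
 * TG3PCI_REG_BASE_ADDR window in PCI config space and the data then
 * moves through TG3PCI_REG_DATA, with indirect_lock serializing the
 * two-step sequence against other users of the window.
 */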
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

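/* Indirect mailbox writes. The receive-return and standard producer
 * mailboxes have dedicated PCI config-space aliases; every other
 * mailbox goes through the indirect window at offset + 0x5600.
 */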
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example, when the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another, when the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
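
/* tw32()/tr32() and the mailbox variants dispatch through the
 * per-chip accessors hooked up at probe time, e.g. tw32(reg, val)
 * becomes tp->write32(tp, reg, val). The _f variants flush the posted
 * write, and tw32_wait_f() also honors the usec_wait rule above.
 */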

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver doesn't hold any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

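/* Poll until the APE has serviced the previous event, taking and
 * dropping TG3_APE_LOCK_MEM around each check. On success this
 * returns 0 with the MEM lock still held; on timeout it returns
 * -EBUSY with the lock released.
 */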
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

#ifdef CONFIG_TIGON3_HWMON
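/* Busy-wait for the APE to clear EVENT_PENDING, without taking any
 * locks. Note the inverted return convention: nonzero means the wait
 * timed out, zero means the event was serviced in time.
 */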
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
#endif

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 20 milliseconds for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 20000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
				APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_send_ape_heartbeat(struct tg3 *tp,
				   unsigned long interval)
{
	/* Return if the heartbeat interval has not yet elapsed. */
	if (!tg3_flag(tp, ENABLE_APE) ||
	    time_before(jiffies, tp->ape_hb_jiffies + interval))
		return;

	tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
	tp->ape_hb_jiffies = jiffies;
}

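/* Mask the chip's PCI interrupt output and park each vector's
 * interrupt mailbox at 1, which by the tg3 mailbox convention keeps
 * further interrupts from being asserted until tg3_enable_ints()
 * unwinds both steps.
 */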
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

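/* Clause 22 MII read through the MAC_MI_COM interface. Autopolling is
 * paused while the frame is shifted out and restored afterwards; the
 * transaction is done once MI_COM_BUSY clears, polled for at most
 * PHY_BUSY_LOOPS iterations before giving up with -EBUSY.
 */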
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

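/* Clause 22 MII write, the mirror image of __tg3_readphy(). Writes to
 * MII_CTRL1000 and MII_TG3_AUX_CTRL are silently dropped on FET-style
 * (10/100-only) PHYs, which lack those registers.
 */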
static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

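/* Clause 45 access tunneled through the clause 22 MMD registers:
 * select the MMD device, latch the register address, switch the
 * control register to no-increment data mode, then move the data
 * word through MII_TG3_MMD_ADDRESS.
 */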
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

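/* Match the 5785 MAC configuration to the attached PHY: pick the LED
 * modes that PHY expects and, for RGMII-attached PHYs, also program
 * the in-band status, decode and clock-timeout bits in MAC_PHYCFG1/2
 * and MAC_EXT_RGMII_MODE.
 */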
tg3_mdio_config_5785(struct tg3 * tp)1412 static void tg3_mdio_config_5785(struct tg3 *tp)
1413 {
1414 u32 val;
1415 struct phy_device *phydev;
1416
1417 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1418 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1419 case PHY_ID_BCM50610:
1420 case PHY_ID_BCM50610M:
1421 val = MAC_PHYCFG2_50610_LED_MODES;
1422 break;
1423 case PHY_ID_BCMAC131:
1424 val = MAC_PHYCFG2_AC131_LED_MODES;
1425 break;
1426 case PHY_ID_RTL8211C:
1427 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1428 break;
1429 case PHY_ID_RTL8201E:
1430 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1431 break;
1432 default:
1433 return;
1434 }
1435
1436 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1437 tw32(MAC_PHYCFG2, val);
1438
1439 val = tr32(MAC_PHYCFG1);
1440 val &= ~(MAC_PHYCFG1_RGMII_INT |
1441 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1442 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1443 tw32(MAC_PHYCFG1, val);
1444
1445 return;
1446 }
1447
1448 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1449 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1450 MAC_PHYCFG2_FMODE_MASK_MASK |
1451 MAC_PHYCFG2_GMODE_MASK_MASK |
1452 MAC_PHYCFG2_ACT_MASK_MASK |
1453 MAC_PHYCFG2_QUAL_MASK_MASK |
1454 MAC_PHYCFG2_INBAND_ENABLE;
1455
1456 tw32(MAC_PHYCFG2, val);
1457
1458 val = tr32(MAC_PHYCFG1);
1459 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1460 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1461 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1462 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1463 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1464 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1465 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1466 }
1467 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1468 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1469 tw32(MAC_PHYCFG1, val);
1470
1471 val = tr32(MAC_EXT_RGMII_MODE);
1472 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1473 MAC_RGMII_MODE_RX_QUALITY |
1474 MAC_RGMII_MODE_RX_ACTIVITY |
1475 MAC_RGMII_MODE_RX_ENG_DET |
1476 MAC_RGMII_MODE_TX_ENABLE |
1477 MAC_RGMII_MODE_TX_LOWPWR |
1478 MAC_RGMII_MODE_TX_RESET);
1479 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1480 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1481 val |= MAC_RGMII_MODE_RX_INT_B |
1482 MAC_RGMII_MODE_RX_QUALITY |
1483 MAC_RGMII_MODE_RX_ACTIVITY |
1484 MAC_RGMII_MODE_RX_ENG_DET;
1485 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1486 val |= MAC_RGMII_MODE_TX_ENABLE |
1487 MAC_RGMII_MODE_TX_LOWPWR |
1488 MAC_RGMII_MODE_TX_RESET;
1489 }
1490 tw32(MAC_EXT_RGMII_MODE, val);
1491 }
1492
tg3_mdio_start(struct tg3 * tp)1493 static void tg3_mdio_start(struct tg3 *tp)
1494 {
1495 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1496 tw32_f(MAC_MI_MODE, tp->mi_mode);
1497 udelay(80);
1498
1499 if (tg3_flag(tp, MDIOBUS_INITED) &&
1500 tg3_asic_rev(tp) == ASIC_REV_5785)
1501 tg3_mdio_config_5785(tp);
1502 }
1503
tg3_mdio_init(struct tg3 * tp)1504 static int tg3_mdio_init(struct tg3 *tp)
1505 {
1506 int i;
1507 u32 reg;
1508 struct phy_device *phydev;
1509
1510 if (tg3_flag(tp, 5717_PLUS)) {
1511 u32 is_serdes;
1512
1513 tp->phy_addr = tp->pci_fn + 1;
1514
1515 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1516 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1517 else
1518 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1519 TG3_CPMU_PHY_STRAP_IS_SERDES;
1520 if (is_serdes)
1521 tp->phy_addr += 7;
1522 } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
1523 int addr;
1524
1525 addr = ssb_gige_get_phyaddr(tp->pdev);
1526 if (addr < 0)
1527 return addr;
1528 tp->phy_addr = addr;
1529 } else
1530 tp->phy_addr = TG3_PHY_MII_ADDR;
1531
1532 tg3_mdio_start(tp);
1533
1534 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1535 return 0;
1536
1537 tp->mdio_bus = mdiobus_alloc();
1538 if (tp->mdio_bus == NULL)
1539 return -ENOMEM;
1540
1541 tp->mdio_bus->name = "tg3 mdio bus";
1542 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x", pci_dev_id(tp->pdev));
1543 tp->mdio_bus->priv = tp;
1544 tp->mdio_bus->parent = &tp->pdev->dev;
1545 tp->mdio_bus->read = &tg3_mdio_read;
1546 tp->mdio_bus->write = &tg3_mdio_write;
1547 tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
1548
1549 /* The bus registration will look for all the PHYs on the mdio bus.
1550 * Unfortunately, it does not ensure the PHY is powered up before
1551 * accessing the PHY ID registers. A chip reset is the
1552 * quickest way to bring the device back to an operational state..
1553 */
1554 if (tg3_readphy(tp, MII_BMCR, ®) || (reg & BMCR_PDOWN))
1555 tg3_bmcr_reset(tp);
1556
1557 i = mdiobus_register(tp->mdio_bus);
1558 if (i) {
1559 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1560 mdiobus_free(tp->mdio_bus);
1561 return i;
1562 }
1563
1564 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1565
1566 if (!phydev || !phydev->drv) {
1567 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1568 mdiobus_unregister(tp->mdio_bus);
1569 mdiobus_free(tp->mdio_bus);
1570 return -ENODEV;
1571 }
1572
1573 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1574 case PHY_ID_BCM57780:
1575 phydev->interface = PHY_INTERFACE_MODE_GMII;
1576 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1577 break;
1578 case PHY_ID_BCM50610:
1579 case PHY_ID_BCM50610M:
1580 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1581 PHY_BRCM_RX_REFCLK_UNUSED |
1582 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1583 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1584 fallthrough;
1585 case PHY_ID_RTL8211C:
1586 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1587 break;
1588 case PHY_ID_RTL8201E:
1589 case PHY_ID_BCMAC131:
1590 phydev->interface = PHY_INTERFACE_MODE_MII;
1591 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1592 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1593 break;
1594 }
1595
1596 tg3_flag_set(tp, MDIOBUS_INITED);
1597
1598 if (tg3_asic_rev(tp) == ASIC_REV_5785)
1599 tg3_mdio_config_5785(tp);
1600
1601 return 0;
1602 }
1603
tg3_mdio_fini(struct tg3 * tp)1604 static void tg3_mdio_fini(struct tg3 *tp)
1605 {
1606 if (tg3_flag(tp, MDIOBUS_INITED)) {
1607 tg3_flag_clear(tp, MDIOBUS_INITED);
1608 mdiobus_unregister(tp->mdio_bus);
1609 mdiobus_free(tp->mdio_bus);
1610 }
1611 }
1612
1613 /* tp->lock is held. */
tg3_generate_fw_event(struct tg3 * tp)1614 static inline void tg3_generate_fw_event(struct tg3 *tp)
1615 {
1616 u32 val;
1617
1618 val = tr32(GRC_RX_CPU_EVENT);
1619 val |= GRC_RX_CPU_DRIVER_EVENT;
1620 tw32_f(GRC_RX_CPU_EVENT, val);
1621
1622 tp->last_event_jiffies = jiffies;
1623 }
1624
1625 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1626
1627 /* tp->lock is held. */
tg3_wait_for_event_ack(struct tg3 * tp)1628 static void tg3_wait_for_event_ack(struct tg3 *tp)
1629 {
1630 int i;
1631 unsigned int delay_cnt;
1632 long time_remain;
1633
1634 /* If enough time has passed, no wait is necessary. */
1635 time_remain = (long)(tp->last_event_jiffies + 1 +
1636 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1637 (long)jiffies;
1638 if (time_remain < 0)
1639 return;
1640
1641 /* Check if we can shorten the wait time. */
1642 delay_cnt = jiffies_to_usecs(time_remain);
1643 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1644 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1645 delay_cnt = (delay_cnt >> 3) + 1;
1646
1647 for (i = 0; i < delay_cnt; i++) {
1648 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1649 break;
1650 if (pci_channel_offline(tp->pdev))
1651 break;
1652
1653 udelay(8);
1654 }
1655 }
1656
1657 /* tp->lock is held. */
tg3_phy_gather_ump_data(struct tg3 * tp,u32 * data)1658 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1659 {
1660 u32 reg, val;
1661
1662 val = 0;
1663 if (!tg3_readphy(tp, MII_BMCR, ®))
1664 val = reg << 16;
1665 if (!tg3_readphy(tp, MII_BMSR, ®))
1666 val |= (reg & 0xffff);
1667 *data++ = val;
1668
1669 val = 0;
1670 if (!tg3_readphy(tp, MII_ADVERTISE, ®))
1671 val = reg << 16;
1672 if (!tg3_readphy(tp, MII_LPA, ®))
1673 val |= (reg & 0xffff);
1674 *data++ = val;
1675
1676 val = 0;
1677 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1678 if (!tg3_readphy(tp, MII_CTRL1000, ®))
1679 val = reg << 16;
1680 if (!tg3_readphy(tp, MII_STAT1000, ®))
1681 val |= (reg & 0xffff);
1682 }
1683 *data++ = val;
1684
1685 if (!tg3_readphy(tp, MII_PHYADDR, ®))
1686 val = reg << 16;
1687 else
1688 val = 0;
1689 *data++ = val;
1690 }
1691
1692 /* tp->lock is held. */
tg3_ump_link_report(struct tg3 * tp)1693 static void tg3_ump_link_report(struct tg3 *tp)
1694 {
1695 u32 data[4];
1696
1697 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1698 return;
1699
1700 tg3_phy_gather_ump_data(tp, data);
1701
1702 tg3_wait_for_event_ack(tp);
1703
1704 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1705 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1706 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1707 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1708 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1709 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1710
1711 tg3_generate_fw_event(tp);
1712 }
1713
1714 /* tp->lock is held. */
tg3_stop_fw(struct tg3 * tp)1715 static void tg3_stop_fw(struct tg3 *tp)
1716 {
1717 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1718 /* Wait for RX cpu to ACK the previous event. */
1719 tg3_wait_for_event_ack(tp);
1720
1721 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1722
1723 tg3_generate_fw_event(tp);
1724
1725 /* Wait for RX cpu to ACK this event. */
1726 tg3_wait_for_event_ack(tp);
1727 }
1728 }
1729
1730 /* tp->lock is held. */
tg3_write_sig_pre_reset(struct tg3 * tp,int kind)1731 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1732 {
1733 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1734 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1735
1736 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1737 switch (kind) {
1738 case RESET_KIND_INIT:
1739 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1740 DRV_STATE_START);
1741 break;
1742
1743 case RESET_KIND_SHUTDOWN:
1744 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1745 DRV_STATE_UNLOAD);
1746 break;
1747
1748 case RESET_KIND_SUSPEND:
1749 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1750 DRV_STATE_SUSPEND);
1751 break;
1752
1753 default:
1754 break;
1755 }
1756 }
1757 }
1758
1759 /* tp->lock is held. */
tg3_write_sig_post_reset(struct tg3 * tp,int kind)1760 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1761 {
1762 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1763 switch (kind) {
1764 case RESET_KIND_INIT:
1765 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1766 DRV_STATE_START_DONE);
1767 break;
1768
1769 case RESET_KIND_SHUTDOWN:
1770 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1771 DRV_STATE_UNLOAD_DONE);
1772 break;
1773
1774 default:
1775 break;
1776 }
1777 }
1778 }
1779
1780 /* tp->lock is held. */
1781 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1782 {
1783 if (tg3_flag(tp, ENABLE_ASF)) {
1784 switch (kind) {
1785 case RESET_KIND_INIT:
1786 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1787 DRV_STATE_START);
1788 break;
1789
1790 case RESET_KIND_SHUTDOWN:
1791 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1792 DRV_STATE_UNLOAD);
1793 break;
1794
1795 case RESET_KIND_SUSPEND:
1796 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1797 DRV_STATE_SUSPEND);
1798 break;
1799
1800 default:
1801 break;
1802 }
1803 }
1804 }
1805
1806 static int tg3_poll_fw(struct tg3 *tp)
1807 {
1808 int i;
1809 u32 val;
1810
1811 if (tg3_flag(tp, NO_FWARE_REPORTED))
1812 return 0;
1813
1814 if (tg3_flag(tp, IS_SSB_CORE)) {
1815 /* We don't use firmware. */
1816 return 0;
1817 }
1818
1819 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1820 /* Wait up to 20ms for init done. */
1821 for (i = 0; i < 200; i++) {
1822 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1823 return 0;
1824 if (pci_channel_offline(tp->pdev))
1825 return -ENODEV;
1826
1827 udelay(100);
1828 }
1829 return -ENODEV;
1830 }
1831
1832 /* Wait for firmware initialization to complete. */
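/* The bootcode writes the one's complement of MAGIC1 back to the
 * mailbox when it is done; poll for that for up to ~1 second
 * (100000 * 10 usec).
 */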
1833 for (i = 0; i < 100000; i++) {
1834 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1835 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1836 break;
1837 if (pci_channel_offline(tp->pdev)) {
1838 if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1839 tg3_flag_set(tp, NO_FWARE_REPORTED);
1840 netdev_info(tp->dev, "No firmware running\n");
1841 }
1842
1843 break;
1844 }
1845
1846 udelay(10);
1847 }
1848
1849 /* Chip might not be fitted with firmware. Some Sun onboard
1850 * parts are configured like that. So don't signal the timeout
1851 * of the above loop as an error, but do report the lack of
1852 * running firmware once.
1853 */
1854 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1855 tg3_flag_set(tp, NO_FWARE_REPORTED);
1856
1857 netdev_info(tp->dev, "No firmware running\n");
1858 }
1859
1860 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1861 /* The 57765 A0 needs a little more
1862 * time to do some important work.
1863 */
1864 mdelay(10);
1865 }
1866
1867 return 0;
1868 }
1869
1870 static void tg3_link_report(struct tg3 *tp)
1871 {
1872 if (!netif_carrier_ok(tp->dev)) {
1873 netif_info(tp, link, tp->dev, "Link is down\n");
1874 tg3_ump_link_report(tp);
1875 } else if (netif_msg_link(tp)) {
1876 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1877 (tp->link_config.active_speed == SPEED_1000 ?
1878 1000 :
1879 (tp->link_config.active_speed == SPEED_100 ?
1880 100 : 10)),
1881 (tp->link_config.active_duplex == DUPLEX_FULL ?
1882 "full" : "half"));
1883
1884 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1885 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1886 "on" : "off",
1887 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1888 "on" : "off");
1889
1890 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1891 netdev_info(tp->dev, "EEE is %s\n",
1892 tp->setlpicnt ? "enabled" : "disabled");
1893
1894 tg3_ump_link_report(tp);
1895 }
1896
1897 tp->link_up = netif_carrier_ok(tp->dev);
1898 }
1899
1900 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1901 {
1902 u32 flowctrl = 0;
1903
1904 if (adv & ADVERTISE_PAUSE_CAP) {
1905 flowctrl |= FLOW_CTRL_RX;
1906 if (!(adv & ADVERTISE_PAUSE_ASYM))
1907 flowctrl |= FLOW_CTRL_TX;
1908 } else if (adv & ADVERTISE_PAUSE_ASYM)
1909 flowctrl |= FLOW_CTRL_TX;
1910
1911 return flowctrl;
1912 }
1913
1914 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1915 {
1916 u16 miireg;
1917
1918 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1919 miireg = ADVERTISE_1000XPAUSE;
1920 else if (flow_ctrl & FLOW_CTRL_TX)
1921 miireg = ADVERTISE_1000XPSE_ASYM;
1922 else if (flow_ctrl & FLOW_CTRL_RX)
1923 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1924 else
1925 miireg = 0;
1926
1927 return miireg;
1928 }
1929
1930 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1931 {
1932 u32 flowctrl = 0;
1933
1934 if (adv & ADVERTISE_1000XPAUSE) {
1935 flowctrl |= FLOW_CTRL_RX;
1936 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1937 flowctrl |= FLOW_CTRL_TX;
1938 } else if (adv & ADVERTISE_1000XPSE_ASYM)
1939 flowctrl |= FLOW_CTRL_TX;
1940
1941 return flowctrl;
1942 }
1943
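/* Resolve TX/RX pause per the IEEE 802.3 Annex 28B.3 priority rules
 * (1000BASE-X bit encoding). A sketch of the outcome table:
 *
 *   local PAUSE/ASYM   remote PAUSE/ASYM   resolved
 *        1    x             1    x          TX + RX
 *        1    1             0    1          RX only
 *        0    1             1    1          TX only
 *   anything else                           none
 */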
1944 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1945 {
1946 u8 cap = 0;
1947
1948 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1949 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1950 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1951 if (lcladv & ADVERTISE_1000XPAUSE)
1952 cap = FLOW_CTRL_RX;
1953 if (rmtadv & ADVERTISE_1000XPAUSE)
1954 cap = FLOW_CTRL_TX;
1955 }
1956
1957 return cap;
1958 }
1959
1960 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1961 {
1962 u8 autoneg;
1963 u8 flowctrl = 0;
1964 u32 old_rx_mode = tp->rx_mode;
1965 u32 old_tx_mode = tp->tx_mode;
1966
1967 if (tg3_flag(tp, USE_PHYLIB))
1968 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1969 else
1970 autoneg = tp->link_config.autoneg;
1971
1972 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1973 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1974 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1975 else
1976 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1977 } else
1978 flowctrl = tp->link_config.flowctrl;
1979
1980 tp->link_config.active_flowctrl = flowctrl;
1981
1982 if (flowctrl & FLOW_CTRL_RX)
1983 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1984 else
1985 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1986
1987 if (old_rx_mode != tp->rx_mode)
1988 tw32_f(MAC_RX_MODE, tp->rx_mode);
1989
1990 if (flowctrl & FLOW_CTRL_TX)
1991 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1992 else
1993 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1994
1995 if (old_tx_mode != tp->tx_mode)
1996 tw32_f(MAC_TX_MODE, tp->tx_mode);
1997 }
1998
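/* phylib link-change callback (registered through phy_connect() in
 * tg3_phy_init()). Reconciles the MAC with the new PHY state: port
 * mode, duplex, flow control and TX timing, then reports the link
 * change outside tp->lock.
 */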
1999 static void tg3_adjust_link(struct net_device *dev)
2000 {
2001 u8 oldflowctrl, linkmesg = 0;
2002 u32 mac_mode, lcl_adv, rmt_adv;
2003 struct tg3 *tp = netdev_priv(dev);
2004 struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2005
2006 spin_lock_bh(&tp->lock);
2007
2008 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2009 MAC_MODE_HALF_DUPLEX);
2010
2011 oldflowctrl = tp->link_config.active_flowctrl;
2012
2013 if (phydev->link) {
2014 lcl_adv = 0;
2015 rmt_adv = 0;
2016
2017 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2018 mac_mode |= MAC_MODE_PORT_MODE_MII;
2019 else if (phydev->speed == SPEED_1000 ||
2020 tg3_asic_rev(tp) != ASIC_REV_5785)
2021 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2022 else
2023 mac_mode |= MAC_MODE_PORT_MODE_MII;
2024
2025 if (phydev->duplex == DUPLEX_HALF)
2026 mac_mode |= MAC_MODE_HALF_DUPLEX;
2027 else {
2028 lcl_adv = mii_advertise_flowctrl(
2029 tp->link_config.flowctrl);
2030
2031 if (phydev->pause)
2032 rmt_adv = LPA_PAUSE_CAP;
2033 if (phydev->asym_pause)
2034 rmt_adv |= LPA_PAUSE_ASYM;
2035 }
2036
2037 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2038 } else
2039 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2040
2041 if (mac_mode != tp->mac_mode) {
2042 tp->mac_mode = mac_mode;
2043 tw32_f(MAC_MODE, tp->mac_mode);
2044 udelay(40);
2045 }
2046
2047 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2048 if (phydev->speed == SPEED_10)
2049 tw32(MAC_MI_STAT,
2050 MAC_MI_STAT_10MBPS_MODE |
2051 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2052 else
2053 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2054 }
2055
2056 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2057 tw32(MAC_TX_LENGTHS,
2058 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2059 (6 << TX_LENGTHS_IPG_SHIFT) |
2060 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2061 else
2062 tw32(MAC_TX_LENGTHS,
2063 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2064 (6 << TX_LENGTHS_IPG_SHIFT) |
2065 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2066
2067 if (phydev->link != tp->old_link ||
2068 phydev->speed != tp->link_config.active_speed ||
2069 phydev->duplex != tp->link_config.active_duplex ||
2070 oldflowctrl != tp->link_config.active_flowctrl)
2071 linkmesg = 1;
2072
2073 tp->old_link = phydev->link;
2074 tp->link_config.active_speed = phydev->speed;
2075 tp->link_config.active_duplex = phydev->duplex;
2076
2077 spin_unlock_bh(&tp->lock);
2078
2079 if (linkmesg)
2080 tg3_link_report(tp);
2081 }
2082
2083 static int tg3_phy_init(struct tg3 *tp)
2084 {
2085 struct phy_device *phydev;
2086
2087 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2088 return 0;
2089
2090 /* Bring the PHY back to a known state. */
2091 tg3_bmcr_reset(tp);
2092
2093 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2094
2095 /* Attach the MAC to the PHY. */
2096 phydev = phy_connect(tp->dev, phydev_name(phydev),
2097 tg3_adjust_link, phydev->interface);
2098 if (IS_ERR(phydev)) {
2099 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2100 return PTR_ERR(phydev);
2101 }
2102
2103 /* Mask with MAC supported features. */
2104 switch (phydev->interface) {
2105 case PHY_INTERFACE_MODE_GMII:
2106 case PHY_INTERFACE_MODE_RGMII:
2107 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2108 phy_set_max_speed(phydev, SPEED_1000);
2109 phy_support_asym_pause(phydev);
2110 break;
2111 }
2112 fallthrough;
2113 case PHY_INTERFACE_MODE_MII:
2114 phy_set_max_speed(phydev, SPEED_100);
2115 phy_support_asym_pause(phydev);
2116 break;
2117 default:
2118 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2119 return -EINVAL;
2120 }
2121
2122 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2123
2124 phy_attached_info(phydev);
2125
2126 return 0;
2127 }
2128
2129 static void tg3_phy_start(struct tg3 *tp)
2130 {
2131 struct phy_device *phydev;
2132
2133 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2134 return;
2135
2136 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2137
2138 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2139 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2140 phydev->speed = tp->link_config.speed;
2141 phydev->duplex = tp->link_config.duplex;
2142 phydev->autoneg = tp->link_config.autoneg;
2143 ethtool_convert_legacy_u32_to_link_mode(
2144 phydev->advertising, tp->link_config.advertising);
2145 }
2146
2147 phy_start(phydev);
2148
2149 phy_start_aneg(phydev);
2150 }
2151
2152 static void tg3_phy_stop(struct tg3 *tp)
2153 {
2154 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2155 return;
2156
2157 phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2158 }
2159
2160 static void tg3_phy_fini(struct tg3 *tp)
2161 {
2162 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2163 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2164 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2165 }
2166 }
2167
2168 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2169 {
2170 int err;
2171 u32 val;
2172
2173 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2174 return 0;
2175
2176 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2177 /* Cannot do read-modify-write on 5401 */
2178 err = tg3_phy_auxctl_write(tp,
2179 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2180 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2181 0x4c20);
2182 goto done;
2183 }
2184
2185 err = tg3_phy_auxctl_read(tp,
2186 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2187 if (err)
2188 return err;
2189
2190 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2191 err = tg3_phy_auxctl_write(tp,
2192 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2193
2194 done:
2195 return err;
2196 }
2197
2198 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2199 {
2200 u32 phytest;
2201
2202 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2203 u32 phy;
2204
2205 tg3_writephy(tp, MII_TG3_FET_TEST,
2206 phytest | MII_TG3_FET_SHADOW_EN);
2207 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2208 if (enable)
2209 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2210 else
2211 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2212 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2213 }
2214 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2215 }
2216 }
2217
2218 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2219 {
2220 u32 reg;
2221
2222 if (!tg3_flag(tp, 5705_PLUS) ||
2223 (tg3_flag(tp, 5717_PLUS) &&
2224 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2225 return;
2226
2227 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2228 tg3_phy_fet_toggle_apd(tp, enable);
2229 return;
2230 }
2231
2232 reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2233 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2234 MII_TG3_MISC_SHDW_SCR5_SDTL |
2235 MII_TG3_MISC_SHDW_SCR5_C125OE;
2236 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2237 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2238
2239 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2240
2241
2242 reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2243 if (enable)
2244 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2245
2246 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2247 }
2248
2249 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2250 {
2251 u32 phy;
2252
2253 if (!tg3_flag(tp, 5705_PLUS) ||
2254 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2255 return;
2256
2257 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2258 u32 ephy;
2259
2260 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2261 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2262
2263 tg3_writephy(tp, MII_TG3_FET_TEST,
2264 ephy | MII_TG3_FET_SHADOW_EN);
2265 if (!tg3_readphy(tp, reg, &phy)) {
2266 if (enable)
2267 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2268 else
2269 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2270 tg3_writephy(tp, reg, phy);
2271 }
2272 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2273 }
2274 } else {
2275 int ret;
2276
2277 ret = tg3_phy_auxctl_read(tp,
2278 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2279 if (!ret) {
2280 if (enable)
2281 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2282 else
2283 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2284 tg3_phy_auxctl_write(tp,
2285 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2286 }
2287 }
2288 }
2289
2290 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2291 {
2292 int ret;
2293 u32 val;
2294
2295 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2296 return;
2297
2298 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2299 if (!ret)
2300 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2301 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2302 }
2303
2304 static void tg3_phy_apply_otp(struct tg3 *tp)
2305 {
2306 u32 otp, phy;
2307
2308 if (!tp->phy_otp)
2309 return;
2310
2311 otp = tp->phy_otp;
2312
2313 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2314 return;
2315
2316 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2317 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2318 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2319
2320 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2321 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2322 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2323
2324 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2325 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2326 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2327
2328 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2329 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2330
2331 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2332 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2333
2334 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2335 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2336 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2337
2338 tg3_phy_toggle_auxctl_smdsp(tp, false);
2339 }
2340
2341 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2342 {
2343 u32 val;
2344 struct ethtool_eee *dest = &tp->eee;
2345
2346 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2347 return;
2348
2349 if (eee)
2350 dest = eee;
2351
2352 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2353 return;
2354
2355 /* Pull eee_active */
2356 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2357 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2358 dest->eee_active = 1;
2359 } else
2360 dest->eee_active = 0;
2361
2362 /* Pull lp advertised settings */
2363 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2364 return;
2365 dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2366
2367 /* Pull advertised and eee_enabled settings */
2368 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2369 return;
2370 dest->eee_enabled = !!val;
2371 dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2372
2373 /* Pull tx_lpi_enabled */
2374 val = tr32(TG3_CPMU_EEE_MODE);
2375 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2376
2377 /* Pull lpi timer value */
2378 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2379 }
2380
2381 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2382 {
2383 u32 val;
2384
2385 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2386 return;
2387
2388 tp->setlpicnt = 0;
2389
2390 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2391 current_link_up &&
2392 tp->link_config.active_duplex == DUPLEX_FULL &&
2393 (tp->link_config.active_speed == SPEED_100 ||
2394 tp->link_config.active_speed == SPEED_1000)) {
2395 u32 eeectl;
2396
2397 if (tp->link_config.active_speed == SPEED_1000)
2398 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2399 else
2400 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2401
2402 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2403
2404 tg3_eee_pull_config(tp, NULL);
2405 if (tp->eee.eee_active)
2406 tp->setlpicnt = 2;
2407 }
2408
2409 if (!tp->setlpicnt) {
2410 if (current_link_up &&
2411 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2412 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2413 tg3_phy_toggle_auxctl_smdsp(tp, false);
2414 }
2415
2416 val = tr32(TG3_CPMU_EEE_MODE);
2417 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2418 }
2419 }
2420
2421 static void tg3_phy_eee_enable(struct tg3 *tp)
2422 {
2423 u32 val;
2424
2425 if (tp->link_config.active_speed == SPEED_1000 &&
2426 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2427 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2428 tg3_flag(tp, 57765_CLASS)) &&
2429 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2430 val = MII_TG3_DSP_TAP26_ALNOKO |
2431 MII_TG3_DSP_TAP26_RMRXSTO;
2432 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2433 tg3_phy_toggle_auxctl_smdsp(tp, false);
2434 }
2435
2436 val = tr32(TG3_CPMU_EEE_MODE);
2437 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2438 }
2439
2440 static int tg3_wait_macro_done(struct tg3 *tp)
2441 {
2442 int limit = 100;
2443
2444 while (limit--) {
2445 u32 tmp32;
2446
2447 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2448 if ((tmp32 & 0x1000) == 0)
2449 break;
2450 }
2451 }
2452 if (limit < 0)
2453 return -EBUSY;
2454
2455 return 0;
2456 }
2457
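/* Write a six-word test pattern into each of the four DSP channel
 * blocks (0x2000 apart) and read it back. A macro-done timeout sets
 * *resetp so the caller re-resets the PHY before retrying; a pattern
 * mismatch just returns -EBUSY for a plain retry.
 */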
2458 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2459 {
2460 static const u32 test_pat[4][6] = {
2461 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2462 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2463 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2464 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2465 };
2466 int chan;
2467
2468 for (chan = 0; chan < 4; chan++) {
2469 int i;
2470
2471 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2472 (chan * 0x2000) | 0x0200);
2473 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2474
2475 for (i = 0; i < 6; i++)
2476 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2477 test_pat[chan][i]);
2478
2479 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2480 if (tg3_wait_macro_done(tp)) {
2481 *resetp = 1;
2482 return -EBUSY;
2483 }
2484
2485 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2486 (chan * 0x2000) | 0x0200);
2487 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2488 if (tg3_wait_macro_done(tp)) {
2489 *resetp = 1;
2490 return -EBUSY;
2491 }
2492
2493 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2494 if (tg3_wait_macro_done(tp)) {
2495 *resetp = 1;
2496 return -EBUSY;
2497 }
2498
2499 for (i = 0; i < 6; i += 2) {
2500 u32 low, high;
2501
2502 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2503 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2504 tg3_wait_macro_done(tp)) {
2505 *resetp = 1;
2506 return -EBUSY;
2507 }
2508 low &= 0x7fff;
2509 high &= 0x000f;
2510 if (low != test_pat[chan][i] ||
2511 high != test_pat[chan][i+1]) {
2512 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2513 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2514 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2515
2516 return -EBUSY;
2517 }
2518 }
2519 }
2520
2521 return 0;
2522 }
2523
2524 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2525 {
2526 int chan;
2527
2528 for (chan = 0; chan < 4; chan++) {
2529 int i;
2530
2531 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2532 (chan * 0x2000) | 0x0200);
2533 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2534 for (i = 0; i < 6; i++)
2535 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2536 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2537 if (tg3_wait_macro_done(tp))
2538 return -EBUSY;
2539 }
2540
2541 return 0;
2542 }
2543
2544 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2545 {
2546 u32 reg32, phy9_orig;
2547 int retries, do_phy_reset, err;
2548
2549 retries = 10;
2550 do_phy_reset = 1;
2551 do {
2552 if (do_phy_reset) {
2553 err = tg3_bmcr_reset(tp);
2554 if (err)
2555 return err;
2556 do_phy_reset = 0;
2557 }
2558
2559 /* Disable transmitter and interrupt. */
2560 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2561 continue;
2562
2563 reg32 |= 0x3000;
2564 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2565
2566 /* Set full-duplex, 1000 mbps. */
2567 tg3_writephy(tp, MII_BMCR,
2568 BMCR_FULLDPLX | BMCR_SPEED1000);
2569
2570 /* Set to master mode. */
2571 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2572 continue;
2573
2574 tg3_writephy(tp, MII_CTRL1000,
2575 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2576
2577 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2578 if (err)
2579 return err;
2580
2581 /* Block the PHY control access. */
2582 tg3_phydsp_write(tp, 0x8005, 0x0800);
2583
2584 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2585 if (!err)
2586 break;
2587 } while (--retries);
2588
2589 err = tg3_phy_reset_chanpat(tp);
2590 if (err)
2591 return err;
2592
2593 tg3_phydsp_write(tp, 0x8005, 0x0000);
2594
2595 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2596 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2597
2598 tg3_phy_toggle_auxctl_smdsp(tp, false);
2599
2600 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2601
2602 err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2603 if (err)
2604 return err;
2605
2606 reg32 &= ~0x3000;
2607 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2608
2609 return 0;
2610 }
2611
2612 static void tg3_carrier_off(struct tg3 *tp)
2613 {
2614 netif_carrier_off(tp->dev);
2615 tp->link_up = false;
2616 }
2617
2618 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2619 {
2620 if (tg3_flag(tp, ENABLE_ASF))
2621 netdev_warn(tp->dev,
2622 "Management side-band traffic will be interrupted during phy settings change\n");
2623 }
2624
2625 /* Reset the tigon3 PHY unconditionally and apply the
2626  * chip-specific workarounds afterwards.
2627  */
2628 static int tg3_phy_reset(struct tg3 *tp)
2629 {
2630 u32 val, cpmuctrl;
2631 int err;
2632
2633 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2634 val = tr32(GRC_MISC_CFG);
2635 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2636 udelay(40);
2637 }
2638 err = tg3_readphy(tp, MII_BMSR, &val);
2639 err |= tg3_readphy(tp, MII_BMSR, &val);
2640 if (err != 0)
2641 return -EBUSY;
2642
2643 if (netif_running(tp->dev) && tp->link_up) {
2644 netif_carrier_off(tp->dev);
2645 tg3_link_report(tp);
2646 }
2647
2648 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2649 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2650 tg3_asic_rev(tp) == ASIC_REV_5705) {
2651 err = tg3_phy_reset_5703_4_5(tp);
2652 if (err)
2653 return err;
2654 goto out;
2655 }
2656
2657 cpmuctrl = 0;
2658 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2659 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2660 cpmuctrl = tr32(TG3_CPMU_CTRL);
2661 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2662 tw32(TG3_CPMU_CTRL,
2663 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2664 }
2665
2666 err = tg3_bmcr_reset(tp);
2667 if (err)
2668 return err;
2669
2670 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2671 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2672 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2673
2674 tw32(TG3_CPMU_CTRL, cpmuctrl);
2675 }
2676
2677 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2678 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2679 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2680 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2681 CPMU_LSPD_1000MB_MACCLK_12_5) {
2682 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2683 udelay(40);
2684 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2685 }
2686 }
2687
2688 if (tg3_flag(tp, 5717_PLUS) &&
2689 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2690 return 0;
2691
2692 tg3_phy_apply_otp(tp);
2693
2694 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2695 tg3_phy_toggle_apd(tp, true);
2696 else
2697 tg3_phy_toggle_apd(tp, false);
2698
2699 out:
2700 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2701 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2702 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2703 tg3_phydsp_write(tp, 0x000a, 0x0323);
2704 tg3_phy_toggle_auxctl_smdsp(tp, false);
2705 }
2706
2707 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2708 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2709 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2710 }
2711
2712 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2713 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2714 tg3_phydsp_write(tp, 0x000a, 0x310b);
2715 tg3_phydsp_write(tp, 0x201f, 0x9506);
2716 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2717 tg3_phy_toggle_auxctl_smdsp(tp, false);
2718 }
2719 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2720 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2721 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2722 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2723 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2724 tg3_writephy(tp, MII_TG3_TEST1,
2725 MII_TG3_TEST1_TRIM_EN | 0x4);
2726 } else
2727 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2728
2729 tg3_phy_toggle_auxctl_smdsp(tp, false);
2730 }
2731 }
2732
2733 /* Set Extended packet length bit (bit 14) on all chips that */
2734 /* support jumbo frames */
2735 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2736 /* Cannot do read-modify-write on 5401 */
2737 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2738 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2739 /* Set bit 14 with read-modify-write to preserve other bits */
2740 err = tg3_phy_auxctl_read(tp,
2741 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2742 if (!err)
2743 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2744 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2745 }
2746
2747 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2748 * jumbo frames transmission.
2749 */
2750 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2751 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2752 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2753 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2754 }
2755
2756 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2757 /* adjust output voltage */
2758 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2759 }
2760
2761 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2762 tg3_phydsp_write(tp, 0xffb, 0x4000);
2763
2764 tg3_phy_toggle_automdix(tp, true);
2765 tg3_phy_set_wirespeed(tp);
2766 return 0;
2767 }
2768
2769 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2770 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2771 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2772 TG3_GPIO_MSG_NEED_VAUX)
2773 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2774 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2775 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2776 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2777 (TG3_GPIO_MSG_DRVR_PRES << 12))
2778
2779 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2780 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2781 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2782 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2783 (TG3_GPIO_MSG_NEED_VAUX << 12))
2784
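/* Each PCI function owns a 4-bit nibble in the shared GPIO status
 * word (function N at shift 4 * N), which is why the ..._ALL_..._MASK
 * macros above repeat each flag at shifts 0, 4, 8 and 12. The 5717
 * and 5719 keep this word in an APE register, other chips in
 * TG3_CPMU_DRV_STATUS.
 */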
2785 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2786 {
2787 u32 status, shift;
2788
2789 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2790 tg3_asic_rev(tp) == ASIC_REV_5719)
2791 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2792 else
2793 status = tr32(TG3_CPMU_DRV_STATUS);
2794
2795 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2796 status &= ~(TG3_GPIO_MSG_MASK << shift);
2797 status |= (newstat << shift);
2798
2799 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2800 tg3_asic_rev(tp) == ASIC_REV_5719)
2801 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2802 else
2803 tw32(TG3_CPMU_DRV_STATUS, status);
2804
2805 return status >> TG3_APE_GPIO_MSG_SHIFT;
2806 }
2807
2808 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2809 {
2810 if (!tg3_flag(tp, IS_NIC))
2811 return 0;
2812
2813 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2814 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2815 tg3_asic_rev(tp) == ASIC_REV_5720) {
2816 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2817 return -EIO;
2818
2819 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2820
2821 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2822 TG3_GRC_LCLCTL_PWRSW_DELAY);
2823
2824 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2825 } else {
2826 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2827 TG3_GRC_LCLCTL_PWRSW_DELAY);
2828 }
2829
2830 return 0;
2831 }
2832
2833 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2834 {
2835 u32 grc_local_ctrl;
2836
2837 if (!tg3_flag(tp, IS_NIC) ||
2838 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2839 tg3_asic_rev(tp) == ASIC_REV_5701)
2840 return;
2841
2842 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2843
2844 tw32_wait_f(GRC_LOCAL_CTRL,
2845 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2846 TG3_GRC_LCLCTL_PWRSW_DELAY);
2847
2848 tw32_wait_f(GRC_LOCAL_CTRL,
2849 grc_local_ctrl,
2850 TG3_GRC_LCLCTL_PWRSW_DELAY);
2851
2852 tw32_wait_f(GRC_LOCAL_CTRL,
2853 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2854 TG3_GRC_LCLCTL_PWRSW_DELAY);
2855 }
2856
2857 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2858 {
2859 if (!tg3_flag(tp, IS_NIC))
2860 return;
2861
2862 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2863 tg3_asic_rev(tp) == ASIC_REV_5701) {
2864 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2865 (GRC_LCLCTRL_GPIO_OE0 |
2866 GRC_LCLCTRL_GPIO_OE1 |
2867 GRC_LCLCTRL_GPIO_OE2 |
2868 GRC_LCLCTRL_GPIO_OUTPUT0 |
2869 GRC_LCLCTRL_GPIO_OUTPUT1),
2870 TG3_GRC_LCLCTL_PWRSW_DELAY);
2871 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2872 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2873 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2874 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2875 GRC_LCLCTRL_GPIO_OE1 |
2876 GRC_LCLCTRL_GPIO_OE2 |
2877 GRC_LCLCTRL_GPIO_OUTPUT0 |
2878 GRC_LCLCTRL_GPIO_OUTPUT1 |
2879 tp->grc_local_ctrl;
2880 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2881 TG3_GRC_LCLCTL_PWRSW_DELAY);
2882
2883 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2884 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2885 TG3_GRC_LCLCTL_PWRSW_DELAY);
2886
2887 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2888 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2889 TG3_GRC_LCLCTL_PWRSW_DELAY);
2890 } else {
2891 u32 no_gpio2;
2892 u32 grc_local_ctrl = 0;
2893
2894 /* Workaround to prevent overdrawing Amps. */
2895 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2896 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2897 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2898 grc_local_ctrl,
2899 TG3_GRC_LCLCTL_PWRSW_DELAY);
2900 }
2901
2902 /* On 5753 and variants, GPIO2 cannot be used. */
2903 no_gpio2 = tp->nic_sram_data_cfg &
2904 NIC_SRAM_DATA_CFG_NO_GPIO2;
2905
2906 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2907 GRC_LCLCTRL_GPIO_OE1 |
2908 GRC_LCLCTRL_GPIO_OE2 |
2909 GRC_LCLCTRL_GPIO_OUTPUT1 |
2910 GRC_LCLCTRL_GPIO_OUTPUT2;
2911 if (no_gpio2) {
2912 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2913 GRC_LCLCTRL_GPIO_OUTPUT2);
2914 }
2915 tw32_wait_f(GRC_LOCAL_CTRL,
2916 tp->grc_local_ctrl | grc_local_ctrl,
2917 TG3_GRC_LCLCTL_PWRSW_DELAY);
2918
2919 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2920
2921 tw32_wait_f(GRC_LOCAL_CTRL,
2922 tp->grc_local_ctrl | grc_local_ctrl,
2923 TG3_GRC_LCLCTL_PWRSW_DELAY);
2924
2925 if (!no_gpio2) {
2926 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2927 tw32_wait_f(GRC_LOCAL_CTRL,
2928 tp->grc_local_ctrl | grc_local_ctrl,
2929 TG3_GRC_LCLCTL_PWRSW_DELAY);
2930 }
2931 }
2932 }
2933
2934 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2935 {
2936 u32 msg = 0;
2937
2938 /* Serialize power state transitions */
2939 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2940 return;
2941
2942 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2943 msg = TG3_GPIO_MSG_NEED_VAUX;
2944
2945 msg = tg3_set_function_status(tp, msg);
2946
2947 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2948 goto done;
2949
2950 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2951 tg3_pwrsrc_switch_to_vaux(tp);
2952 else
2953 tg3_pwrsrc_die_with_vmain(tp);
2954
2955 done:
2956 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2957 }
2958
2959 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2960 {
2961 bool need_vaux = false;
2962
2963 /* The GPIOs do something completely different on 57765. */
2964 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2965 return;
2966
2967 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2968 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2969 tg3_asic_rev(tp) == ASIC_REV_5720) {
2970 tg3_frob_aux_power_5717(tp, include_wol ?
2971 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2972 return;
2973 }
2974
2975 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2976 struct net_device *dev_peer;
2977
2978 dev_peer = pci_get_drvdata(tp->pdev_peer);
2979
2980 /* remove_one() may have been run on the peer. */
2981 if (dev_peer) {
2982 struct tg3 *tp_peer = netdev_priv(dev_peer);
2983
2984 if (tg3_flag(tp_peer, INIT_COMPLETE))
2985 return;
2986
2987 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2988 tg3_flag(tp_peer, ENABLE_ASF))
2989 need_vaux = true;
2990 }
2991 }
2992
2993 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2994 tg3_flag(tp, ENABLE_ASF))
2995 need_vaux = true;
2996
2997 if (need_vaux)
2998 tg3_pwrsrc_switch_to_vaux(tp);
2999 else
3000 tg3_pwrsrc_die_with_vmain(tp);
3001 }
3002
3003 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3004 {
3005 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3006 return 1;
3007 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3008 if (speed != SPEED_10)
3009 return 1;
3010 } else if (speed == SPEED_10)
3011 return 1;
3012
3013 return 0;
3014 }
3015
3016 static bool tg3_phy_power_bug(struct tg3 *tp)
3017 {
3018 switch (tg3_asic_rev(tp)) {
3019 case ASIC_REV_5700:
3020 case ASIC_REV_5704:
3021 return true;
3022 case ASIC_REV_5780:
3023 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3024 return true;
3025 return false;
3026 case ASIC_REV_5717:
3027 if (!tp->pci_fn)
3028 return true;
3029 return false;
3030 case ASIC_REV_5719:
3031 case ASIC_REV_5720:
3032 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3033 !tp->pci_fn)
3034 return true;
3035 return false;
3036 }
3037
3038 return false;
3039 }
3040
3041 static bool tg3_phy_led_bug(struct tg3 *tp)
3042 {
3043 switch (tg3_asic_rev(tp)) {
3044 case ASIC_REV_5719:
3045 case ASIC_REV_5720:
3046 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3047 !tp->pci_fn)
3048 return true;
3049 return false;
3050 }
3051
3052 return false;
3053 }
3054
3055 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3056 {
3057 u32 val;
3058
3059 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3060 return;
3061
3062 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3063 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3064 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3065 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3066
3067 sg_dig_ctrl |=
3068 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3069 tw32(SG_DIG_CTRL, sg_dig_ctrl);
3070 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3071 }
3072 return;
3073 }
3074
3075 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3076 tg3_bmcr_reset(tp);
3077 val = tr32(GRC_MISC_CFG);
3078 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3079 udelay(40);
3080 return;
3081 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3082 u32 phytest;
3083 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3084 u32 phy;
3085
3086 tg3_writephy(tp, MII_ADVERTISE, 0);
3087 tg3_writephy(tp, MII_BMCR,
3088 BMCR_ANENABLE | BMCR_ANRESTART);
3089
3090 tg3_writephy(tp, MII_TG3_FET_TEST,
3091 phytest | MII_TG3_FET_SHADOW_EN);
3092 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3093 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3094 tg3_writephy(tp,
3095 MII_TG3_FET_SHDW_AUXMODE4,
3096 phy);
3097 }
3098 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3099 }
3100 return;
3101 } else if (do_low_power) {
3102 if (!tg3_phy_led_bug(tp))
3103 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3104 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3105
3106 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3107 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3108 MII_TG3_AUXCTL_PCTL_VREG_11V;
3109 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3110 }
3111
3112 /* The PHY should not be powered down on some chips because
3113 * of bugs.
3114 */
3115 if (tg3_phy_power_bug(tp))
3116 return;
3117
3118 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3119 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3120 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3121 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3122 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3123 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3124 }
3125
3126 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3127 }
3128
3129 /* tp->lock is held. */
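/* Acquire the hardware NVRAM arbitration semaphore: request with
 * SWARB_REQ_SET1, then poll for SWARB_GNT1 for up to 8000 * 20 usec
 * (~160 ms). The lock nests via nvram_lock_cnt and is released by
 * tg3_nvram_unlock() only when the count drops to zero.
 */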
3130 static int tg3_nvram_lock(struct tg3 *tp)
3131 {
3132 if (tg3_flag(tp, NVRAM)) {
3133 int i;
3134
3135 if (tp->nvram_lock_cnt == 0) {
3136 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3137 for (i = 0; i < 8000; i++) {
3138 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3139 break;
3140 udelay(20);
3141 }
3142 if (i == 8000) {
3143 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3144 return -ENODEV;
3145 }
3146 }
3147 tp->nvram_lock_cnt++;
3148 }
3149 return 0;
3150 }
3151
3152 /* tp->lock is held. */
3153 static void tg3_nvram_unlock(struct tg3 *tp)
3154 {
3155 if (tg3_flag(tp, NVRAM)) {
3156 if (tp->nvram_lock_cnt > 0)
3157 tp->nvram_lock_cnt--;
3158 if (tp->nvram_lock_cnt == 0)
3159 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3160 }
3161 }
3162
3163 /* tp->lock is held. */
3164 static void tg3_enable_nvram_access(struct tg3 *tp)
3165 {
3166 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3167 u32 nvaccess = tr32(NVRAM_ACCESS);
3168
3169 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3170 }
3171 }
3172
3173 /* tp->lock is held. */
3174 static void tg3_disable_nvram_access(struct tg3 *tp)
3175 {
3176 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3177 u32 nvaccess = tr32(NVRAM_ACCESS);
3178
3179 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3180 }
3181 }
3182
3183 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3184 u32 offset, u32 *val)
3185 {
3186 u32 tmp;
3187 int i;
3188
3189 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3190 return -EINVAL;
3191
3192 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3193 EEPROM_ADDR_DEVID_MASK |
3194 EEPROM_ADDR_READ);
3195 tw32(GRC_EEPROM_ADDR,
3196 tmp |
3197 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3198 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3199 EEPROM_ADDR_ADDR_MASK) |
3200 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3201
3202 for (i = 0; i < 1000; i++) {
3203 tmp = tr32(GRC_EEPROM_ADDR);
3204
3205 if (tmp & EEPROM_ADDR_COMPLETE)
3206 break;
3207 msleep(1);
3208 }
3209 if (!(tmp & EEPROM_ADDR_COMPLETE))
3210 return -EBUSY;
3211
3212 tmp = tr32(GRC_EEPROM_DATA);
3213
3214 /*
3215 * The data will always be opposite the native endian
3216 * format. Perform a blind byteswap to compensate.
3217 */
3218 *val = swab32(tmp);
3219
3220 return 0;
3221 }
3222
3223 #define NVRAM_CMD_TIMEOUT 10000
3224
3225 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3226 {
3227 int i;
3228
3229 tw32(NVRAM_CMD, nvram_cmd);
3230 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3231 usleep_range(10, 40);
3232 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3233 udelay(10);
3234 break;
3235 }
3236 }
3237
3238 if (i == NVRAM_CMD_TIMEOUT)
3239 return -EBUSY;
3240
3241 return 0;
3242 }
3243
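/* Atmel AT45DB0X1B-style flash is not a flat power-of-two address
 * space: each page occupies a 1 << ATMEL_AT45DB0X1B_PAGE_POS slot.
 * Worked example, assuming the 264-byte page size this driver
 * configures for those parts and a page position of 9: linear
 * address 1000 -> page 3, offset 208 -> physical (3 << 9) + 208 =
 * 1744. tg3_nvram_logical_addr() inverts the mapping.
 */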
3244 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3245 {
3246 if (tg3_flag(tp, NVRAM) &&
3247 tg3_flag(tp, NVRAM_BUFFERED) &&
3248 tg3_flag(tp, FLASH) &&
3249 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3250 (tp->nvram_jedecnum == JEDEC_ATMEL))
3251
3252 addr = ((addr / tp->nvram_pagesize) <<
3253 ATMEL_AT45DB0X1B_PAGE_POS) +
3254 (addr % tp->nvram_pagesize);
3255
3256 return addr;
3257 }
3258
3259 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3260 {
3261 if (tg3_flag(tp, NVRAM) &&
3262 tg3_flag(tp, NVRAM_BUFFERED) &&
3263 tg3_flag(tp, FLASH) &&
3264 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3265 (tp->nvram_jedecnum == JEDEC_ATMEL))
3266
3267 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3268 tp->nvram_pagesize) +
3269 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3270
3271 return addr;
3272 }
3273
3274 /* NOTE: Data read in from NVRAM is byteswapped according to
3275 * the byteswapping settings for all other register accesses.
3276 * tg3 devices are BE devices, so on a BE machine, the data
3277 * returned will be exactly as it is seen in NVRAM. On a LE
3278 * machine, the 32-bit value will be byteswapped.
3279 */
3280 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3281 {
3282 int ret;
3283
3284 if (!tg3_flag(tp, NVRAM))
3285 return tg3_nvram_read_using_eeprom(tp, offset, val);
3286
3287 offset = tg3_nvram_phys_addr(tp, offset);
3288
3289 if (offset > NVRAM_ADDR_MSK)
3290 return -EINVAL;
3291
3292 ret = tg3_nvram_lock(tp);
3293 if (ret)
3294 return ret;
3295
3296 tg3_enable_nvram_access(tp);
3297
3298 tw32(NVRAM_ADDR, offset);
3299 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3300 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3301
3302 if (ret == 0)
3303 *val = tr32(NVRAM_RDDATA);
3304
3305 tg3_disable_nvram_access(tp);
3306
3307 tg3_nvram_unlock(tp);
3308
3309 return ret;
3310 }
3311
3312 /* Ensures NVRAM data is in bytestream format. */
3313 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3314 {
3315 u32 v;
3316 int res = tg3_nvram_read(tp, offset, &v);
3317 if (!res)
3318 *val = cpu_to_be32(v);
3319 return res;
3320 }
3321
3322 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3323 u32 offset, u32 len, u8 *buf)
3324 {
3325 int i, j, rc = 0;
3326 u32 val;
3327
3328 for (i = 0; i < len; i += 4) {
3329 u32 addr;
3330 __be32 data;
3331
3332 addr = offset + i;
3333
3334 memcpy(&data, buf + i, 4);
3335
3336 /*
3337 * The SEEPROM interface expects the data to always be opposite
3338 * the native endian format. We accomplish this by reversing
3339 * all the operations that would have been performed on the
3340 * data from a call to tg3_nvram_read_be32().
3341 */
3342 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3343
3344 val = tr32(GRC_EEPROM_ADDR);
3345 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3346
3347 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3348 EEPROM_ADDR_READ);
3349 tw32(GRC_EEPROM_ADDR, val |
3350 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3351 (addr & EEPROM_ADDR_ADDR_MASK) |
3352 EEPROM_ADDR_START |
3353 EEPROM_ADDR_WRITE);
3354
3355 for (j = 0; j < 1000; j++) {
3356 val = tr32(GRC_EEPROM_ADDR);
3357
3358 if (val & EEPROM_ADDR_COMPLETE)
3359 break;
3360 msleep(1);
3361 }
3362 if (!(val & EEPROM_ADDR_COMPLETE)) {
3363 rc = -EBUSY;
3364 break;
3365 }
3366 }
3367
3368 return rc;
3369 }
3370
3371 /* offset and length are dword aligned */
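/* Unbuffered flash needs a full page read-modify-write cycle: read
 * the enclosing page into a bounce buffer, merge the caller's data,
 * issue a write-enable plus page erase, then write-enable again and
 * program the page back one word at a time with FIRST/LAST framing.
 */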
3372 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3373 u8 *buf)
3374 {
3375 int ret = 0;
3376 u32 pagesize = tp->nvram_pagesize;
3377 u32 pagemask = pagesize - 1;
3378 u32 nvram_cmd;
3379 u8 *tmp;
3380
3381 tmp = kmalloc(pagesize, GFP_KERNEL);
3382 if (tmp == NULL)
3383 return -ENOMEM;
3384
3385 while (len) {
3386 int j;
3387 u32 phy_addr, page_off, size;
3388
3389 phy_addr = offset & ~pagemask;
3390
3391 for (j = 0; j < pagesize; j += 4) {
3392 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3393 (__be32 *) (tmp + j));
3394 if (ret)
3395 break;
3396 }
3397 if (ret)
3398 break;
3399
3400 page_off = offset & pagemask;
3401 size = pagesize;
3402 if (len < size)
3403 size = len;
3404
3405 len -= size;
3406
3407 memcpy(tmp + page_off, buf, size);
3408
3409 offset = offset + (pagesize - page_off);
3410
3411 tg3_enable_nvram_access(tp);
3412
3413 /*
3414 * Before we can erase the flash page, we need
3415 * to issue a special "write enable" command.
3416 */
3417 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3418
3419 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3420 break;
3421
3422 /* Erase the target page */
3423 tw32(NVRAM_ADDR, phy_addr);
3424
3425 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3426 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3427
3428 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3429 break;
3430
3431 /* Issue another write enable to start the write. */
3432 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3433
3434 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3435 break;
3436
3437 for (j = 0; j < pagesize; j += 4) {
3438 __be32 data;
3439
3440 data = *((__be32 *) (tmp + j));
3441
3442 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3443
3444 tw32(NVRAM_ADDR, phy_addr + j);
3445
3446 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3447 NVRAM_CMD_WR;
3448
3449 if (j == 0)
3450 nvram_cmd |= NVRAM_CMD_FIRST;
3451 else if (j == (pagesize - 4))
3452 nvram_cmd |= NVRAM_CMD_LAST;
3453
3454 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3455 if (ret)
3456 break;
3457 }
3458 if (ret)
3459 break;
3460 }
3461
3462 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3463 tg3_nvram_exec_cmd(tp, nvram_cmd);
3464
3465 kfree(tmp);
3466
3467 return ret;
3468 }
3469
3470 /* offset and length are dword aligned */
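/* Buffered flash and EEPROM can be programmed in place: NVRAM_CMD_FIRST
 * frames the first word of each page and NVRAM_CMD_LAST the final word
 * of each page (and of the whole transfer). ST parts additionally need
 * a write-enable command at every page start.
 */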
3471 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3472 u8 *buf)
3473 {
3474 int i, ret = 0;
3475
3476 for (i = 0; i < len; i += 4, offset += 4) {
3477 u32 page_off, phy_addr, nvram_cmd;
3478 __be32 data;
3479
3480 memcpy(&data, buf + i, 4);
3481 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3482
3483 page_off = offset % tp->nvram_pagesize;
3484
3485 phy_addr = tg3_nvram_phys_addr(tp, offset);
3486
3487 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3488
3489 if (page_off == 0 || i == 0)
3490 nvram_cmd |= NVRAM_CMD_FIRST;
3491 if (page_off == (tp->nvram_pagesize - 4))
3492 nvram_cmd |= NVRAM_CMD_LAST;
3493
3494 if (i == (len - 4))
3495 nvram_cmd |= NVRAM_CMD_LAST;
3496
3497 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3498 !tg3_flag(tp, FLASH) ||
3499 !tg3_flag(tp, 57765_PLUS))
3500 tw32(NVRAM_ADDR, phy_addr);
3501
3502 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3503 !tg3_flag(tp, 5755_PLUS) &&
3504 (tp->nvram_jedecnum == JEDEC_ST) &&
3505 (nvram_cmd & NVRAM_CMD_FIRST)) {
3506 u32 cmd;
3507
3508 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3509 ret = tg3_nvram_exec_cmd(tp, cmd);
3510 if (ret)
3511 break;
3512 }
3513 if (!tg3_flag(tp, FLASH)) {
3514 /* We always do complete word writes to eeprom. */
3515 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3516 }
3517
3518 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3519 if (ret)
3520 break;
3521 }
3522 return ret;
3523 }
3524
3525 /* offset and length are dword aligned */
3526 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3527 {
3528 int ret;
3529
3530 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3531 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3532 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3533 udelay(40);
3534 }
3535
3536 if (!tg3_flag(tp, NVRAM)) {
3537 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3538 } else {
3539 u32 grc_mode;
3540
3541 ret = tg3_nvram_lock(tp);
3542 if (ret)
3543 return ret;
3544
3545 tg3_enable_nvram_access(tp);
3546 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3547 tw32(NVRAM_WRITE1, 0x406);
3548
3549 grc_mode = tr32(GRC_MODE);
3550 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3551
3552 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3553 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3554 buf);
3555 } else {
3556 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3557 buf);
3558 }
3559
3560 grc_mode = tr32(GRC_MODE);
3561 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3562
3563 tg3_disable_nvram_access(tp);
3564 tg3_nvram_unlock(tp);
3565 }
3566
3567 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3568 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3569 udelay(40);
3570 }
3571
3572 return ret;
3573 }
3574
3575 #define RX_CPU_SCRATCH_BASE 0x30000
3576 #define RX_CPU_SCRATCH_SIZE 0x04000
3577 #define TX_CPU_SCRATCH_BASE 0x34000
3578 #define TX_CPU_SCRATCH_SIZE 0x04000
3579
3580 /* tp->lock is held. */
3581 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3582 {
3583 int i;
3584 const int iters = 10000;
3585
3586 for (i = 0; i < iters; i++) {
3587 tw32(cpu_base + CPU_STATE, 0xffffffff);
3588 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3589 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3590 break;
3591 if (pci_channel_offline(tp->pdev))
3592 return -EBUSY;
3593 }
3594
3595 return (i == iters) ? -EBUSY : 0;
3596 }
3597
3598 /* tp->lock is held. */
3599 static int tg3_rxcpu_pause(struct tg3 *tp)
3600 {
3601 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3602
3603 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3604 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3605 udelay(10);
3606
3607 return rc;
3608 }
3609
3610 /* tp->lock is held. */
3611 static int tg3_txcpu_pause(struct tg3 *tp)
3612 {
3613 return tg3_pause_cpu(tp, TX_CPU_BASE);
3614 }
3615
3616 /* tp->lock is held. */
3617 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3618 {
3619 tw32(cpu_base + CPU_STATE, 0xffffffff);
3620 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3621 }
3622
3623 /* tp->lock is held. */
3624 static void tg3_rxcpu_resume(struct tg3 *tp)
3625 {
3626 tg3_resume_cpu(tp, RX_CPU_BASE);
3627 }
3628
3629 /* tp->lock is held. */
3630 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3631 {
3632 int rc;
3633
3634 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3635
3636 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3637 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3638
3639 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3640 return 0;
3641 }
3642 if (cpu_base == RX_CPU_BASE) {
3643 rc = tg3_rxcpu_pause(tp);
3644 } else {
3645 /*
3646 * There is only an Rx CPU for the 5750 derivative in the
3647 * BCM4785.
3648 */
3649 if (tg3_flag(tp, IS_SSB_CORE))
3650 return 0;
3651
3652 rc = tg3_txcpu_pause(tp);
3653 }
3654
3655 if (rc) {
3656 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3657 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3658 return -ENODEV;
3659 }
3660
3661 /* Clear firmware's nvram arbitration. */
3662 if (tg3_flag(tp, NVRAM))
3663 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3664 return 0;
3665 }
3666
3667 static int tg3_fw_data_len(struct tg3 *tp,
3668 const struct tg3_firmware_hdr *fw_hdr)
3669 {
3670 int fw_len;
3671
3672 /* Non-fragmented firmware has one firmware header followed by a
3673 * contiguous chunk of data to be written. The length field in that
3674 * header is not the length of data to be written but the complete
3675 * length of the bss. The data length is determined based on
3676 * tp->fw->size minus headers.
3677 *
3678 * Fragmented firmware has a main header followed by multiple
3679 * fragments. Each fragment is identical to non-fragmented firmware,
3680 * with a firmware header followed by a contiguous chunk of data. In
3681 * the main header, the length field is unused and set to 0xffffffff.
3682 * In each fragment header the length is the entire size of that
3683 * fragment i.e. fragment data + header length. Data length is
3684 * therefore the length field in the header minus TG3_FW_HDR_LEN.
3685 */
3686 if (tp->fw_len == 0xffffffff)
3687 fw_len = be32_to_cpu(fw_hdr->len);
3688 else
3689 fw_len = tp->fw->size;
3690
3691 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3692 }
3693
3694 /* tp->lock is held. */
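/* Copy firmware into a CPU's scratch memory. For each fragment, the
 * header's base_addr (masked to 16 bits) gives the offset within the
 * scratch window and tg3_fw_data_len() the payload word count. The
 * 57766 skips the halt and zero-fill steps because its boot code is
 * already running a service loop at this point.
 */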
3695 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3696 u32 cpu_scratch_base, int cpu_scratch_size,
3697 const struct tg3_firmware_hdr *fw_hdr)
3698 {
3699 int err, i;
3700 void (*write_op)(struct tg3 *, u32, u32);
3701 int total_len = tp->fw->size;
3702
3703 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3704 netdev_err(tp->dev,
3705 "%s: Trying to load TX cpu firmware which is 5705\n",
3706 __func__);
3707 return -EINVAL;
3708 }
3709
3710 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3711 write_op = tg3_write_mem;
3712 else
3713 write_op = tg3_write_indirect_reg32;
3714
3715 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3716 /* It is possible that bootcode is still loading at this point.
3717 * Get the nvram lock first before halting the cpu.
3718 */
3719 int lock_err = tg3_nvram_lock(tp);
3720 err = tg3_halt_cpu(tp, cpu_base);
3721 if (!lock_err)
3722 tg3_nvram_unlock(tp);
3723 if (err)
3724 goto out;
3725
3726 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3727 write_op(tp, cpu_scratch_base + i, 0);
3728 tw32(cpu_base + CPU_STATE, 0xffffffff);
3729 tw32(cpu_base + CPU_MODE,
3730 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3731 } else {
3732 /* Subtract additional main header for fragmented firmware and
3733 * advance to the first fragment
3734 */
3735 total_len -= TG3_FW_HDR_LEN;
3736 fw_hdr++;
3737 }
3738
3739 do {
3740 u32 *fw_data = (u32 *)(fw_hdr + 1);
3741 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3742 write_op(tp, cpu_scratch_base +
3743 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3744 (i * sizeof(u32)),
3745 be32_to_cpu(fw_data[i]));
3746
3747 total_len -= be32_to_cpu(fw_hdr->len);
3748
3749 /* Advance to next fragment */
3750 fw_hdr = (struct tg3_firmware_hdr *)
3751 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3752 } while (total_len > 0);
3753
3754 err = 0;
3755
3756 out:
3757 return err;
3758 }
3759
3760 /* tp->lock is held. */
3761 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3762 {
3763 int i;
3764 const int iters = 5;
3765
3766 tw32(cpu_base + CPU_STATE, 0xffffffff);
3767 tw32_f(cpu_base + CPU_PC, pc);
3768
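/* Verify that the PC actually latched.  If it did not, re-halt the
 * CPU and rewrite the PC, retrying up to 'iters' times roughly 1 ms
 * apart before giving up with -EBUSY.
 */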
3769 for (i = 0; i < iters; i++) {
3770 if (tr32(cpu_base + CPU_PC) == pc)
3771 break;
3772 tw32(cpu_base + CPU_STATE, 0xffffffff);
3773 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3774 tw32_f(cpu_base + CPU_PC, pc);
3775 udelay(1000);
3776 }
3777
3778 return (i == iters) ? -EBUSY : 0;
3779 }
3780
3781 /* tp->lock is held. */
3782 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3783 {
3784 const struct tg3_firmware_hdr *fw_hdr;
3785 int err;
3786
3787 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3788
3789 /* The firmware blob starts with version numbers, followed by the
3790 * start address and length. The length field holds the complete
3791 * length: length = end_address_of_bss - start_address_of_text.
3792 * The remainder is the blob to be loaded contiguously
3793 * from the start address. */
3794
3795 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3796 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3797 fw_hdr);
3798 if (err)
3799 return err;
3800
3801 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3802 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3803 fw_hdr);
3804 if (err)
3805 return err;
3806
3807 /* Now startup only the RX cpu. */
3808 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3809 be32_to_cpu(fw_hdr->base_addr));
3810 if (err) {
3811 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3812 "should be %08x\n", __func__,
3813 tr32(RX_CPU_BASE + CPU_PC),
3814 be32_to_cpu(fw_hdr->base_addr));
3815 return -ENODEV;
3816 }
3817
3818 tg3_rxcpu_resume(tp);
3819
3820 return 0;
3821 }
3822
3823 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3824 {
3825 const int iters = 1000;
3826 int i;
3827 u32 val;
3828
3829 /* Wait for boot code to complete initialization and enter service
3830 * loop. It is then safe to download service patches.
3831 */
3832 for (i = 0; i < iters; i++) {
3833 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3834 break;
3835
3836 udelay(10);
3837 }
3838
3839 if (i == iters) {
3840 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3841 return -EBUSY;
3842 }
3843
3844 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3845 if (val & 0xff) {
3846 netdev_warn(tp->dev,
3847 "Other patches exist. Not downloading EEE patch\n");
3848 return -EEXIST;
3849 }
3850
3851 return 0;
3852 }
3853
3854 /* tp->lock is held. */
3855 static void tg3_load_57766_firmware(struct tg3 *tp)
3856 {
3857 struct tg3_firmware_hdr *fw_hdr;
3858
3859 if (!tg3_flag(tp, NO_NVRAM))
3860 return;
3861
3862 if (tg3_validate_rxcpu_state(tp))
3863 return;
3864
3865 if (!tp->fw)
3866 return;
3867
3868 /* This firmware blob has a different format from older firmware
3869 * releases, as described below. The main difference is that we have
3870 * fragmented data to be written to non-contiguous locations.
3871 *
3872 * In the beginning we have a firmware header identical to other
3873 * firmware, consisting of version, base addr and length. The length
3874 * here is unused and set to 0xffffffff.
3875 *
3876 * This is followed by a series of firmware fragments, each of which
3877 * is individually identical to older firmware, i.e. a firmware
3878 * header followed by that fragment's data. The version field of the
3879 * individual fragment headers is unused.
3880 */
3881
3882 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3883 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3884 return;
3885
3886 if (tg3_rxcpu_pause(tp))
3887 return;
3888
3889 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3890 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3891
3892 tg3_rxcpu_resume(tp);
3893 }
3894
3895 /* tp->lock is held. */
3896 static int tg3_load_tso_firmware(struct tg3 *tp)
3897 {
3898 const struct tg3_firmware_hdr *fw_hdr;
3899 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3900 int err;
3901
3902 if (!tg3_flag(tp, FW_TSO))
3903 return 0;
3904
3905 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3906
3907 /* The firmware blob starts with version numbers, followed by the
3908 * start address and length. The length field holds the complete
3909 * length: length = end_address_of_bss - start_address_of_text.
3910 * The remainder is the blob to be loaded contiguously
3911 * from the start address. */
3912
3913 cpu_scratch_size = tp->fw_len;
3914
3915 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3916 cpu_base = RX_CPU_BASE;
3917 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3918 } else {
3919 cpu_base = TX_CPU_BASE;
3920 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3921 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3922 }
3923
3924 err = tg3_load_firmware_cpu(tp, cpu_base,
3925 cpu_scratch_base, cpu_scratch_size,
3926 fw_hdr);
3927 if (err)
3928 return err;
3929
3930 /* Now startup the cpu. */
3931 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3932 be32_to_cpu(fw_hdr->base_addr));
3933 if (err) {
3934 netdev_err(tp->dev,
3935 "%s fails to set CPU PC, is %08x should be %08x\n",
3936 __func__, tr32(cpu_base + CPU_PC),
3937 be32_to_cpu(fw_hdr->base_addr));
3938 return -ENODEV;
3939 }
3940
3941 tg3_resume_cpu(tp, cpu_base);
3942 return 0;
3943 }
3944
3945 /* tp->lock is held. */
3946 static void __tg3_set_one_mac_addr(struct tg3 *tp, const u8 *mac_addr,
3947 int index)
3948 {
3949 u32 addr_high, addr_low;
3950
3951 addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3952 addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3953 (mac_addr[4] << 8) | mac_addr[5]);
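/* Packing example (illustrative address): 00:10:18:aa:bb:cc yields
 * addr_high = 0x00000010 and addr_low = 0x18aabbcc.
 */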
3954
3955 if (index < 4) {
3956 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3957 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3958 } else {
3959 index -= 4;
3960 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3961 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3962 }
3963 }
3964
3965 /* tp->lock is held. */
3966 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3967 {
3968 u32 addr_high;
3969 int i;
3970
3971 for (i = 0; i < 4; i++) {
3972 if (i == 1 && skip_mac_1)
3973 continue;
3974 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3975 }
3976
3977 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3978 tg3_asic_rev(tp) == ASIC_REV_5704) {
3979 for (i = 4; i < 16; i++)
3980 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3981 }
3982
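/* Seed the transmit backoff generator with the byte sum of the MAC
 * address (masked with TX_BACKOFF_SEED_MASK) so that stations with
 * different addresses tend to choose different backoff slots.
 */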
3983 addr_high = (tp->dev->dev_addr[0] +
3984 tp->dev->dev_addr[1] +
3985 tp->dev->dev_addr[2] +
3986 tp->dev->dev_addr[3] +
3987 tp->dev->dev_addr[4] +
3988 tp->dev->dev_addr[5]) &
3989 TX_BACKOFF_SEED_MASK;
3990 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3991 }
3992
3993 static void tg3_enable_register_access(struct tg3 *tp)
3994 {
3995 /*
3996 * Make sure register accesses (indirect or otherwise) will function
3997 * correctly.
3998 */
3999 pci_write_config_dword(tp->pdev,
4000 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4001 }
4002
4003 static int tg3_power_up(struct tg3 *tp)
4004 {
4005 int err;
4006
4007 tg3_enable_register_access(tp);
4008
4009 err = pci_set_power_state(tp->pdev, PCI_D0);
4010 if (!err) {
4011 /* Switch out of Vaux if it is a NIC */
4012 tg3_pwrsrc_switch_to_vmain(tp);
4013 } else {
4014 netdev_err(tp->dev, "Transition to D0 failed\n");
4015 }
4016
4017 return err;
4018 }
4019
4020 static int tg3_setup_phy(struct tg3 *, bool);
4021
4022 static int tg3_power_down_prepare(struct tg3 *tp)
4023 {
4024 u32 misc_host_ctrl;
4025 bool device_should_wake, do_low_power;
4026
4027 tg3_enable_register_access(tp);
4028
4029 /* Restore the CLKREQ setting. */
4030 if (tg3_flag(tp, CLKREQ_BUG))
4031 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4032 PCI_EXP_LNKCTL_CLKREQ_EN);
4033
4034 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4035 tw32(TG3PCI_MISC_HOST_CTRL,
4036 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4037
4038 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4039 tg3_flag(tp, WOL_ENABLE);
4040
4041 if (tg3_flag(tp, USE_PHYLIB)) {
4042 do_low_power = false;
4043 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4044 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4045 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
4046 struct phy_device *phydev;
4047 u32 phyid;
4048
4049 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4050
4051 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4052
4053 tp->link_config.speed = phydev->speed;
4054 tp->link_config.duplex = phydev->duplex;
4055 tp->link_config.autoneg = phydev->autoneg;
4056 ethtool_convert_link_mode_to_legacy_u32(
4057 &tp->link_config.advertising,
4058 phydev->advertising);
4059
4060 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
4061 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
4062 advertising);
4063 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
4064 advertising);
4065 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
4066 advertising);
4067
4068 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4069 if (tg3_flag(tp, WOL_SPEED_100MB)) {
4070 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
4071 advertising);
4072 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
4073 advertising);
4074 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4075 advertising);
4076 } else {
4077 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4078 advertising);
4079 }
4080 }
4081
4082 linkmode_copy(phydev->advertising, advertising);
4083 phy_start_aneg(phydev);
4084
4085 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4086 if (phyid != PHY_ID_BCMAC131) {
4087 phyid &= PHY_BCM_OUI_MASK;
4088 if (phyid == PHY_BCM_OUI_1 ||
4089 phyid == PHY_BCM_OUI_2 ||
4090 phyid == PHY_BCM_OUI_3)
4091 do_low_power = true;
4092 }
4093 }
4094 } else {
4095 do_low_power = true;
4096
4097 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4098 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4099
4100 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4101 tg3_setup_phy(tp, false);
4102 }
4103
4104 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4105 u32 val;
4106
4107 val = tr32(GRC_VCPU_EXT_CTRL);
4108 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4109 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4110 int i;
4111 u32 val;
4112
4113 for (i = 0; i < 200; i++) {
4114 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4115 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4116 break;
4117 msleep(1);
4118 }
4119 }
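/* Post the shutdown/WOL state to the firmware mailbox so the
 * bootcode knows the driver is going down with magic-packet wake
 * armed.
 */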
4120 if (tg3_flag(tp, WOL_CAP))
4121 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4122 WOL_DRV_STATE_SHUTDOWN |
4123 WOL_DRV_WOL |
4124 WOL_SET_MAGIC_PKT);
4125
4126 if (device_should_wake) {
4127 u32 mac_mode;
4128
4129 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4130 if (do_low_power &&
4131 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4132 tg3_phy_auxctl_write(tp,
4133 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4134 MII_TG3_AUXCTL_PCTL_WOL_EN |
4135 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4136 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4137 udelay(40);
4138 }
4139
4140 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4141 mac_mode = MAC_MODE_PORT_MODE_GMII;
4142 else if (tp->phy_flags &
4143 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4144 if (tp->link_config.active_speed == SPEED_1000)
4145 mac_mode = MAC_MODE_PORT_MODE_GMII;
4146 else
4147 mac_mode = MAC_MODE_PORT_MODE_MII;
4148 } else
4149 mac_mode = MAC_MODE_PORT_MODE_MII;
4150
4151 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4152 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4153 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4154 SPEED_100 : SPEED_10;
4155 if (tg3_5700_link_polarity(tp, speed))
4156 mac_mode |= MAC_MODE_LINK_POLARITY;
4157 else
4158 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4159 }
4160 } else {
4161 mac_mode = MAC_MODE_PORT_MODE_TBI;
4162 }
4163
4164 if (!tg3_flag(tp, 5750_PLUS))
4165 tw32(MAC_LED_CTRL, tp->led_ctrl);
4166
4167 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4168 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4169 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4170 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4171
4172 if (tg3_flag(tp, ENABLE_APE))
4173 mac_mode |= MAC_MODE_APE_TX_EN |
4174 MAC_MODE_APE_RX_EN |
4175 MAC_MODE_TDE_ENABLE;
4176
4177 tw32_f(MAC_MODE, mac_mode);
4178 udelay(100);
4179
4180 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4181 udelay(10);
4182 }
4183
4184 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4185 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4186 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4187 u32 base_val;
4188
4189 base_val = tp->pci_clock_ctrl;
4190 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4191 CLOCK_CTRL_TXCLK_DISABLE);
4192
4193 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4194 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4195 } else if (tg3_flag(tp, 5780_CLASS) ||
4196 tg3_flag(tp, CPMU_PRESENT) ||
4197 tg3_asic_rev(tp) == ASIC_REV_5906) {
4198 /* do nothing */
4199 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4200 u32 newbits1, newbits2;
4201
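/* Apply the clock changes in two stages: newbits1 first, then the
 * stricter newbits2 superset, allowing 40 us of settling time after
 * each write (the third argument to tw32_wait_f()).
 */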
4202 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4203 tg3_asic_rev(tp) == ASIC_REV_5701) {
4204 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4205 CLOCK_CTRL_TXCLK_DISABLE |
4206 CLOCK_CTRL_ALTCLK);
4207 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4208 } else if (tg3_flag(tp, 5705_PLUS)) {
4209 newbits1 = CLOCK_CTRL_625_CORE;
4210 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4211 } else {
4212 newbits1 = CLOCK_CTRL_ALTCLK;
4213 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4214 }
4215
4216 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4217 40);
4218
4219 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4220 40);
4221
4222 if (!tg3_flag(tp, 5705_PLUS)) {
4223 u32 newbits3;
4224
4225 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4226 tg3_asic_rev(tp) == ASIC_REV_5701) {
4227 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4228 CLOCK_CTRL_TXCLK_DISABLE |
4229 CLOCK_CTRL_44MHZ_CORE);
4230 } else {
4231 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4232 }
4233
4234 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4235 tp->pci_clock_ctrl | newbits3, 40);
4236 }
4237 }
4238
4239 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4240 tg3_power_down_phy(tp, do_low_power);
4241
4242 tg3_frob_aux_power(tp, true);
4243
4244 /* Workaround for unstable PLL clock */
4245 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4246 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4247 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4248 u32 val = tr32(0x7d00);
4249
4250 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4251 tw32(0x7d00, val);
4252 if (!tg3_flag(tp, ENABLE_ASF)) {
4253 int err;
4254
4255 err = tg3_nvram_lock(tp);
4256 tg3_halt_cpu(tp, RX_CPU_BASE);
4257 if (!err)
4258 tg3_nvram_unlock(tp);
4259 }
4260 }
4261
4262 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4263
4264 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4265
4266 return 0;
4267 }
4268
4269 static void tg3_power_down(struct tg3 *tp)
4270 {
4271 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4272 pci_set_power_state(tp->pdev, PCI_D3hot);
4273 }
4274
4275 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
4276 {
4277 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4278 case MII_TG3_AUX_STAT_10HALF:
4279 *speed = SPEED_10;
4280 *duplex = DUPLEX_HALF;
4281 break;
4282
4283 case MII_TG3_AUX_STAT_10FULL:
4284 *speed = SPEED_10;
4285 *duplex = DUPLEX_FULL;
4286 break;
4287
4288 case MII_TG3_AUX_STAT_100HALF:
4289 *speed = SPEED_100;
4290 *duplex = DUPLEX_HALF;
4291 break;
4292
4293 case MII_TG3_AUX_STAT_100FULL:
4294 *speed = SPEED_100;
4295 *duplex = DUPLEX_FULL;
4296 break;
4297
4298 case MII_TG3_AUX_STAT_1000HALF:
4299 *speed = SPEED_1000;
4300 *duplex = DUPLEX_HALF;
4301 break;
4302
4303 case MII_TG3_AUX_STAT_1000FULL:
4304 *speed = SPEED_1000;
4305 *duplex = DUPLEX_FULL;
4306 break;
4307
4308 default:
4309 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4310 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4311 SPEED_10;
4312 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4313 DUPLEX_HALF;
4314 break;
4315 }
4316 *speed = SPEED_UNKNOWN;
4317 *duplex = DUPLEX_UNKNOWN;
4318 break;
4319 }
4320 }
4321
4322 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4323 {
4324 int err = 0;
4325 u32 val, new_adv;
4326
4327 new_adv = ADVERTISE_CSMA;
4328 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4329 new_adv |= mii_advertise_flowctrl(flowctrl);
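/* For example, advertising all 10/100 modes with both TX and RX
 * pause would yield ADVERTISE_CSMA | ADVERTISE_ALL |
 * ADVERTISE_PAUSE_CAP here (illustrative; the exact bits depend on
 * the caller's advertise/flowctrl masks).
 */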
4330
4331 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4332 if (err)
4333 goto done;
4334
4335 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4336 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4337
4338 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4339 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4340 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4341
4342 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4343 if (err)
4344 goto done;
4345 }
4346
4347 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4348 goto done;
4349
4350 tw32(TG3_CPMU_EEE_MODE,
4351 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4352
4353 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4354 if (!err) {
4355 u32 err2;
4356
4357 val = 0;
4358 /* Advertise 100-BaseTX EEE ability */
4359 if (advertise & ADVERTISED_100baseT_Full)
4360 val |= MDIO_AN_EEE_ADV_100TX;
4361 /* Advertise 1000-BaseT EEE ability */
4362 if (advertise & ADVERTISED_1000baseT_Full)
4363 val |= MDIO_AN_EEE_ADV_1000T;
4364
4365 if (!tp->eee.eee_enabled) {
4366 val = 0;
4367 tp->eee.advertised = 0;
4368 } else {
4369 tp->eee.advertised = advertise &
4370 (ADVERTISED_100baseT_Full |
4371 ADVERTISED_1000baseT_Full);
4372 }
4373
4374 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4375 if (err)
4376 val = 0;
4377
4378 switch (tg3_asic_rev(tp)) {
4379 case ASIC_REV_5717:
4380 case ASIC_REV_57765:
4381 case ASIC_REV_57766:
4382 case ASIC_REV_5719:
4383 /* If we advertised any EEE abilities above... */
4384 if (val)
4385 val = MII_TG3_DSP_TAP26_ALNOKO |
4386 MII_TG3_DSP_TAP26_RMRXSTO |
4387 MII_TG3_DSP_TAP26_OPCSINPT;
4388 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4389 fallthrough;
4390 case ASIC_REV_5720:
4391 case ASIC_REV_5762:
4392 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4393 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4394 MII_TG3_DSP_CH34TP2_HIBW01);
4395 }
4396
4397 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4398 if (!err)
4399 err = err2;
4400 }
4401
4402 done:
4403 return err;
4404 }
4405
4406 static void tg3_phy_copper_begin(struct tg3 *tp)
4407 {
4408 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4409 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4410 u32 adv, fc;
4411
4412 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4413 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4414 adv = ADVERTISED_10baseT_Half |
4415 ADVERTISED_10baseT_Full;
4416 if (tg3_flag(tp, WOL_SPEED_100MB))
4417 adv |= ADVERTISED_100baseT_Half |
4418 ADVERTISED_100baseT_Full;
4419 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4420 if (!(tp->phy_flags &
4421 TG3_PHYFLG_DISABLE_1G_HD_ADV))
4422 adv |= ADVERTISED_1000baseT_Half;
4423 adv |= ADVERTISED_1000baseT_Full;
4424 }
4425
4426 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4427 } else {
4428 adv = tp->link_config.advertising;
4429 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4430 adv &= ~(ADVERTISED_1000baseT_Half |
4431 ADVERTISED_1000baseT_Full);
4432
4433 fc = tp->link_config.flowctrl;
4434 }
4435
4436 tg3_phy_autoneg_cfg(tp, adv, fc);
4437
4438 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4439 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4440 /* Normally during power down we want to autonegotiate
4441 * the lowest possible speed for WOL. However, to avoid
4442 * link flap, we leave it untouched.
4443 */
4444 return;
4445 }
4446
4447 tg3_writephy(tp, MII_BMCR,
4448 BMCR_ANENABLE | BMCR_ANRESTART);
4449 } else {
4450 int i;
4451 u32 bmcr, orig_bmcr;
4452
4453 tp->link_config.active_speed = tp->link_config.speed;
4454 tp->link_config.active_duplex = tp->link_config.duplex;
4455
4456 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4457 /* With autoneg disabled, 5715 only links up when the
4458 * advertisement register has the configured speed
4459 * enabled.
4460 */
4461 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4462 }
4463
4464 bmcr = 0;
4465 switch (tp->link_config.speed) {
4466 default:
4467 case SPEED_10:
4468 break;
4469
4470 case SPEED_100:
4471 bmcr |= BMCR_SPEED100;
4472 break;
4473
4474 case SPEED_1000:
4475 bmcr |= BMCR_SPEED1000;
4476 break;
4477 }
4478
4479 if (tp->link_config.duplex == DUPLEX_FULL)
4480 bmcr |= BMCR_FULLDPLX;
4481
4482 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4483 (bmcr != orig_bmcr)) {
4484 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
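/* Force the link down by placing the PHY in loopback, poll for up
 * to ~15 ms for BMSR_LSTATUS to clear, then program the new
 * forced-speed/duplex BMCR value.
 */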
4485 for (i = 0; i < 1500; i++) {
4486 u32 tmp;
4487
4488 udelay(10);
4489 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4490 tg3_readphy(tp, MII_BMSR, &tmp))
4491 continue;
4492 if (!(tmp & BMSR_LSTATUS)) {
4493 udelay(40);
4494 break;
4495 }
4496 }
4497 tg3_writephy(tp, MII_BMCR, bmcr);
4498 udelay(40);
4499 }
4500 }
4501 }
4502
4503 static int tg3_phy_pull_config(struct tg3 *tp)
4504 {
4505 int err;
4506 u32 val;
4507
4508 err = tg3_readphy(tp, MII_BMCR, &val);
4509 if (err)
4510 goto done;
4511
4512 if (!(val & BMCR_ANENABLE)) {
4513 tp->link_config.autoneg = AUTONEG_DISABLE;
4514 tp->link_config.advertising = 0;
4515 tg3_flag_clear(tp, PAUSE_AUTONEG);
4516
4517 err = -EIO;
4518
4519 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4520 case 0:
4521 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4522 goto done;
4523
4524 tp->link_config.speed = SPEED_10;
4525 break;
4526 case BMCR_SPEED100:
4527 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4528 goto done;
4529
4530 tp->link_config.speed = SPEED_100;
4531 break;
4532 case BMCR_SPEED1000:
4533 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4534 tp->link_config.speed = SPEED_1000;
4535 break;
4536 }
4537 fallthrough;
4538 default:
4539 goto done;
4540 }
4541
4542 if (val & BMCR_FULLDPLX)
4543 tp->link_config.duplex = DUPLEX_FULL;
4544 else
4545 tp->link_config.duplex = DUPLEX_HALF;
4546
4547 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4548
4549 err = 0;
4550 goto done;
4551 }
4552
4553 tp->link_config.autoneg = AUTONEG_ENABLE;
4554 tp->link_config.advertising = ADVERTISED_Autoneg;
4555 tg3_flag_set(tp, PAUSE_AUTONEG);
4556
4557 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4558 u32 adv;
4559
4560 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4561 if (err)
4562 goto done;
4563
4564 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4565 tp->link_config.advertising |= adv | ADVERTISED_TP;
4566
4567 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4568 } else {
4569 tp->link_config.advertising |= ADVERTISED_FIBRE;
4570 }
4571
4572 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4573 u32 adv;
4574
4575 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4576 err = tg3_readphy(tp, MII_CTRL1000, &val);
4577 if (err)
4578 goto done;
4579
4580 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4581 } else {
4582 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4583 if (err)
4584 goto done;
4585
4586 adv = tg3_decode_flowctrl_1000X(val);
4587 tp->link_config.flowctrl = adv;
4588
4589 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4590 adv = mii_adv_to_ethtool_adv_x(val);
4591 }
4592
4593 tp->link_config.advertising |= adv;
4594 }
4595
4596 done:
4597 return err;
4598 }
4599
4600 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4601 {
4602 int err;
4603
4604 /* Turn off tap power management. */
4605 /* Set Extended packet length bit */
4606 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4607
4608 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4609 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4610 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4611 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4612 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4613
4614 udelay(40);
4615
4616 return err;
4617 }
4618
4619 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4620 {
4621 struct ethtool_eee eee;
4622
4623 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4624 return true;
4625
4626 tg3_eee_pull_config(tp, &eee);
4627
4628 if (tp->eee.eee_enabled) {
4629 if (tp->eee.advertised != eee.advertised ||
4630 tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4631 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4632 return false;
4633 } else {
4634 /* EEE is disabled but we're advertising */
4635 if (eee.advertised)
4636 return false;
4637 }
4638
4639 return true;
4640 }
4641
4642 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4643 {
4644 u32 advmsk, tgtadv, advertising;
4645
4646 advertising = tp->link_config.advertising;
4647 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4648
4649 advmsk = ADVERTISE_ALL;
4650 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4651 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4652 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4653 }
4654
4655 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4656 return false;
4657
4658 if ((*lcladv & advmsk) != tgtadv)
4659 return false;
4660
4661 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4662 u32 tg3_ctrl;
4663
4664 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4665
4666 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4667 return false;
4668
4669 if (tgtadv &&
4670 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4671 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4672 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4673 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4674 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4675 } else {
4676 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4677 }
4678
4679 if (tg3_ctrl != tgtadv)
4680 return false;
4681 }
4682
4683 return true;
4684 }
4685
4686 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4687 {
4688 u32 lpeth = 0;
4689
4690 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4691 u32 val;
4692
4693 if (tg3_readphy(tp, MII_STAT1000, &val))
4694 return false;
4695
4696 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4697 }
4698
4699 if (tg3_readphy(tp, MII_LPA, rmtadv))
4700 return false;
4701
4702 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4703 tp->link_config.rmt_adv = lpeth;
4704
4705 return true;
4706 }
4707
4708 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4709 {
4710 if (curr_link_up != tp->link_up) {
4711 if (curr_link_up) {
4712 netif_carrier_on(tp->dev);
4713 } else {
4714 netif_carrier_off(tp->dev);
4715 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4716 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4717 }
4718
4719 tg3_link_report(tp);
4720 return true;
4721 }
4722
4723 return false;
4724 }
4725
4726 static void tg3_clear_mac_status(struct tg3 *tp)
4727 {
4728 tw32(MAC_EVENT, 0);
4729
4730 tw32_f(MAC_STATUS,
4731 MAC_STATUS_SYNC_CHANGED |
4732 MAC_STATUS_CFG_CHANGED |
4733 MAC_STATUS_MI_COMPLETION |
4734 MAC_STATUS_LNKSTATE_CHANGED);
4735 udelay(40);
4736 }
4737
4738 static void tg3_setup_eee(struct tg3 *tp)
4739 {
4740 u32 val;
4741
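/* Programming order below: link-idle detection sources first, then
 * the LPI exit timer, the EEE mode enables (gated on
 * tp->eee.eee_enabled), and finally the two debounce timers.
 */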
4742 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4743 TG3_CPMU_EEE_LNKIDL_UART_IDL;
4744 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4745 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4746
4747 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4748
4749 tw32_f(TG3_CPMU_EEE_CTRL,
4750 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4751
4752 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4753 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4754 TG3_CPMU_EEEMD_LPI_IN_RX |
4755 TG3_CPMU_EEEMD_EEE_ENABLE;
4756
4757 if (tg3_asic_rev(tp) != ASIC_REV_5717)
4758 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4759
4760 if (tg3_flag(tp, ENABLE_APE))
4761 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4762
4763 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4764
4765 tw32_f(TG3_CPMU_EEE_DBTMR1,
4766 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4767 (tp->eee.tx_lpi_timer & 0xffff));
4768
4769 tw32_f(TG3_CPMU_EEE_DBTMR2,
4770 TG3_CPMU_DBTMR2_APE_TX_2047US |
4771 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4772 }
4773
4774 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4775 {
4776 bool current_link_up;
4777 u32 bmsr, val;
4778 u32 lcl_adv, rmt_adv;
4779 u32 current_speed;
4780 u8 current_duplex;
4781 int i, err;
4782
4783 tg3_clear_mac_status(tp);
4784
4785 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4786 tw32_f(MAC_MI_MODE,
4787 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4788 udelay(80);
4789 }
4790
4791 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4792
4793 /* Some third-party PHYs need to be reset on link going
4794 * down.
4795 */
4796 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4797 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4798 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4799 tp->link_up) {
4800 tg3_readphy(tp, MII_BMSR, &bmsr);
4801 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4802 !(bmsr & BMSR_LSTATUS))
4803 force_reset = true;
4804 }
4805 if (force_reset)
4806 tg3_phy_reset(tp);
4807
4808 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4809 tg3_readphy(tp, MII_BMSR, &bmsr);
4810 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4811 !tg3_flag(tp, INIT_COMPLETE))
4812 bmsr = 0;
4813
4814 if (!(bmsr & BMSR_LSTATUS)) {
4815 err = tg3_init_5401phy_dsp(tp);
4816 if (err)
4817 return err;
4818
4819 tg3_readphy(tp, MII_BMSR, &bmsr);
4820 for (i = 0; i < 1000; i++) {
4821 udelay(10);
4822 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4823 (bmsr & BMSR_LSTATUS)) {
4824 udelay(40);
4825 break;
4826 }
4827 }
4828
4829 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4830 TG3_PHY_REV_BCM5401_B0 &&
4831 !(bmsr & BMSR_LSTATUS) &&
4832 tp->link_config.active_speed == SPEED_1000) {
4833 err = tg3_phy_reset(tp);
4834 if (!err)
4835 err = tg3_init_5401phy_dsp(tp);
4836 if (err)
4837 return err;
4838 }
4839 }
4840 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4841 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4842 /* 5701 {A0,B0} CRC bug workaround */
4843 tg3_writephy(tp, 0x15, 0x0a75);
4844 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4845 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4846 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4847 }
4848
4849 /* Clear pending interrupts... */
4850 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4851 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4852
4853 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4854 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4855 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4856 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4857
4858 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4859 tg3_asic_rev(tp) == ASIC_REV_5701) {
4860 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4861 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4862 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4863 else
4864 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4865 }
4866
4867 current_link_up = false;
4868 current_speed = SPEED_UNKNOWN;
4869 current_duplex = DUPLEX_UNKNOWN;
4870 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4871 tp->link_config.rmt_adv = 0;
4872
4873 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4874 err = tg3_phy_auxctl_read(tp,
4875 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4876 &val);
4877 if (!err && !(val & (1 << 10))) {
4878 tg3_phy_auxctl_write(tp,
4879 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4880 val | (1 << 10));
4881 goto relink;
4882 }
4883 }
4884
4885 bmsr = 0;
4886 for (i = 0; i < 100; i++) {
4887 tg3_readphy(tp, MII_BMSR, &bmsr);
4888 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4889 (bmsr & BMSR_LSTATUS))
4890 break;
4891 udelay(40);
4892 }
4893
4894 if (bmsr & BMSR_LSTATUS) {
4895 u32 aux_stat, bmcr;
4896
4897 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4898 for (i = 0; i < 2000; i++) {
4899 udelay(10);
4900 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4901 aux_stat)
4902 break;
4903 }
4904
4905 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4906 &current_speed,
4907 &current_duplex);
4908
4909 bmcr = 0;
4910 for (i = 0; i < 200; i++) {
4911 tg3_readphy(tp, MII_BMCR, &bmcr);
4912 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4913 continue;
4914 if (bmcr && bmcr != 0x7fff)
4915 break;
4916 udelay(10);
4917 }
4918
4919 lcl_adv = 0;
4920 rmt_adv = 0;
4921
4922 tp->link_config.active_speed = current_speed;
4923 tp->link_config.active_duplex = current_duplex;
4924
4925 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4926 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4927
4928 if ((bmcr & BMCR_ANENABLE) &&
4929 eee_config_ok &&
4930 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4931 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4932 current_link_up = true;
4933
4934 /* EEE setting changes take effect only after a phy
4935 * reset. If we have skipped a reset due to Link Flap
4936 * Avoidance being enabled, do it now.
4937 */
4938 if (!eee_config_ok &&
4939 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4940 !force_reset) {
4941 tg3_setup_eee(tp);
4942 tg3_phy_reset(tp);
4943 }
4944 } else {
4945 if (!(bmcr & BMCR_ANENABLE) &&
4946 tp->link_config.speed == current_speed &&
4947 tp->link_config.duplex == current_duplex) {
4948 current_link_up = true;
4949 }
4950 }
4951
4952 if (current_link_up &&
4953 tp->link_config.active_duplex == DUPLEX_FULL) {
4954 u32 reg, bit;
4955
4956 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4957 reg = MII_TG3_FET_GEN_STAT;
4958 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4959 } else {
4960 reg = MII_TG3_EXT_STAT;
4961 bit = MII_TG3_EXT_STAT_MDIX;
4962 }
4963
4964 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4965 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4966
4967 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4968 }
4969 }
4970
4971 relink:
4972 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4973 tg3_phy_copper_begin(tp);
4974
4975 if (tg3_flag(tp, ROBOSWITCH)) {
4976 current_link_up = true;
4977 /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4978 current_speed = SPEED_1000;
4979 current_duplex = DUPLEX_FULL;
4980 tp->link_config.active_speed = current_speed;
4981 tp->link_config.active_duplex = current_duplex;
4982 }
4983
4984 tg3_readphy(tp, MII_BMSR, &bmsr);
4985 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4986 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4987 current_link_up = true;
4988 }
4989
4990 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4991 if (current_link_up) {
4992 if (tp->link_config.active_speed == SPEED_100 ||
4993 tp->link_config.active_speed == SPEED_10)
4994 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4995 else
4996 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4997 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4998 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4999 else
5000 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5001
5002 /* In order for the 5750 core in the BCM4785 chip to work properly
5003 * in RGMII mode, the LED Control Register must be set up.
5004 */
5005 if (tg3_flag(tp, RGMII_MODE)) {
5006 u32 led_ctrl = tr32(MAC_LED_CTRL);
5007 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5008
5009 if (tp->link_config.active_speed == SPEED_10)
5010 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5011 else if (tp->link_config.active_speed == SPEED_100)
5012 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5013 LED_CTRL_100MBPS_ON);
5014 else if (tp->link_config.active_speed == SPEED_1000)
5015 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5016 LED_CTRL_1000MBPS_ON);
5017
5018 tw32(MAC_LED_CTRL, led_ctrl);
5019 udelay(40);
5020 }
5021
5022 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5023 if (tp->link_config.active_duplex == DUPLEX_HALF)
5024 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5025
5026 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5027 if (current_link_up &&
5028 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5029 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5030 else
5031 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5032 }
5033
5034 /* ??? Without this setting Netgear GA302T PHY does not
5035 * ??? send/receive packets...
5036 */
5037 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5038 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5039 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5040 tw32_f(MAC_MI_MODE, tp->mi_mode);
5041 udelay(80);
5042 }
5043
5044 tw32_f(MAC_MODE, tp->mac_mode);
5045 udelay(40);
5046
5047 tg3_phy_eee_adjust(tp, current_link_up);
5048
5049 if (tg3_flag(tp, USE_LINKCHG_REG)) {
5050 /* Polled via timer. */
5051 tw32_f(MAC_EVENT, 0);
5052 } else {
5053 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5054 }
5055 udelay(40);
5056
5057 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5058 current_link_up &&
5059 tp->link_config.active_speed == SPEED_1000 &&
5060 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5061 udelay(120);
5062 tw32_f(MAC_STATUS,
5063 (MAC_STATUS_SYNC_CHANGED |
5064 MAC_STATUS_CFG_CHANGED));
5065 udelay(40);
5066 tg3_write_mem(tp,
5067 NIC_SRAM_FIRMWARE_MBOX,
5068 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5069 }
5070
5071 /* Prevent send BD corruption. */
5072 if (tg3_flag(tp, CLKREQ_BUG)) {
5073 if (tp->link_config.active_speed == SPEED_100 ||
5074 tp->link_config.active_speed == SPEED_10)
5075 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5076 PCI_EXP_LNKCTL_CLKREQ_EN);
5077 else
5078 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5079 PCI_EXP_LNKCTL_CLKREQ_EN);
5080 }
5081
5082 tg3_test_and_report_link_chg(tp, current_link_up);
5083
5084 return 0;
5085 }
5086
5087 struct tg3_fiber_aneginfo {
5088 int state;
5089 #define ANEG_STATE_UNKNOWN 0
5090 #define ANEG_STATE_AN_ENABLE 1
5091 #define ANEG_STATE_RESTART_INIT 2
5092 #define ANEG_STATE_RESTART 3
5093 #define ANEG_STATE_DISABLE_LINK_OK 4
5094 #define ANEG_STATE_ABILITY_DETECT_INIT 5
5095 #define ANEG_STATE_ABILITY_DETECT 6
5096 #define ANEG_STATE_ACK_DETECT_INIT 7
5097 #define ANEG_STATE_ACK_DETECT 8
5098 #define ANEG_STATE_COMPLETE_ACK_INIT 9
5099 #define ANEG_STATE_COMPLETE_ACK 10
5100 #define ANEG_STATE_IDLE_DETECT_INIT 11
5101 #define ANEG_STATE_IDLE_DETECT 12
5102 #define ANEG_STATE_LINK_OK 13
5103 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
5104 #define ANEG_STATE_NEXT_PAGE_WAIT 15
5105
5106 u32 flags;
5107 #define MR_AN_ENABLE 0x00000001
5108 #define MR_RESTART_AN 0x00000002
5109 #define MR_AN_COMPLETE 0x00000004
5110 #define MR_PAGE_RX 0x00000008
5111 #define MR_NP_LOADED 0x00000010
5112 #define MR_TOGGLE_TX 0x00000020
5113 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
5114 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
5115 #define MR_LP_ADV_SYM_PAUSE 0x00000100
5116 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
5117 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5118 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5119 #define MR_LP_ADV_NEXT_PAGE 0x00001000
5120 #define MR_TOGGLE_RX 0x00002000
5121 #define MR_NP_RX 0x00004000
5122
5123 #define MR_LINK_OK 0x80000000
5124
5125 unsigned long link_time, cur_time;
5126
5127 u32 ability_match_cfg;
5128 int ability_match_count;
5129
5130 char ability_match, idle_match, ack_match;
5131
5132 u32 txconfig, rxconfig;
5133 #define ANEG_CFG_NP 0x00000080
5134 #define ANEG_CFG_ACK 0x00000040
5135 #define ANEG_CFG_RF2 0x00000020
5136 #define ANEG_CFG_RF1 0x00000010
5137 #define ANEG_CFG_PS2 0x00000001
5138 #define ANEG_CFG_PS1 0x00008000
5139 #define ANEG_CFG_HD 0x00004000
5140 #define ANEG_CFG_FD 0x00002000
5141 #define ANEG_CFG_INVAL 0x00001f06
5142
5143 };
5144 #define ANEG_OK 0
5145 #define ANEG_DONE 1
5146 #define ANEG_TIMER_ENAB 2
5147 #define ANEG_FAILED -1
5148
5149 #define ANEG_STATE_SETTLE_TIME 10000
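/* Nominal progression of the software autoneg state machine below:
 *
 *   AN_ENABLE -> RESTART_INIT -> RESTART -> ABILITY_DETECT_INIT ->
 *   ABILITY_DETECT -> ACK_DETECT_INIT -> ACK_DETECT ->
 *   COMPLETE_ACK_INIT -> COMPLETE_ACK -> IDLE_DETECT_INIT ->
 *   IDLE_DETECT -> LINK_OK
 *
 * Seeing the partner drop back to idle (an ability match with an
 * all-zero rxconfig) restarts the machine from AN_ENABLE.
 */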
5150
5151 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5152 struct tg3_fiber_aneginfo *ap)
5153 {
5154 u16 flowctrl;
5155 unsigned long delta;
5156 u32 rx_cfg_reg;
5157 int ret;
5158
5159 if (ap->state == ANEG_STATE_UNKNOWN) {
5160 ap->rxconfig = 0;
5161 ap->link_time = 0;
5162 ap->cur_time = 0;
5163 ap->ability_match_cfg = 0;
5164 ap->ability_match_count = 0;
5165 ap->ability_match = 0;
5166 ap->idle_match = 0;
5167 ap->ack_match = 0;
5168 }
5169 ap->cur_time++;
5170
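/* Sample the received configuration word.  ability_match is only
 * declared once the same word has been observed on consecutive
 * polls; ack_match mirrors the partner's ACK bit, and idle_match is
 * set while no config words are being received.
 */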
5171 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5172 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5173
5174 if (rx_cfg_reg != ap->ability_match_cfg) {
5175 ap->ability_match_cfg = rx_cfg_reg;
5176 ap->ability_match = 0;
5177 ap->ability_match_count = 0;
5178 } else {
5179 if (++ap->ability_match_count > 1) {
5180 ap->ability_match = 1;
5181 ap->ability_match_cfg = rx_cfg_reg;
5182 }
5183 }
5184 if (rx_cfg_reg & ANEG_CFG_ACK)
5185 ap->ack_match = 1;
5186 else
5187 ap->ack_match = 0;
5188
5189 ap->idle_match = 0;
5190 } else {
5191 ap->idle_match = 1;
5192 ap->ability_match_cfg = 0;
5193 ap->ability_match_count = 0;
5194 ap->ability_match = 0;
5195 ap->ack_match = 0;
5196
5197 rx_cfg_reg = 0;
5198 }
5199
5200 ap->rxconfig = rx_cfg_reg;
5201 ret = ANEG_OK;
5202
5203 switch (ap->state) {
5204 case ANEG_STATE_UNKNOWN:
5205 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5206 ap->state = ANEG_STATE_AN_ENABLE;
5207
5208 fallthrough;
5209 case ANEG_STATE_AN_ENABLE:
5210 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5211 if (ap->flags & MR_AN_ENABLE) {
5212 ap->link_time = 0;
5213 ap->cur_time = 0;
5214 ap->ability_match_cfg = 0;
5215 ap->ability_match_count = 0;
5216 ap->ability_match = 0;
5217 ap->idle_match = 0;
5218 ap->ack_match = 0;
5219
5220 ap->state = ANEG_STATE_RESTART_INIT;
5221 } else {
5222 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5223 }
5224 break;
5225
5226 case ANEG_STATE_RESTART_INIT:
5227 ap->link_time = ap->cur_time;
5228 ap->flags &= ~(MR_NP_LOADED);
5229 ap->txconfig = 0;
5230 tw32(MAC_TX_AUTO_NEG, 0);
5231 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5232 tw32_f(MAC_MODE, tp->mac_mode);
5233 udelay(40);
5234
5235 ret = ANEG_TIMER_ENAB;
5236 ap->state = ANEG_STATE_RESTART;
5237
5238 fallthrough;
5239 case ANEG_STATE_RESTART:
5240 delta = ap->cur_time - ap->link_time;
5241 if (delta > ANEG_STATE_SETTLE_TIME)
5242 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5243 else
5244 ret = ANEG_TIMER_ENAB;
5245 break;
5246
5247 case ANEG_STATE_DISABLE_LINK_OK:
5248 ret = ANEG_DONE;
5249 break;
5250
5251 case ANEG_STATE_ABILITY_DETECT_INIT:
5252 ap->flags &= ~(MR_TOGGLE_TX);
5253 ap->txconfig = ANEG_CFG_FD;
5254 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5255 if (flowctrl & ADVERTISE_1000XPAUSE)
5256 ap->txconfig |= ANEG_CFG_PS1;
5257 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5258 ap->txconfig |= ANEG_CFG_PS2;
5259 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5260 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5261 tw32_f(MAC_MODE, tp->mac_mode);
5262 udelay(40);
5263
5264 ap->state = ANEG_STATE_ABILITY_DETECT;
5265 break;
5266
5267 case ANEG_STATE_ABILITY_DETECT:
5268 if (ap->ability_match != 0 && ap->rxconfig != 0)
5269 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5270 break;
5271
5272 case ANEG_STATE_ACK_DETECT_INIT:
5273 ap->txconfig |= ANEG_CFG_ACK;
5274 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5275 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5276 tw32_f(MAC_MODE, tp->mac_mode);
5277 udelay(40);
5278
5279 ap->state = ANEG_STATE_ACK_DETECT;
5280
5281 fallthrough;
5282 case ANEG_STATE_ACK_DETECT:
5283 if (ap->ack_match != 0) {
5284 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5285 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5286 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5287 } else {
5288 ap->state = ANEG_STATE_AN_ENABLE;
5289 }
5290 } else if (ap->ability_match != 0 &&
5291 ap->rxconfig == 0) {
5292 ap->state = ANEG_STATE_AN_ENABLE;
5293 }
5294 break;
5295
5296 case ANEG_STATE_COMPLETE_ACK_INIT:
5297 if (ap->rxconfig & ANEG_CFG_INVAL) {
5298 ret = ANEG_FAILED;
5299 break;
5300 }
5301 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5302 MR_LP_ADV_HALF_DUPLEX |
5303 MR_LP_ADV_SYM_PAUSE |
5304 MR_LP_ADV_ASYM_PAUSE |
5305 MR_LP_ADV_REMOTE_FAULT1 |
5306 MR_LP_ADV_REMOTE_FAULT2 |
5307 MR_LP_ADV_NEXT_PAGE |
5308 MR_TOGGLE_RX |
5309 MR_NP_RX);
5310 if (ap->rxconfig & ANEG_CFG_FD)
5311 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5312 if (ap->rxconfig & ANEG_CFG_HD)
5313 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5314 if (ap->rxconfig & ANEG_CFG_PS1)
5315 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5316 if (ap->rxconfig & ANEG_CFG_PS2)
5317 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5318 if (ap->rxconfig & ANEG_CFG_RF1)
5319 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5320 if (ap->rxconfig & ANEG_CFG_RF2)
5321 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5322 if (ap->rxconfig & ANEG_CFG_NP)
5323 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5324
5325 ap->link_time = ap->cur_time;
5326
5327 ap->flags ^= (MR_TOGGLE_TX);
5328 if (ap->rxconfig & 0x0008)
5329 ap->flags |= MR_TOGGLE_RX;
5330 if (ap->rxconfig & ANEG_CFG_NP)
5331 ap->flags |= MR_NP_RX;
5332 ap->flags |= MR_PAGE_RX;
5333
5334 ap->state = ANEG_STATE_COMPLETE_ACK;
5335 ret = ANEG_TIMER_ENAB;
5336 break;
5337
5338 case ANEG_STATE_COMPLETE_ACK:
5339 if (ap->ability_match != 0 &&
5340 ap->rxconfig == 0) {
5341 ap->state = ANEG_STATE_AN_ENABLE;
5342 break;
5343 }
5344 delta = ap->cur_time - ap->link_time;
5345 if (delta > ANEG_STATE_SETTLE_TIME) {
5346 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5347 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5348 } else {
5349 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5350 !(ap->flags & MR_NP_RX)) {
5351 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5352 } else {
5353 ret = ANEG_FAILED;
5354 }
5355 }
5356 }
5357 break;
5358
5359 case ANEG_STATE_IDLE_DETECT_INIT:
5360 ap->link_time = ap->cur_time;
5361 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5362 tw32_f(MAC_MODE, tp->mac_mode);
5363 udelay(40);
5364
5365 ap->state = ANEG_STATE_IDLE_DETECT;
5366 ret = ANEG_TIMER_ENAB;
5367 break;
5368
5369 case ANEG_STATE_IDLE_DETECT:
5370 if (ap->ability_match != 0 &&
5371 ap->rxconfig == 0) {
5372 ap->state = ANEG_STATE_AN_ENABLE;
5373 break;
5374 }
5375 delta = ap->cur_time - ap->link_time;
5376 if (delta > ANEG_STATE_SETTLE_TIME) {
5377 /* XXX another gem from the Broadcom driver :( */
5378 ap->state = ANEG_STATE_LINK_OK;
5379 }
5380 break;
5381
5382 case ANEG_STATE_LINK_OK:
5383 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5384 ret = ANEG_DONE;
5385 break;
5386
5387 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5388 /* ??? unimplemented */
5389 break;
5390
5391 case ANEG_STATE_NEXT_PAGE_WAIT:
5392 /* ??? unimplemented */
5393 break;
5394
5395 default:
5396 ret = ANEG_FAILED;
5397 break;
5398 }
5399
5400 return ret;
5401 }
5402
5403 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5404 {
5405 int res = 0;
5406 struct tg3_fiber_aneginfo aninfo;
5407 int status = ANEG_FAILED;
5408 unsigned int tick;
5409 u32 tmp;
5410
5411 tw32_f(MAC_TX_AUTO_NEG, 0);
5412
5413 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5414 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5415 udelay(40);
5416
5417 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5418 udelay(40);
5419
5420 memset(&aninfo, 0, sizeof(aninfo));
5421 aninfo.flags |= MR_AN_ENABLE;
5422 aninfo.state = ANEG_STATE_UNKNOWN;
5423 aninfo.cur_time = 0;
5424 tick = 0;
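/* Clock the state machine in ~1 us steps, giving autoneg roughly
 * 195 ms to reach ANEG_DONE or ANEG_FAILED before bailing out.
 */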
5425 while (++tick < 195000) {
5426 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5427 if (status == ANEG_DONE || status == ANEG_FAILED)
5428 break;
5429
5430 udelay(1);
5431 }
5432
5433 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5434 tw32_f(MAC_MODE, tp->mac_mode);
5435 udelay(40);
5436
5437 *txflags = aninfo.txconfig;
5438 *rxflags = aninfo.flags;
5439
5440 if (status == ANEG_DONE &&
5441 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5442 MR_LP_ADV_FULL_DUPLEX)))
5443 res = 1;
5444
5445 return res;
5446 }
5447
5448 static void tg3_init_bcm8002(struct tg3 *tp)
5449 {
5450 u32 mac_status = tr32(MAC_STATUS);
5451 int i;
5452
5453 /* Reset when initializing for the first time or when we have link. */
5454 if (tg3_flag(tp, INIT_COMPLETE) &&
5455 !(mac_status & MAC_STATUS_PCS_SYNCED))
5456 return;
5457
5458 /* Set PLL lock range. */
5459 tg3_writephy(tp, 0x16, 0x8007);
5460
5461 /* SW reset */
5462 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5463
5464 /* Wait for reset to complete. */
5465 /* XXX schedule_timeout() ... */
5466 for (i = 0; i < 500; i++)
5467 udelay(10);
5468
5469 /* Config mode; select PMA/Ch 1 regs. */
5470 tg3_writephy(tp, 0x10, 0x8411);
5471
5472 /* Enable auto-lock and comdet, select txclk for tx. */
5473 tg3_writephy(tp, 0x11, 0x0a10);
5474
5475 tg3_writephy(tp, 0x18, 0x00a0);
5476 tg3_writephy(tp, 0x16, 0x41ff);
5477
5478 /* Assert and deassert POR. */
5479 tg3_writephy(tp, 0x13, 0x0400);
5480 udelay(40);
5481 tg3_writephy(tp, 0x13, 0x0000);
5482
5483 tg3_writephy(tp, 0x11, 0x0a50);
5484 udelay(40);
5485 tg3_writephy(tp, 0x11, 0x0a10);
5486
5487 /* Wait for signal to stabilize */
5488 /* XXX schedule_timeout() ... */
5489 for (i = 0; i < 15000; i++)
5490 udelay(10);
5491
5492 /* Deselect the channel register so we can read the PHYID
5493 * later.
5494 */
5495 tg3_writephy(tp, 0x10, 0x8011);
5496 }
5497
5498 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5499 {
5500 u16 flowctrl;
5501 bool current_link_up;
5502 u32 sg_dig_ctrl, sg_dig_status;
5503 u32 serdes_cfg, expected_sg_dig_ctrl;
5504 int workaround, port_a;
5505
5506 serdes_cfg = 0;
5507 workaround = 0;
5508 port_a = 1;
5509 current_link_up = false;
5510
5511 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5512 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5513 workaround = 1;
5514 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5515 port_a = 0;
5516
5517 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5518 /* preserve bits 20-23 for voltage regulator */
5519 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5520 }
5521
5522 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5523
5524 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5525 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5526 if (workaround) {
5527 u32 val = serdes_cfg;
5528
5529 if (port_a)
5530 val |= 0xc010000;
5531 else
5532 val |= 0x4010000;
5533 tw32_f(MAC_SERDES_CFG, val);
5534 }
5535
5536 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5537 }
5538 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5539 tg3_setup_flow_control(tp, 0, 0);
5540 current_link_up = true;
5541 }
5542 goto out;
5543 }
5544
5545 /* Want auto-negotiation. */
5546 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5547
5548 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5549 if (flowctrl & ADVERTISE_1000XPAUSE)
5550 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5551 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5552 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5553
5554 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5555 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5556 tp->serdes_counter &&
5557 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5558 MAC_STATUS_RCVD_CFG)) ==
5559 MAC_STATUS_PCS_SYNCED)) {
5560 tp->serdes_counter--;
5561 current_link_up = true;
5562 goto out;
5563 }
5564 restart_autoneg:
5565 if (workaround)
5566 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5567 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5568 udelay(5);
5569 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5570
5571 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5572 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5573 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5574 MAC_STATUS_SIGNAL_DET)) {
5575 sg_dig_status = tr32(SG_DIG_STATUS);
5576 mac_status = tr32(MAC_STATUS);
5577
5578 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5579 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5580 u32 local_adv = 0, remote_adv = 0;
5581
5582 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5583 local_adv |= ADVERTISE_1000XPAUSE;
5584 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5585 local_adv |= ADVERTISE_1000XPSE_ASYM;
5586
5587 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5588 remote_adv |= LPA_1000XPAUSE;
5589 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5590 remote_adv |= LPA_1000XPAUSE_ASYM;
5591
5592 tp->link_config.rmt_adv =
5593 mii_adv_to_ethtool_adv_x(remote_adv);
5594
5595 tg3_setup_flow_control(tp, local_adv, remote_adv);
5596 current_link_up = true;
5597 tp->serdes_counter = 0;
5598 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5599 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5600 if (tp->serdes_counter)
5601 tp->serdes_counter--;
5602 else {
5603 if (workaround) {
5604 u32 val = serdes_cfg;
5605
5606 if (port_a)
5607 val |= 0xc010000;
5608 else
5609 val |= 0x4010000;
5610
5611 tw32_f(MAC_SERDES_CFG, val);
5612 }
5613
5614 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5615 udelay(40);
5616
5617 /* Link parallel detection: the link is up only if we
5618 * have PCS_SYNC and are not receiving config code
5619 * words. */
5620 mac_status = tr32(MAC_STATUS);
5621 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5622 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5623 tg3_setup_flow_control(tp, 0, 0);
5624 current_link_up = true;
5625 tp->phy_flags |=
5626 TG3_PHYFLG_PARALLEL_DETECT;
5627 tp->serdes_counter =
5628 SERDES_PARALLEL_DET_TIMEOUT;
5629 } else
5630 goto restart_autoneg;
5631 }
5632 }
5633 } else {
5634 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5635 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5636 }
5637
5638 out:
5639 return current_link_up;
5640 }
5641
5642 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5643 {
5644 bool current_link_up = false;
5645
5646 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5647 goto out;
5648
5649 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5650 u32 txflags, rxflags;
5651 int i;
5652
5653 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5654 u32 local_adv = 0, remote_adv = 0;
5655
5656 if (txflags & ANEG_CFG_PS1)
5657 local_adv |= ADVERTISE_1000XPAUSE;
5658 if (txflags & ANEG_CFG_PS2)
5659 local_adv |= ADVERTISE_1000XPSE_ASYM;
5660
5661 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5662 remote_adv |= LPA_1000XPAUSE;
5663 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5664 remote_adv |= LPA_1000XPAUSE_ASYM;
5665
5666 tp->link_config.rmt_adv =
5667 mii_adv_to_ethtool_adv_x(remote_adv);
5668
5669 tg3_setup_flow_control(tp, local_adv, remote_adv);
5670
5671 current_link_up = true;
5672 }
5673 for (i = 0; i < 30; i++) {
5674 udelay(20);
5675 tw32_f(MAC_STATUS,
5676 (MAC_STATUS_SYNC_CHANGED |
5677 MAC_STATUS_CFG_CHANGED));
5678 udelay(40);
5679 if ((tr32(MAC_STATUS) &
5680 (MAC_STATUS_SYNC_CHANGED |
5681 MAC_STATUS_CFG_CHANGED)) == 0)
5682 break;
5683 }
5684
5685 mac_status = tr32(MAC_STATUS);
5686 if (!current_link_up &&
5687 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5688 !(mac_status & MAC_STATUS_RCVD_CFG))
5689 current_link_up = true;
5690 } else {
5691 tg3_setup_flow_control(tp, 0, 0);
5692
5693 /* Forcing 1000FD link up. */
5694 current_link_up = true;
5695
5696 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5697 udelay(40);
5698
5699 tw32_f(MAC_MODE, tp->mac_mode);
5700 udelay(40);
5701 }
5702
5703 out:
5704 return current_link_up;
5705 }
5706
5707 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5708 {
5709 u32 orig_pause_cfg;
5710 u32 orig_active_speed;
5711 u8 orig_active_duplex;
5712 u32 mac_status;
5713 bool current_link_up;
5714 int i;
5715
5716 orig_pause_cfg = tp->link_config.active_flowctrl;
5717 orig_active_speed = tp->link_config.active_speed;
5718 orig_active_duplex = tp->link_config.active_duplex;
5719
5720 if (!tg3_flag(tp, HW_AUTONEG) &&
5721 tp->link_up &&
5722 tg3_flag(tp, INIT_COMPLETE)) {
5723 mac_status = tr32(MAC_STATUS);
5724 mac_status &= (MAC_STATUS_PCS_SYNCED |
5725 MAC_STATUS_SIGNAL_DET |
5726 MAC_STATUS_CFG_CHANGED |
5727 MAC_STATUS_RCVD_CFG);
5728 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5729 MAC_STATUS_SIGNAL_DET)) {
5730 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5731 MAC_STATUS_CFG_CHANGED));
5732 return 0;
5733 }
5734 }
5735
5736 tw32_f(MAC_TX_AUTO_NEG, 0);
5737
5738 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5739 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5740 tw32_f(MAC_MODE, tp->mac_mode);
5741 udelay(40);
5742
5743 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5744 tg3_init_bcm8002(tp);
5745
5746 /* Enable link change event even when serdes polling. */
5747 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5748 udelay(40);
5749
5750 tp->link_config.rmt_adv = 0;
5751 mac_status = tr32(MAC_STATUS);
5752
5753 if (tg3_flag(tp, HW_AUTONEG))
5754 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5755 else
5756 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5757
5758 tp->napi[0].hw_status->status =
5759 (SD_STATUS_UPDATED |
5760 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5761
5762 for (i = 0; i < 100; i++) {
5763 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5764 MAC_STATUS_CFG_CHANGED));
5765 udelay(5);
5766 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5767 MAC_STATUS_CFG_CHANGED |
5768 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5769 break;
5770 }
5771
5772 mac_status = tr32(MAC_STATUS);
5773 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5774 current_link_up = false;
5775 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5776 tp->serdes_counter == 0) {
5777 tw32_f(MAC_MODE, (tp->mac_mode |
5778 MAC_MODE_SEND_CONFIGS));
5779 udelay(1);
5780 tw32_f(MAC_MODE, tp->mac_mode);
5781 }
5782 }
5783
5784 if (current_link_up) {
5785 tp->link_config.active_speed = SPEED_1000;
5786 tp->link_config.active_duplex = DUPLEX_FULL;
5787 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5788 LED_CTRL_LNKLED_OVERRIDE |
5789 LED_CTRL_1000MBPS_ON));
5790 } else {
5791 tp->link_config.active_speed = SPEED_UNKNOWN;
5792 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5793 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5794 LED_CTRL_LNKLED_OVERRIDE |
5795 LED_CTRL_TRAFFIC_OVERRIDE));
5796 }
5797
5798 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5799 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5800 if (orig_pause_cfg != now_pause_cfg ||
5801 orig_active_speed != tp->link_config.active_speed ||
5802 orig_active_duplex != tp->link_config.active_duplex)
5803 tg3_link_report(tp);
5804 }
5805
5806 return 0;
5807 }
5808
5809 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5810 {
5811 int err = 0;
5812 u32 bmsr, bmcr;
5813 u32 current_speed = SPEED_UNKNOWN;
5814 u8 current_duplex = DUPLEX_UNKNOWN;
5815 bool current_link_up = false;
5816 u32 local_adv, remote_adv, sgsr;
5817
5818 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5819 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5820 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5821 (sgsr & SERDES_TG3_SGMII_MODE)) {
5822
5823 if (force_reset)
5824 tg3_phy_reset(tp);
5825
5826 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5827
5828 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5829 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5830 } else {
5831 current_link_up = true;
5832 if (sgsr & SERDES_TG3_SPEED_1000) {
5833 current_speed = SPEED_1000;
5834 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5835 } else if (sgsr & SERDES_TG3_SPEED_100) {
5836 current_speed = SPEED_100;
5837 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5838 } else {
5839 current_speed = SPEED_10;
5840 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5841 }
5842
5843 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5844 current_duplex = DUPLEX_FULL;
5845 else
5846 current_duplex = DUPLEX_HALF;
5847 }
5848
5849 tw32_f(MAC_MODE, tp->mac_mode);
5850 udelay(40);
5851
5852 tg3_clear_mac_status(tp);
5853
5854 goto fiber_setup_done;
5855 }
5856
5857 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5858 tw32_f(MAC_MODE, tp->mac_mode);
5859 udelay(40);
5860
5861 tg3_clear_mac_status(tp);
5862
5863 if (force_reset)
5864 tg3_phy_reset(tp);
5865
5866 tp->link_config.rmt_adv = 0;
5867
5868 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5869 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5870 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5871 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5872 bmsr |= BMSR_LSTATUS;
5873 else
5874 bmsr &= ~BMSR_LSTATUS;
5875 }
5876
5877 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5878
5879 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5880 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5881 /* do nothing, just check for link up at the end */
5882 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5883 u32 adv, newadv;
5884
5885 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5886 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5887 ADVERTISE_1000XPAUSE |
5888 ADVERTISE_1000XPSE_ASYM |
5889 ADVERTISE_SLCT);
5890
5891 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5892 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5893
5894 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5895 tg3_writephy(tp, MII_ADVERTISE, newadv);
5896 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5897 tg3_writephy(tp, MII_BMCR, bmcr);
5898
5899 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5900 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5901 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5902
5903 return err;
5904 }
5905 } else {
5906 u32 new_bmcr;
5907
5908 bmcr &= ~BMCR_SPEED1000;
5909 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5910
5911 if (tp->link_config.duplex == DUPLEX_FULL)
5912 new_bmcr |= BMCR_FULLDPLX;
5913
5914 if (new_bmcr != bmcr) {
5915 /* BMCR_SPEED1000 is a reserved bit that needs
5916 * to be set on write.
5917 */
5918 new_bmcr |= BMCR_SPEED1000;
5919
5920 /* Force a linkdown */
5921 if (tp->link_up) {
5922 u32 adv;
5923
5924 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5925 adv &= ~(ADVERTISE_1000XFULL |
5926 ADVERTISE_1000XHALF |
5927 ADVERTISE_SLCT);
5928 tg3_writephy(tp, MII_ADVERTISE, adv);
5929 tg3_writephy(tp, MII_BMCR, bmcr |
5930 BMCR_ANRESTART |
5931 BMCR_ANENABLE);
5932 udelay(10);
5933 tg3_carrier_off(tp);
5934 }
5935 tg3_writephy(tp, MII_BMCR, new_bmcr);
5936 bmcr = new_bmcr;
5937 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5938 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5939 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5940 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5941 bmsr |= BMSR_LSTATUS;
5942 else
5943 bmsr &= ~BMSR_LSTATUS;
5944 }
5945 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5946 }
5947 }
5948
5949 if (bmsr & BMSR_LSTATUS) {
5950 current_speed = SPEED_1000;
5951 current_link_up = true;
5952 if (bmcr & BMCR_FULLDPLX)
5953 current_duplex = DUPLEX_FULL;
5954 else
5955 current_duplex = DUPLEX_HALF;
5956
5957 local_adv = 0;
5958 remote_adv = 0;
5959
5960 if (bmcr & BMCR_ANENABLE) {
5961 u32 common;
5962
5963 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5964 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5965 common = local_adv & remote_adv;
5966 if (common & (ADVERTISE_1000XHALF |
5967 ADVERTISE_1000XFULL)) {
5968 if (common & ADVERTISE_1000XFULL)
5969 current_duplex = DUPLEX_FULL;
5970 else
5971 current_duplex = DUPLEX_HALF;
5972
5973 tp->link_config.rmt_adv =
5974 mii_adv_to_ethtool_adv_x(remote_adv);
5975 } else if (!tg3_flag(tp, 5780_CLASS)) {
5976 /* Link is up via parallel detect */
5977 } else {
5978 current_link_up = false;
5979 }
5980 }
5981 }
5982
5983 fiber_setup_done:
5984 if (current_link_up && current_duplex == DUPLEX_FULL)
5985 tg3_setup_flow_control(tp, local_adv, remote_adv);
5986
5987 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5988 if (tp->link_config.active_duplex == DUPLEX_HALF)
5989 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5990
5991 tw32_f(MAC_MODE, tp->mac_mode);
5992 udelay(40);
5993
5994 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5995
5996 tp->link_config.active_speed = current_speed;
5997 tp->link_config.active_duplex = current_duplex;
5998
5999 tg3_test_and_report_link_chg(tp, current_link_up);
6000 return err;
6001 }
6002
6003 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6004 {
6005 if (tp->serdes_counter) {
6006 /* Give autoneg time to complete. */
6007 tp->serdes_counter--;
6008 return;
6009 }
6010
6011 if (!tp->link_up &&
6012 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6013 u32 bmcr;
6014
6015 tg3_readphy(tp, MII_BMCR, &bmcr);
6016 if (bmcr & BMCR_ANENABLE) {
6017 u32 phy1, phy2;
6018
6019 /* Select shadow register 0x1f */
6020 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6021 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6022
6023 /* Select expansion interrupt status register */
6024 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6025 MII_TG3_DSP_EXP1_INT_STAT);
6026 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6027 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6028
6029 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6030 /* We have signal detect and are not receiving
6031 * config code words, so the link is up by
6032 * parallel detection.
6033 */
6034
6035 bmcr &= ~BMCR_ANENABLE;
6036 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6037 tg3_writephy(tp, MII_BMCR, bmcr);
6038 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6039 }
6040 }
6041 } else if (tp->link_up &&
6042 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6043 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6044 u32 phy2;
6045
6046 /* Select expansion interrupt status register */
6047 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6048 MII_TG3_DSP_EXP1_INT_STAT);
6049 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6050 if (phy2 & 0x20) {
6051 u32 bmcr;
6052
6053 /* Config code words received, turn on autoneg. */
6054 tg3_readphy(tp, MII_BMCR, &bmcr);
6055 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6056
6057 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6058
6059 }
6060 }
6061 }
6062
6063 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6064 {
6065 u32 val;
6066 int err;
6067
6068 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6069 err = tg3_setup_fiber_phy(tp, force_reset);
6070 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6071 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6072 else
6073 err = tg3_setup_copper_phy(tp, force_reset);
6074
6075 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6076 u32 scale;
6077
6078 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6079 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6080 scale = 65;
6081 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6082 scale = 6;
6083 else
6084 scale = 12;
6085
6086 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6087 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6088 tw32(GRC_MISC_CFG, val);
6089 }
6090
6091 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6092 (6 << TX_LENGTHS_IPG_SHIFT);
6093 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6094 tg3_asic_rev(tp) == ASIC_REV_5762)
6095 val |= tr32(MAC_TX_LENGTHS) &
6096 (TX_LENGTHS_JMB_FRM_LEN_MSK |
6097 TX_LENGTHS_CNT_DWN_VAL_MSK);
6098
6099 if (tp->link_config.active_speed == SPEED_1000 &&
6100 tp->link_config.active_duplex == DUPLEX_HALF)
6101 tw32(MAC_TX_LENGTHS, val |
6102 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6103 else
6104 tw32(MAC_TX_LENGTHS, val |
6105 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6106
6107 if (!tg3_flag(tp, 5705_PLUS)) {
6108 if (tp->link_up) {
6109 tw32(HOSTCC_STAT_COAL_TICKS,
6110 tp->coal.stats_block_coalesce_usecs);
6111 } else {
6112 tw32(HOSTCC_STAT_COAL_TICKS, 0);
6113 }
6114 }
6115
6116 if (tg3_flag(tp, ASPM_WORKAROUND)) {
6117 val = tr32(PCIE_PWR_MGMT_THRESH);
6118 if (!tp->link_up)
6119 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6120 tp->pwrmgmt_thresh;
6121 else
6122 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6123 tw32(PCIE_PWR_MGMT_THRESH, val);
6124 }
6125
6126 return err;
6127 }
6128
6129 /* tp->lock must be held */
6130 static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
6131 {
6132 u64 stamp;
6133
6134 ptp_read_system_prets(sts);
6135 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6136 ptp_read_system_postts(sts);
6137 stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6138
6139 return stamp;
6140 }
6141
6142 /* tp->lock must be held */
6143 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6144 {
6145 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6146
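/* The counter is stopped while the 64-bit value is written as two
 * 32-bit halves and resumed afterwards, so it cannot tick between
 * the LSB and MSB updates.
 */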
6147 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6148 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6149 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6150 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6151 }
6152
6153 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6154 static inline void tg3_full_unlock(struct tg3 *tp);
6155 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6156 {
6157 struct tg3 *tp = netdev_priv(dev);
6158
6159 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6160 SOF_TIMESTAMPING_RX_SOFTWARE |
6161 SOF_TIMESTAMPING_SOFTWARE;
6162
6163 if (tg3_flag(tp, PTP_CAPABLE)) {
6164 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6165 SOF_TIMESTAMPING_RX_HARDWARE |
6166 SOF_TIMESTAMPING_RAW_HARDWARE;
6167 }
6168
6169 if (tp->ptp_clock)
6170 info->phc_index = ptp_clock_index(tp->ptp_clock);
6171 else
6172 info->phc_index = -1;
6173
6174 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6175
6176 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6177 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6178 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6179 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6180 return 0;
6181 }
6182
6183 static int tg3_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
6184 {
6185 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6186 u64 correction;
6187 bool neg_adj;
6188
6189 /* Frequency adjustment is performed using hardware with a 24-bit
6190 * accumulator and a programmable correction value. On each clock
6191 * cycle, the correction value gets added to the accumulator and
6192 * when it overflows, the time counter is incremented/decremented.
6193 */
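/* Illustrative numbers (not part of the driver): with a 2^24
 * accumulator, a +100 ppm adjustment needs a correction value of
 * roughly (1 << 24) * 100 / 1000000 ~= 1678. diff_by_scaled_ppm()
 * derives this from scaled_ppm (ppm with a 16-bit binary fraction)
 * and returns true when the adjustment is negative.
 */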
6194 neg_adj = diff_by_scaled_ppm(1 << 24, scaled_ppm, &correction);
6195
6196 tg3_full_lock(tp, 0);
6197
6198 if (correction)
6199 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6200 TG3_EAV_REF_CLK_CORRECT_EN |
6201 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) |
6202 ((u32)correction & TG3_EAV_REF_CLK_CORRECT_MASK));
6203 else
6204 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6205
6206 tg3_full_unlock(tp);
6207
6208 return 0;
6209 }
6210
6211 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6212 {
6213 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6214
6215 tg3_full_lock(tp, 0);
6216 tp->ptp_adjust += delta;
6217 tg3_full_unlock(tp);
6218
6219 return 0;
6220 }
6221
6222 static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
6223 struct ptp_system_timestamp *sts)
6224 {
6225 u64 ns;
6226 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6227
6228 tg3_full_lock(tp, 0);
6229 ns = tg3_refclk_read(tp, sts);
6230 ns += tp->ptp_adjust;
6231 tg3_full_unlock(tp);
6232
6233 *ts = ns_to_timespec64(ns);
6234
6235 return 0;
6236 }
6237
6238 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6239 const struct timespec64 *ts)
6240 {
6241 u64 ns;
6242 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6243
6244 ns = timespec64_to_ns(ts);
6245
6246 tg3_full_lock(tp, 0);
6247 tg3_refclk_write(tp, ns);
6248 tp->ptp_adjust = 0;
6249 tg3_full_unlock(tp);
6250
6251 return 0;
6252 }
6253
6254 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6255 struct ptp_clock_request *rq, int on)
6256 {
6257 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6258 u32 clock_ctl;
6259 int rval = 0;
6260
6261 switch (rq->type) {
6262 case PTP_CLK_REQ_PEROUT:
6263 /* Reject requests with unsupported flags */
6264 if (rq->perout.flags)
6265 return -EOPNOTSUPP;
6266
6267 if (rq->perout.index != 0)
6268 return -EINVAL;
6269
6270 tg3_full_lock(tp, 0);
6271 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6272 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6273
6274 if (on) {
6275 u64 nsec;
6276
6277 nsec = rq->perout.start.sec * 1000000000ULL +
6278 rq->perout.start.nsec;
6279
6280 if (rq->perout.period.sec || rq->perout.period.nsec) {
6281 netdev_warn(tp->dev,
6282 "Device supports only a one-shot timesync output, period must be 0\n");
6283 rval = -EINVAL;
6284 goto err_out;
6285 }
6286
6287 if (nsec & (1ULL << 63)) {
6288 netdev_warn(tp->dev,
6289 "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6290 rval = -EINVAL;
6291 goto err_out;
6292 }
6293
6294 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6295 tw32(TG3_EAV_WATCHDOG0_MSB,
6296 TG3_EAV_WATCHDOG0_EN |
6297 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6298
6299 tw32(TG3_EAV_REF_CLCK_CTL,
6300 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6301 } else {
6302 tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6303 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6304 }
6305
6306 err_out:
6307 tg3_full_unlock(tp);
6308 return rval;
6309
6310 default:
6311 break;
6312 }
6313
6314 return -EOPNOTSUPP;
6315 }
6316
6317 static const struct ptp_clock_info tg3_ptp_caps = {
6318 .owner = THIS_MODULE,
6319 .name = "tg3 clock",
6320 .max_adj = 250000000,
6321 .n_alarm = 0,
6322 .n_ext_ts = 0,
6323 .n_per_out = 1,
6324 .n_pins = 0,
6325 .pps = 0,
6326 .adjfine = tg3_ptp_adjfine,
6327 .adjtime = tg3_ptp_adjtime,
6328 .gettimex64 = tg3_ptp_gettimex,
6329 .settime64 = tg3_ptp_settime,
6330 .enable = tg3_ptp_enable,
6331 };
6332
6333 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6334 struct skb_shared_hwtstamps *timestamp)
6335 {
6336 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6337 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6338 tp->ptp_adjust);
6339 }
6340
6341 /* tp->lock must be held */
6342 static void tg3_ptp_init(struct tg3 *tp)
6343 {
6344 if (!tg3_flag(tp, PTP_CAPABLE))
6345 return;
6346
6347 /* Initialize the hardware clock to the system time. */
6348 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6349 tp->ptp_adjust = 0;
6350 tp->ptp_info = tg3_ptp_caps;
6351 }
6352
6353 /* tp->lock must be held */
6354 static void tg3_ptp_resume(struct tg3 *tp)
6355 {
6356 if (!tg3_flag(tp, PTP_CAPABLE))
6357 return;
6358
6359 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6360 tp->ptp_adjust = 0;
6361 }
6362
6363 static void tg3_ptp_fini(struct tg3 *tp)
6364 {
6365 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6366 return;
6367
6368 ptp_clock_unregister(tp->ptp_clock);
6369 tp->ptp_clock = NULL;
6370 tp->ptp_adjust = 0;
6371 }
6372
6373 static inline int tg3_irq_sync(struct tg3 *tp)
6374 {
6375 return tp->irq_sync;
6376 }
6377
6378 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6379 {
6380 int i;
6381
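/* Advance dst by the register offset so each value lands at the
 * position in the dump buffer that matches its register address.
 */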
6382 dst = (u32 *)((u8 *)dst + off);
6383 for (i = 0; i < len; i += sizeof(u32))
6384 *dst++ = tr32(off + i);
6385 }
6386
6387 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6388 {
6389 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6390 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6391 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6392 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6393 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6394 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6395 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6396 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6397 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6398 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6399 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6400 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6401 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6402 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6403 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6404 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6405 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6406 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6407 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6408
6409 if (tg3_flag(tp, SUPPORT_MSIX))
6410 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6411
6412 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6413 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6414 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6415 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6416 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6417 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6418 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6419 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6420
6421 if (!tg3_flag(tp, 5705_PLUS)) {
6422 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6423 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6424 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6425 }
6426
6427 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6428 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6429 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6430 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6431 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6432
6433 if (tg3_flag(tp, NVRAM))
6434 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6435 }
6436
6437 static void tg3_dump_state(struct tg3 *tp)
6438 {
6439 int i;
6440 u32 *regs;
6441
6442 /* If it is a PCI error, all registers read back as 0xffff;
6443 * don't dump them out, just report the error and return.
6444 */
6445 if (tp->pdev->error_state != pci_channel_io_normal) {
6446 netdev_err(tp->dev, "PCI channel ERROR!\n");
6447 return;
6448 }
6449
6450 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6451 if (!regs)
6452 return;
6453
6454 if (tg3_flag(tp, PCI_EXPRESS)) {
6455 /* Read up to but not including private PCI registers */
6456 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6457 regs[i / sizeof(u32)] = tr32(i);
6458 } else
6459 tg3_dump_legacy_regs(tp, regs);
6460
6461 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6462 if (!regs[i + 0] && !regs[i + 1] &&
6463 !regs[i + 2] && !regs[i + 3])
6464 continue;
6465
6466 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6467 i * 4,
6468 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6469 }
6470
6471 kfree(regs);
6472
6473 for (i = 0; i < tp->irq_cnt; i++) {
6474 struct tg3_napi *tnapi = &tp->napi[i];
6475
6476 /* SW status block */
6477 netdev_err(tp->dev,
6478 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6479 i,
6480 tnapi->hw_status->status,
6481 tnapi->hw_status->status_tag,
6482 tnapi->hw_status->rx_jumbo_consumer,
6483 tnapi->hw_status->rx_consumer,
6484 tnapi->hw_status->rx_mini_consumer,
6485 tnapi->hw_status->idx[0].rx_producer,
6486 tnapi->hw_status->idx[0].tx_consumer);
6487
6488 netdev_err(tp->dev,
6489 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6490 i,
6491 tnapi->last_tag, tnapi->last_irq_tag,
6492 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6493 tnapi->rx_rcb_ptr,
6494 tnapi->prodring.rx_std_prod_idx,
6495 tnapi->prodring.rx_std_cons_idx,
6496 tnapi->prodring.rx_jmb_prod_idx,
6497 tnapi->prodring.rx_jmb_cons_idx);
6498 }
6499 }
6500
6501 /* This is called whenever we suspect that the system chipset is re-
6502 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6503 * is bogus tx completions. We try to recover by setting the
6504 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6505 * in the workqueue.
6506 */
6507 static void tg3_tx_recover(struct tg3 *tp)
6508 {
6509 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6510 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6511
6512 netdev_warn(tp->dev,
6513 "The system may be re-ordering memory-mapped I/O "
6514 "cycles to the network device, attempting to recover. "
6515 "Please report the problem to the driver maintainer "
6516 "and include system chipset information.\n");
6517
6518 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6519 }
6520
6521 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6522 {
6523 /* Tell compiler to fetch tx indices from memory. */
6524 barrier();
6525 return tnapi->tx_pending -
6526 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6527 }
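/* Worked example for tg3_tx_avail() above (illustrative values):
 * with TG3_TX_RING_SIZE = 512, tx_pending = 512, tx_prod = 10 and
 * tx_cons = 500, (10 - 500) & 511 = 22 descriptors are in flight,
 * leaving 490 free slots; the mask handles index wrap-around.
 */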
6528
6529 /* Tigon3 never reports partial packet sends. So we do not
6530 * need special logic to handle SKBs that have not had all
6531 * of their frags sent yet, like SunGEM does.
6532 */
6533 static void tg3_tx(struct tg3_napi *tnapi)
6534 {
6535 struct tg3 *tp = tnapi->tp;
6536 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6537 u32 sw_idx = tnapi->tx_cons;
6538 struct netdev_queue *txq;
6539 int index = tnapi - tp->napi;
6540 unsigned int pkts_compl = 0, bytes_compl = 0;
6541
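/* With TSS, tp->napi[0] does not service a tx ring, so the tx
 * queue number is one less than the napi index.
 */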
6542 if (tg3_flag(tp, ENABLE_TSS))
6543 index--;
6544
6545 txq = netdev_get_tx_queue(tp->dev, index);
6546
6547 while (sw_idx != hw_idx) {
6548 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6549 struct sk_buff *skb = ri->skb;
6550 int i, tx_bug = 0;
6551
6552 if (unlikely(skb == NULL)) {
6553 tg3_tx_recover(tp);
6554 return;
6555 }
6556
6557 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6558 struct skb_shared_hwtstamps timestamp;
6559 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6560 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6561
6562 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6563
6564 skb_tstamp_tx(skb, &timestamp);
6565 }
6566
6567 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping),
6568 skb_headlen(skb), DMA_TO_DEVICE);
6569
6570 ri->skb = NULL;
6571
6572 while (ri->fragmented) {
6573 ri->fragmented = false;
6574 sw_idx = NEXT_TX(sw_idx);
6575 ri = &tnapi->tx_buffers[sw_idx];
6576 }
6577
6578 sw_idx = NEXT_TX(sw_idx);
6579
6580 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6581 ri = &tnapi->tx_buffers[sw_idx];
6582 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6583 tx_bug = 1;
6584
6585 dma_unmap_page(&tp->pdev->dev,
6586 dma_unmap_addr(ri, mapping),
6587 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6588 DMA_TO_DEVICE);
6589
6590 while (ri->fragmented) {
6591 ri->fragmented = false;
6592 sw_idx = NEXT_TX(sw_idx);
6593 ri = &tnapi->tx_buffers[sw_idx];
6594 }
6595
6596 sw_idx = NEXT_TX(sw_idx);
6597 }
6598
6599 pkts_compl++;
6600 bytes_compl += skb->len;
6601
6602 dev_consume_skb_any(skb);
6603
6604 if (unlikely(tx_bug)) {
6605 tg3_tx_recover(tp);
6606 return;
6607 }
6608 }
6609
6610 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6611
6612 tnapi->tx_cons = sw_idx;
6613
6614 /* Need to make the tx_cons update visible to tg3_start_xmit()
6615 * before checking for netif_queue_stopped(). Without the
6616 * memory barrier, there is a small possibility that tg3_start_xmit()
6617 * will miss it and cause the queue to be stopped forever.
6618 */
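/* The pairing barrier is on the transmit side, which issues its
 * own smp_mb() after stopping the queue and before re-checking
 * tg3_tx_avail(); see tg3_start_xmit().
 */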
6619 smp_mb();
6620
6621 if (unlikely(netif_tx_queue_stopped(txq) &&
6622 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6623 __netif_tx_lock(txq, smp_processor_id());
6624 if (netif_tx_queue_stopped(txq) &&
6625 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6626 netif_tx_wake_queue(txq);
6627 __netif_tx_unlock(txq);
6628 }
6629 }
6630
6631 static void tg3_frag_free(bool is_frag, void *data)
6632 {
6633 if (is_frag)
6634 skb_free_frag(data);
6635 else
6636 kfree(data);
6637 }
6638
6639 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6640 {
6641 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6642 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6643
6644 if (!ri->data)
6645 return;
6646
6647 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), map_sz,
6648 DMA_FROM_DEVICE);
6649 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6650 ri->data = NULL;
6651 }
6652
6653
6654 /* Returns size of skb allocated or < 0 on error.
6655 *
6656 * We only need to fill in the address because the other members
6657 * of the RX descriptor are invariant, see tg3_init_rings.
6658 *
6659 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6660 * posting buffers we only dirty the first cache line of the RX
6661 * descriptor (containing the address). Whereas for the RX status
6662 * buffers the cpu only reads the last cacheline of the RX descriptor
6663 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6664 */
6665 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6666 u32 opaque_key, u32 dest_idx_unmasked,
6667 unsigned int *frag_size)
6668 {
6669 struct tg3_rx_buffer_desc *desc;
6670 struct ring_info *map;
6671 u8 *data;
6672 dma_addr_t mapping;
6673 int skb_size, data_size, dest_idx;
6674
6675 switch (opaque_key) {
6676 case RXD_OPAQUE_RING_STD:
6677 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6678 desc = &tpr->rx_std[dest_idx];
6679 map = &tpr->rx_std_buffers[dest_idx];
6680 data_size = tp->rx_pkt_map_sz;
6681 break;
6682
6683 case RXD_OPAQUE_RING_JUMBO:
6684 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6685 desc = &tpr->rx_jmb[dest_idx].std;
6686 map = &tpr->rx_jmb_buffers[dest_idx];
6687 data_size = TG3_RX_JMB_MAP_SZ;
6688 break;
6689
6690 default:
6691 return -EINVAL;
6692 }
6693
6694 /* Do not overwrite any of the map or rp information
6695 * until we are sure we can commit to a new buffer.
6696 *
6697 * Callers depend upon this behavior and assume that
6698 * we leave everything unchanged if we fail.
6699 */
6700 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6701 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
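/* Buffers that fit in a page (standard MTU data plus the
 * skb_shared_info footer) come from the cheaper per-cpu page-frag
 * cache, while larger ones, typically jumbo buffers, fall back to
 * kmalloc(). frag_size records which allocator was used so the
 * matching free and skb-build paths are taken later.
 */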
6702 if (skb_size <= PAGE_SIZE) {
6703 data = napi_alloc_frag(skb_size);
6704 *frag_size = skb_size;
6705 } else {
6706 data = kmalloc(skb_size, GFP_ATOMIC);
6707 *frag_size = 0;
6708 }
6709 if (!data)
6710 return -ENOMEM;
6711
6712 mapping = dma_map_single(&tp->pdev->dev, data + TG3_RX_OFFSET(tp),
6713 data_size, DMA_FROM_DEVICE);
6714 if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
6715 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6716 return -EIO;
6717 }
6718
6719 map->data = data;
6720 dma_unmap_addr_set(map, mapping, mapping);
6721
6722 desc->addr_hi = ((u64)mapping >> 32);
6723 desc->addr_lo = ((u64)mapping & 0xffffffff);
6724
6725 return data_size;
6726 }
6727
6728 /* We only need to move over in the address because the other
6729 * members of the RX descriptor are invariant. See notes above
6730 * tg3_alloc_rx_data for full details.
6731 */
6732 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6733 struct tg3_rx_prodring_set *dpr,
6734 u32 opaque_key, int src_idx,
6735 u32 dest_idx_unmasked)
6736 {
6737 struct tg3 *tp = tnapi->tp;
6738 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6739 struct ring_info *src_map, *dest_map;
6740 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6741 int dest_idx;
6742
6743 switch (opaque_key) {
6744 case RXD_OPAQUE_RING_STD:
6745 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6746 dest_desc = &dpr->rx_std[dest_idx];
6747 dest_map = &dpr->rx_std_buffers[dest_idx];
6748 src_desc = &spr->rx_std[src_idx];
6749 src_map = &spr->rx_std_buffers[src_idx];
6750 break;
6751
6752 case RXD_OPAQUE_RING_JUMBO:
6753 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6754 dest_desc = &dpr->rx_jmb[dest_idx].std;
6755 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6756 src_desc = &spr->rx_jmb[src_idx].std;
6757 src_map = &spr->rx_jmb_buffers[src_idx];
6758 break;
6759
6760 default:
6761 return;
6762 }
6763
6764 dest_map->data = src_map->data;
6765 dma_unmap_addr_set(dest_map, mapping,
6766 dma_unmap_addr(src_map, mapping));
6767 dest_desc->addr_hi = src_desc->addr_hi;
6768 dest_desc->addr_lo = src_desc->addr_lo;
6769
6770 /* Ensure that the update to the skb happens after the physical
6771 * addresses have been transferred to the new BD location.
6772 */
6773 smp_wmb();
6774
6775 src_map->data = NULL;
6776 }
6777
6778 /* The RX ring scheme is composed of multiple rings which post fresh
6779 * buffers to the chip, and one special ring the chip uses to report
6780 * status back to the host.
6781 *
6782 * The special ring reports the status of received packets to the
6783 * host. The chip does not write into the original descriptor the
6784 * RX buffer was obtained from. The chip simply takes the original
6785 * descriptor as provided by the host, updates the status and length
6786 * field, then writes this into the next status ring entry.
6787 *
6788 * Each ring the host uses to post buffers to the chip is described
6789 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
6790 * it is first placed into the on-chip ram. When the packet's length
6791 * is known, it walks down the TG3_BDINFO entries to select the ring.
6792 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6793 * which is within the range of the new packet's length is chosen.
6794 *
6795 * The "separate ring for rx status" scheme may sound queer, but it makes
6796 * sense from a cache coherency perspective. If only the host writes
6797 * to the buffer post rings, and only the chip writes to the rx status
6798 * rings, then cache lines never move beyond shared-modified state.
6799 * If both the host and chip were to write into the same ring, cache line
6800 * eviction could occur since both entities want it in an exclusive state.
6801 */
6802 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6803 {
6804 struct tg3 *tp = tnapi->tp;
6805 u32 work_mask, rx_std_posted = 0;
6806 u32 std_prod_idx, jmb_prod_idx;
6807 u32 sw_idx = tnapi->rx_rcb_ptr;
6808 u16 hw_idx;
6809 int received;
6810 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6811
6812 hw_idx = *(tnapi->rx_rcb_prod_idx);
6813 /*
6814 * We need to order the read of hw_idx and the read of
6815 * the opaque cookie.
6816 */
6817 rmb();
6818 work_mask = 0;
6819 received = 0;
6820 std_prod_idx = tpr->rx_std_prod_idx;
6821 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6822 while (sw_idx != hw_idx && budget > 0) {
6823 struct ring_info *ri;
6824 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6825 unsigned int len;
6826 struct sk_buff *skb;
6827 dma_addr_t dma_addr;
6828 u32 opaque_key, desc_idx, *post_ptr;
6829 u8 *data;
6830 u64 tstamp = 0;
6831
6832 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6833 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6834 if (opaque_key == RXD_OPAQUE_RING_STD) {
6835 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6836 dma_addr = dma_unmap_addr(ri, mapping);
6837 data = ri->data;
6838 post_ptr = &std_prod_idx;
6839 rx_std_posted++;
6840 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6841 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6842 dma_addr = dma_unmap_addr(ri, mapping);
6843 data = ri->data;
6844 post_ptr = &jmb_prod_idx;
6845 } else
6846 goto next_pkt_nopost;
6847
6848 work_mask |= opaque_key;
6849
6850 if (desc->err_vlan & RXD_ERR_MASK) {
6851 drop_it:
6852 tg3_recycle_rx(tnapi, tpr, opaque_key,
6853 desc_idx, *post_ptr);
6854 drop_it_no_recycle:
6855 /* Other statistics are kept track of by the card. */
6856 tnapi->rx_dropped++;
6857 goto next_pkt;
6858 }
6859
6860 prefetch(data + TG3_RX_OFFSET(tp));
6861 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6862 ETH_FCS_LEN;
6863
6864 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6865 RXD_FLAG_PTPSTAT_PTPV1 ||
6866 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6867 RXD_FLAG_PTPSTAT_PTPV2) {
6868 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6869 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6870 }
6871
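/* Packets above the copy threshold adopt the DMA buffer directly:
 * a replacement buffer is posted and the old one becomes the skb
 * head via build_skb(). Smaller packets are copied into a fresh
 * skb so the original buffer can be recycled in place.
 */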
6872 if (len > TG3_RX_COPY_THRESH(tp)) {
6873 int skb_size;
6874 unsigned int frag_size;
6875
6876 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6877 *post_ptr, &frag_size);
6878 if (skb_size < 0)
6879 goto drop_it;
6880
6881 dma_unmap_single(&tp->pdev->dev, dma_addr, skb_size,
6882 DMA_FROM_DEVICE);
6883
6884 /* Ensure that the update to the data happens
6885 * after the usage of the old DMA mapping.
6886 */
6887 smp_wmb();
6888
6889 ri->data = NULL;
6890
6891 if (frag_size)
6892 skb = build_skb(data, frag_size);
6893 else
6894 skb = slab_build_skb(data);
6895 if (!skb) {
6896 tg3_frag_free(frag_size != 0, data);
6897 goto drop_it_no_recycle;
6898 }
6899 skb_reserve(skb, TG3_RX_OFFSET(tp));
6900 } else {
6901 tg3_recycle_rx(tnapi, tpr, opaque_key,
6902 desc_idx, *post_ptr);
6903
6904 skb = netdev_alloc_skb(tp->dev,
6905 len + TG3_RAW_IP_ALIGN);
6906 if (skb == NULL)
6907 goto drop_it_no_recycle;
6908
6909 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6910 dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr, len,
6911 DMA_FROM_DEVICE);
6912 memcpy(skb->data,
6913 data + TG3_RX_OFFSET(tp),
6914 len);
6915 dma_sync_single_for_device(&tp->pdev->dev, dma_addr,
6916 len, DMA_FROM_DEVICE);
6917 }
6918
6919 skb_put(skb, len);
6920 if (tstamp)
6921 tg3_hwclock_to_timestamp(tp, tstamp,
6922 skb_hwtstamps(skb));
6923
6924 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6925 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6926 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6927 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6928 skb->ip_summed = CHECKSUM_UNNECESSARY;
6929 else
6930 skb_checksum_none_assert(skb);
6931
6932 skb->protocol = eth_type_trans(skb, tp->dev);
6933
6934 if (len > (tp->dev->mtu + ETH_HLEN) &&
6935 skb->protocol != htons(ETH_P_8021Q) &&
6936 skb->protocol != htons(ETH_P_8021AD)) {
6937 dev_kfree_skb_any(skb);
6938 goto drop_it_no_recycle;
6939 }
6940
6941 if (desc->type_flags & RXD_FLAG_VLAN &&
6942 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6943 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6944 desc->err_vlan & RXD_VLAN_MASK);
6945
6946 napi_gro_receive(&tnapi->napi, skb);
6947
6948 received++;
6949 budget--;
6950
6951 next_pkt:
6952 (*post_ptr)++;
6953
6954 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6955 tpr->rx_std_prod_idx = std_prod_idx &
6956 tp->rx_std_ring_mask;
6957 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6958 tpr->rx_std_prod_idx);
6959 work_mask &= ~RXD_OPAQUE_RING_STD;
6960 rx_std_posted = 0;
6961 }
6962 next_pkt_nopost:
6963 sw_idx++;
6964 sw_idx &= tp->rx_ret_ring_mask;
6965
6966 /* Refresh hw_idx to see if there is new work */
6967 if (sw_idx == hw_idx) {
6968 hw_idx = *(tnapi->rx_rcb_prod_idx);
6969 rmb();
6970 }
6971 }
6972
6973 /* ACK the status ring. */
6974 tnapi->rx_rcb_ptr = sw_idx;
6975 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6976
6977 /* Refill RX ring(s). */
6978 if (!tg3_flag(tp, ENABLE_RSS)) {
6979 /* Sync BD data before updating mailbox */
6980 wmb();
6981
6982 if (work_mask & RXD_OPAQUE_RING_STD) {
6983 tpr->rx_std_prod_idx = std_prod_idx &
6984 tp->rx_std_ring_mask;
6985 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6986 tpr->rx_std_prod_idx);
6987 }
6988 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6989 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6990 tp->rx_jmb_ring_mask;
6991 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6992 tpr->rx_jmb_prod_idx);
6993 }
6994 } else if (work_mask) {
6995 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6996 * updated before the producer indices can be updated.
6997 */
6998 smp_wmb();
6999
7000 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
7001 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
7002
7003 if (tnapi != &tp->napi[1]) {
7004 tp->rx_refill = true;
7005 napi_schedule(&tp->napi[1].napi);
7006 }
7007 }
7008
7009 return received;
7010 }
7011
7012 static void tg3_poll_link(struct tg3 *tp)
7013 {
7014 /* handle link change and other phy events */
7015 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7016 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7017
7018 if (sblk->status & SD_STATUS_LINK_CHG) {
7019 sblk->status = SD_STATUS_UPDATED |
7020 (sblk->status & ~SD_STATUS_LINK_CHG);
7021 spin_lock(&tp->lock);
7022 if (tg3_flag(tp, USE_PHYLIB)) {
7023 tw32_f(MAC_STATUS,
7024 (MAC_STATUS_SYNC_CHANGED |
7025 MAC_STATUS_CFG_CHANGED |
7026 MAC_STATUS_MI_COMPLETION |
7027 MAC_STATUS_LNKSTATE_CHANGED));
7028 udelay(40);
7029 } else
7030 tg3_setup_phy(tp, false);
7031 spin_unlock(&tp->lock);
7032 }
7033 }
7034 }
7035
7036 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7037 struct tg3_rx_prodring_set *dpr,
7038 struct tg3_rx_prodring_set *spr)
7039 {
7040 u32 si, di, cpycnt, src_prod_idx;
7041 int i, err = 0;
7042
7043 while (1) {
7044 src_prod_idx = spr->rx_std_prod_idx;
7045
7046 /* Make sure updates to the rx_std_buffers[] entries and the
7047 * standard producer index are seen in the correct order.
7048 */
7049 smp_rmb();
7050
7051 if (spr->rx_std_cons_idx == src_prod_idx)
7052 break;
7053
7054 if (spr->rx_std_cons_idx < src_prod_idx)
7055 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7056 else
7057 cpycnt = tp->rx_std_ring_mask + 1 -
7058 spr->rx_std_cons_idx;
7059
7060 cpycnt = min(cpycnt,
7061 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
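/* Illustrative wrap case: with a 1024-entry ring (mask 0x3ff),
 * cons_idx = 1020 and prod_idx = 4 give cpycnt = 4, copying only
 * the entries up to the end of the ring; the remainder is handled
 * on the next pass of the loop.
 */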
7062
7063 si = spr->rx_std_cons_idx;
7064 di = dpr->rx_std_prod_idx;
7065
7066 for (i = di; i < di + cpycnt; i++) {
7067 if (dpr->rx_std_buffers[i].data) {
7068 cpycnt = i - di;
7069 err = -ENOSPC;
7070 break;
7071 }
7072 }
7073
7074 if (!cpycnt)
7075 break;
7076
7077 /* Ensure that updates to the rx_std_buffers ring and the
7078 * shadowed hardware producer ring from tg3_recycle_skb() are
7079 * ordered correctly WRT the skb check above.
7080 */
7081 smp_rmb();
7082
7083 memcpy(&dpr->rx_std_buffers[di],
7084 &spr->rx_std_buffers[si],
7085 cpycnt * sizeof(struct ring_info));
7086
7087 for (i = 0; i < cpycnt; i++, di++, si++) {
7088 struct tg3_rx_buffer_desc *sbd, *dbd;
7089 sbd = &spr->rx_std[si];
7090 dbd = &dpr->rx_std[di];
7091 dbd->addr_hi = sbd->addr_hi;
7092 dbd->addr_lo = sbd->addr_lo;
7093 }
7094
7095 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7096 tp->rx_std_ring_mask;
7097 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7098 tp->rx_std_ring_mask;
7099 }
7100
7101 while (1) {
7102 src_prod_idx = spr->rx_jmb_prod_idx;
7103
7104 /* Make sure updates to the rx_jmb_buffers[] entries and
7105 * the jumbo producer index are seen in the correct order.
7106 */
7107 smp_rmb();
7108
7109 if (spr->rx_jmb_cons_idx == src_prod_idx)
7110 break;
7111
7112 if (spr->rx_jmb_cons_idx < src_prod_idx)
7113 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7114 else
7115 cpycnt = tp->rx_jmb_ring_mask + 1 -
7116 spr->rx_jmb_cons_idx;
7117
7118 cpycnt = min(cpycnt,
7119 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7120
7121 si = spr->rx_jmb_cons_idx;
7122 di = dpr->rx_jmb_prod_idx;
7123
7124 for (i = di; i < di + cpycnt; i++) {
7125 if (dpr->rx_jmb_buffers[i].data) {
7126 cpycnt = i - di;
7127 err = -ENOSPC;
7128 break;
7129 }
7130 }
7131
7132 if (!cpycnt)
7133 break;
7134
7135 /* Ensure that updates to the rx_jmb_buffers ring and the
7136 * shadowed hardware producer ring from tg3_recycle_skb() are
7137 * ordered correctly WRT the skb check above.
7138 */
7139 smp_rmb();
7140
7141 memcpy(&dpr->rx_jmb_buffers[di],
7142 &spr->rx_jmb_buffers[si],
7143 cpycnt * sizeof(struct ring_info));
7144
7145 for (i = 0; i < cpycnt; i++, di++, si++) {
7146 struct tg3_rx_buffer_desc *sbd, *dbd;
7147 sbd = &spr->rx_jmb[si].std;
7148 dbd = &dpr->rx_jmb[di].std;
7149 dbd->addr_hi = sbd->addr_hi;
7150 dbd->addr_lo = sbd->addr_lo;
7151 }
7152
7153 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7154 tp->rx_jmb_ring_mask;
7155 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7156 tp->rx_jmb_ring_mask;
7157 }
7158
7159 return err;
7160 }
7161
7162 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7163 {
7164 struct tg3 *tp = tnapi->tp;
7165
7166 /* run TX completion thread */
7167 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7168 tg3_tx(tnapi);
7169 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7170 return work_done;
7171 }
7172
7173 if (!tnapi->rx_rcb_prod_idx)
7174 return work_done;
7175
7176 /* run RX thread, within the bounds set by NAPI.
7177 * All RX "locking" is done by ensuring outside
7178 * code synchronizes with tg3->napi.poll()
7179 */
7180 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7181 work_done += tg3_rx(tnapi, budget - work_done);
7182
7183 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7184 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7185 int i, err = 0;
7186 u32 std_prod_idx = dpr->rx_std_prod_idx;
7187 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7188
7189 tp->rx_refill = false;
7190 for (i = 1; i <= tp->rxq_cnt; i++)
7191 err |= tg3_rx_prodring_xfer(tp, dpr,
7192 &tp->napi[i].prodring);
7193
7194 wmb();
7195
7196 if (std_prod_idx != dpr->rx_std_prod_idx)
7197 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7198 dpr->rx_std_prod_idx);
7199
7200 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7201 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7202 dpr->rx_jmb_prod_idx);
7203
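/* A failed transfer (-ENOSPC) leaves buffers queued on a source
 * ring; forcing the coalescing engine to fire schedules another
 * pass to retry the refill.
 */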
7204 if (err)
7205 tw32_f(HOSTCC_MODE, tp->coal_now);
7206 }
7207
7208 return work_done;
7209 }
7210
7211 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7212 {
7213 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7214 schedule_work(&tp->reset_task);
7215 }
7216
7217 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7218 {
7219 if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7220 cancel_work_sync(&tp->reset_task);
7221 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7222 }
7223
7224 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7225 {
7226 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7227 struct tg3 *tp = tnapi->tp;
7228 int work_done = 0;
7229 struct tg3_hw_status *sblk = tnapi->hw_status;
7230
7231 while (1) {
7232 work_done = tg3_poll_work(tnapi, work_done, budget);
7233
7234 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7235 goto tx_recovery;
7236
7237 if (unlikely(work_done >= budget))
7238 break;
7239
7240 /* tp->last_tag is used in tg3_int_reenable() below
7241 * to tell the hw how much work has been processed,
7242 * so we must read it before checking for more work.
7243 */
7244 tnapi->last_tag = sblk->status_tag;
7245 tnapi->last_irq_tag = tnapi->last_tag;
7246 rmb();
7247
7248 /* check for RX/TX work to do */
7249 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7250 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7251
7252 /* This test is not race-free, but looping again
7253 * will reduce the number of interrupts.
7254 */
7255 if (tnapi == &tp->napi[1] && tp->rx_refill)
7256 continue;
7257
7258 napi_complete_done(napi, work_done);
7259 /* Reenable interrupts. */
7260 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7261
7262 /* This test here is synchronized by napi_schedule()
7263 * and napi_complete() to close the race condition.
7264 */
7265 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7266 tw32(HOSTCC_MODE, tp->coalesce_mode |
7267 HOSTCC_MODE_ENABLE |
7268 tnapi->coal_now);
7269 }
7270 break;
7271 }
7272 }
7273
7274 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7275 return work_done;
7276
7277 tx_recovery:
7278 /* work_done is guaranteed to be less than budget. */
7279 napi_complete(napi);
7280 tg3_reset_task_schedule(tp);
7281 return work_done;
7282 }
7283
7284 static void tg3_process_error(struct tg3 *tp)
7285 {
7286 u32 val;
7287 bool real_error = false;
7288
7289 if (tg3_flag(tp, ERROR_PROCESSED))
7290 return;
7291
7292 /* Check Flow Attention register */
7293 val = tr32(HOSTCC_FLOW_ATTN);
7294 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7295 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7296 real_error = true;
7297 }
7298
7299 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7300 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7301 real_error = true;
7302 }
7303
7304 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7305 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7306 real_error = true;
7307 }
7308
7309 if (!real_error)
7310 return;
7311
7312 tg3_dump_state(tp);
7313
7314 tg3_flag_set(tp, ERROR_PROCESSED);
7315 tg3_reset_task_schedule(tp);
7316 }
7317
7318 static int tg3_poll(struct napi_struct *napi, int budget)
7319 {
7320 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7321 struct tg3 *tp = tnapi->tp;
7322 int work_done = 0;
7323 struct tg3_hw_status *sblk = tnapi->hw_status;
7324
7325 while (1) {
7326 if (sblk->status & SD_STATUS_ERROR)
7327 tg3_process_error(tp);
7328
7329 tg3_poll_link(tp);
7330
7331 work_done = tg3_poll_work(tnapi, work_done, budget);
7332
7333 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7334 goto tx_recovery;
7335
7336 if (unlikely(work_done >= budget))
7337 break;
7338
7339 if (tg3_flag(tp, TAGGED_STATUS)) {
7340 /* tp->last_tag is used in tg3_int_reenable() below
7341 * to tell the hw how much work has been processed,
7342 * so we must read it before checking for more work.
7343 */
7344 tnapi->last_tag = sblk->status_tag;
7345 tnapi->last_irq_tag = tnapi->last_tag;
7346 rmb();
7347 } else
7348 sblk->status &= ~SD_STATUS_UPDATED;
7349
7350 if (likely(!tg3_has_work(tnapi))) {
7351 napi_complete_done(napi, work_done);
7352 tg3_int_reenable(tnapi);
7353 break;
7354 }
7355 }
7356
7357 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7358 return work_done;
7359
7360 tx_recovery:
7361 /* work_done is guaranteed to be less than budget. */
7362 napi_complete(napi);
7363 tg3_reset_task_schedule(tp);
7364 return work_done;
7365 }
7366
7367 static void tg3_napi_disable(struct tg3 *tp)
7368 {
7369 int i;
7370
7371 for (i = tp->irq_cnt - 1; i >= 0; i--)
7372 napi_disable(&tp->napi[i].napi);
7373 }
7374
7375 static void tg3_napi_enable(struct tg3 *tp)
7376 {
7377 int i;
7378
7379 for (i = 0; i < tp->irq_cnt; i++)
7380 napi_enable(&tp->napi[i].napi);
7381 }
7382
7383 static void tg3_napi_init(struct tg3 *tp)
7384 {
7385 int i;
7386
7387 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll);
7388 for (i = 1; i < tp->irq_cnt; i++)
7389 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix);
7390 }
7391
7392 static void tg3_napi_fini(struct tg3 *tp)
7393 {
7394 int i;
7395
7396 for (i = 0; i < tp->irq_cnt; i++)
7397 netif_napi_del(&tp->napi[i].napi);
7398 }
7399
7400 static inline void tg3_netif_stop(struct tg3 *tp)
7401 {
7402 netif_trans_update(tp->dev); /* prevent tx timeout */
7403 tg3_napi_disable(tp);
7404 netif_carrier_off(tp->dev);
7405 netif_tx_disable(tp->dev);
7406 }
7407
7408 /* tp->lock must be held */
7409 static inline void tg3_netif_start(struct tg3 *tp)
7410 {
7411 tg3_ptp_resume(tp);
7412
7413 /* NOTE: unconditional netif_tx_wake_all_queues is only
7414 * appropriate so long as all callers are assured to
7415 * have free tx slots (such as after tg3_init_hw)
7416 */
7417 netif_tx_wake_all_queues(tp->dev);
7418
7419 if (tp->link_up)
7420 netif_carrier_on(tp->dev);
7421
7422 tg3_napi_enable(tp);
7423 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7424 tg3_enable_ints(tp);
7425 }
7426
7427 static void tg3_irq_quiesce(struct tg3 *tp)
7428 __releases(tp->lock)
7429 __acquires(tp->lock)
7430 {
7431 int i;
7432
7433 BUG_ON(tp->irq_sync);
7434
7435 tp->irq_sync = 1;
7436 smp_mb();
7437
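/* synchronize_irq() can sleep, so tp->lock is dropped while the
 * in-flight handlers are waited out; irq_sync is already set and
 * keeps those handlers from scheduling new NAPI work meanwhile.
 */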
7438 spin_unlock_bh(&tp->lock);
7439
7440 for (i = 0; i < tp->irq_cnt; i++)
7441 synchronize_irq(tp->napi[i].irq_vec);
7442
7443 spin_lock_bh(&tp->lock);
7444 }
7445
7446 /* Fully shut down all tg3 driver activity elsewhere in the system.
7447 * If irq_sync is non-zero, the IRQ handlers are synchronized with
7448 * as well. Most of the time this is not necessary, except when
7449 * shutting down the device.
7450 */
7451 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7452 {
7453 spin_lock_bh(&tp->lock);
7454 if (irq_sync)
7455 tg3_irq_quiesce(tp);
7456 }
7457
7458 static inline void tg3_full_unlock(struct tg3 *tp)
7459 {
7460 spin_unlock_bh(&tp->lock);
7461 }
7462
7463 /* One-shot MSI handler - Chip automatically disables interrupt
7464 * after sending MSI so driver doesn't have to do it.
7465 */
7466 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7467 {
7468 struct tg3_napi *tnapi = dev_id;
7469 struct tg3 *tp = tnapi->tp;
7470
7471 prefetch(tnapi->hw_status);
7472 if (tnapi->rx_rcb)
7473 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7474
7475 if (likely(!tg3_irq_sync(tp)))
7476 napi_schedule(&tnapi->napi);
7477
7478 return IRQ_HANDLED;
7479 }
7480
7481 /* MSI ISR - No need to check for interrupt sharing and no need to
7482 * flush status block and interrupt mailbox. PCI ordering rules
7483 * guarantee that MSI will arrive after the status block.
7484 */
7485 static irqreturn_t tg3_msi(int irq, void *dev_id)
7486 {
7487 struct tg3_napi *tnapi = dev_id;
7488 struct tg3 *tp = tnapi->tp;
7489
7490 prefetch(tnapi->hw_status);
7491 if (tnapi->rx_rcb)
7492 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7493 /*
7494 * Writing any value to intr-mbox-0 clears PCI INTA# and
7495 * chip-internal interrupt pending events.
7496 * Writing non-zero to intr-mbox-0 additionally tells the
7497 * NIC to stop sending us irqs, engaging "in-intr-handler"
7498 * event coalescing.
7499 */
7500 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7501 if (likely(!tg3_irq_sync(tp)))
7502 napi_schedule(&tnapi->napi);
7503
7504 return IRQ_RETVAL(1);
7505 }
7506
7507 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7508 {
7509 struct tg3_napi *tnapi = dev_id;
7510 struct tg3 *tp = tnapi->tp;
7511 struct tg3_hw_status *sblk = tnapi->hw_status;
7512 unsigned int handled = 1;
7513
7514 /* In INTx mode, it is possible for the interrupt to arrive at
7515 * the CPU before the status block posted prior to the interrupt.
7516 * Reading the PCI State register will confirm whether the
7517 * interrupt is ours and will flush the status block.
7518 */
7519 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7520 if (tg3_flag(tp, CHIP_RESETTING) ||
7521 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7522 handled = 0;
7523 goto out;
7524 }
7525 }
7526
7527 /*
7528 * Writing any value to intr-mbox-0 clears PCI INTA# and
7529 * chip-internal interrupt pending events.
7530 * Writing non-zero to intr-mbox-0 additionally tells the
7531 * NIC to stop sending us irqs, engaging "in-intr-handler"
7532 * event coalescing.
7533 *
7534 * Flush the mailbox to de-assert the IRQ immediately to prevent
7535 * spurious interrupts. The flush impacts performance but
7536 * excessive spurious interrupts can be worse in some cases.
7537 */
7538 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7539 if (tg3_irq_sync(tp))
7540 goto out;
7541 sblk->status &= ~SD_STATUS_UPDATED;
7542 if (likely(tg3_has_work(tnapi))) {
7543 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7544 napi_schedule(&tnapi->napi);
7545 } else {
7546 /* No work, shared interrupt perhaps? re-enable
7547 * interrupts, and flush that PCI write
7548 */
7549 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7550 0x00000000);
7551 }
7552 out:
7553 return IRQ_RETVAL(handled);
7554 }
7555
7556 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7557 {
7558 struct tg3_napi *tnapi = dev_id;
7559 struct tg3 *tp = tnapi->tp;
7560 struct tg3_hw_status *sblk = tnapi->hw_status;
7561 unsigned int handled = 1;
7562
7563 /* In INTx mode, it is possible for the interrupt to arrive at
7564 * the CPU before the status block posted prior to the interrupt.
7565 * Reading the PCI State register will confirm whether the
7566 * interrupt is ours and will flush the status block.
7567 */
7568 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7569 if (tg3_flag(tp, CHIP_RESETTING) ||
7570 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7571 handled = 0;
7572 goto out;
7573 }
7574 }
7575
7576 /*
7577 * Writing any value to intr-mbox-0 clears PCI INTA# and
7578 * chip-internal interrupt pending events.
7579 * Writing non-zero to intr-mbox-0 additionally tells the
7580 * NIC to stop sending us irqs, engaging "in-intr-handler"
7581 * event coalescing.
7582 *
7583 * Flush the mailbox to de-assert the IRQ immediately to prevent
7584 * spurious interrupts. The flush impacts performance but
7585 * excessive spurious interrupts can be worse in some cases.
7586 */
7587 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7588
7589 /*
7590 * In a shared interrupt configuration, sometimes other devices'
7591 * interrupts will scream. We record the current status tag here
7592 * so that the above check can report that the screaming interrupts
7593 * are unhandled. Eventually they will be silenced.
7594 */
7595 tnapi->last_irq_tag = sblk->status_tag;
7596
7597 if (tg3_irq_sync(tp))
7598 goto out;
7599
7600 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7601
7602 napi_schedule(&tnapi->napi);
7603
7604 out:
7605 return IRQ_RETVAL(handled);
7606 }
7607
7608 /* ISR for interrupt test */
7609 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7610 {
7611 struct tg3_napi *tnapi = dev_id;
7612 struct tg3 *tp = tnapi->tp;
7613 struct tg3_hw_status *sblk = tnapi->hw_status;
7614
7615 if ((sblk->status & SD_STATUS_UPDATED) ||
7616 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7617 tg3_disable_ints(tp);
7618 return IRQ_RETVAL(1);
7619 }
7620 return IRQ_RETVAL(0);
7621 }
7622
7623 #ifdef CONFIG_NET_POLL_CONTROLLER
7624 static void tg3_poll_controller(struct net_device *dev)
7625 {
7626 int i;
7627 struct tg3 *tp = netdev_priv(dev);
7628
7629 if (tg3_irq_sync(tp))
7630 return;
7631
7632 for (i = 0; i < tp->irq_cnt; i++)
7633 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7634 }
7635 #endif
7636
7637 static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue)
7638 {
7639 struct tg3 *tp = netdev_priv(dev);
7640
7641 if (netif_msg_tx_err(tp)) {
7642 netdev_err(dev, "transmit timed out, resetting\n");
7643 tg3_dump_state(tp);
7644 }
7645
7646 tg3_reset_task_schedule(tp);
7647 }
7648
7649 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7650 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7651 {
7652 u32 base = (u32) mapping & 0xffffffff;
7653
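/* Relies on 32-bit wraparound: if base + len (plus a small guard
 * margin of 8 bytes, presumably for hardware read-ahead) wraps past
 * zero, the buffer straddles a 4GB boundary.
 */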
7654 return base + len + 8 < base;
7655 }
7656
7657 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7658 * of any 4GB boundaries: 4G, 8G, etc
7659 */
7660 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7661 u32 len, u32 mss)
7662 {
7663 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7664 u32 base = (u32) mapping & 0xffffffff;
7665
7666 return ((base + len + (mss & 0x3fff)) < base);
7667 }
7668 return 0;
7669 }
7670
7671 /* Test for DMA addresses > 40-bit */
7672 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7673 int len)
7674 {
7675 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7676 if (tg3_flag(tp, 40BIT_DMA_BUG))
7677 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7678 return 0;
7679 #else
7680 return 0;
7681 #endif
7682 }
7683
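/* Fill in one hardware transmit buffer descriptor: the DMA address
 * is split into high/low 32-bit halves, and the length, flags, MSS
 * and VLAN tag are packed into the remaining descriptor words.
 */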
7684 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7685 dma_addr_t mapping, u32 len, u32 flags,
7686 u32 mss, u32 vlan)
7687 {
7688 txbd->addr_hi = ((u64) mapping >> 32);
7689 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7690 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7691 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7692 }
7693
7694 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7695 dma_addr_t map, u32 len, u32 flags,
7696 u32 mss, u32 vlan)
7697 {
7698 struct tg3 *tp = tnapi->tp;
7699 bool hwbug = false;
7700
7701 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7702 hwbug = true;
7703
7704 if (tg3_4g_overflow_test(map, len))
7705 hwbug = true;
7706
7707 if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7708 hwbug = true;
7709
7710 if (tg3_40bit_overflow_test(tp, map, len))
7711 hwbug = true;
7712
7713 if (tp->dma_limit) {
7714 u32 prvidx = *entry;
7715 u32 tmp_flag = flags & ~TXD_FLAG_END;
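/* The chip cannot DMA a buffer longer than tp->dma_limit through one
 * descriptor, so carve the buffer into limit-sized pieces; only the
 * final piece keeps the caller's end-of-packet flag.
 */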
7716 while (len > tp->dma_limit && *budget) {
7717 u32 frag_len = tp->dma_limit;
7718 len -= tp->dma_limit;
7719
7720 /* Avoid the 8byte DMA problem */
7721 if (len <= 8) {
7722 len += tp->dma_limit / 2;
7723 frag_len = tp->dma_limit / 2;
7724 }
7725
7726 tnapi->tx_buffers[*entry].fragmented = true;
7727
7728 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7729 frag_len, tmp_flag, mss, vlan);
7730 *budget -= 1;
7731 prvidx = *entry;
7732 *entry = NEXT_TX(*entry);
7733
7734 map += frag_len;
7735 }
7736
7737 if (len) {
7738 if (*budget) {
7739 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7740 len, flags, mss, vlan);
7741 *budget -= 1;
7742 *entry = NEXT_TX(*entry);
7743 } else {
7744 hwbug = true;
7745 tnapi->tx_buffers[prvidx].fragmented = false;
7746 }
7747 }
7748 } else {
7749 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7750 len, flags, mss, vlan);
7751 *entry = NEXT_TX(*entry);
7752 }
7753
7754 return hwbug;
7755 }
7756
7757 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7758 {
7759 int i;
7760 struct sk_buff *skb;
7761 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7762
7763 skb = txb->skb;
7764 txb->skb = NULL;
7765
7766 dma_unmap_single(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping),
7767 skb_headlen(skb), DMA_TO_DEVICE);
7768
7769 while (txb->fragmented) {
7770 txb->fragmented = false;
7771 entry = NEXT_TX(entry);
7772 txb = &tnapi->tx_buffers[entry];
7773 }
7774
7775 for (i = 0; i <= last; i++) {
7776 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7777
7778 entry = NEXT_TX(entry);
7779 txb = &tnapi->tx_buffers[entry];
7780
7781 dma_unmap_page(&tnapi->tp->pdev->dev,
7782 dma_unmap_addr(txb, mapping),
7783 skb_frag_size(frag), DMA_TO_DEVICE);
7784
7785 while (txb->fragmented) {
7786 txb->fragmented = false;
7787 entry = NEXT_TX(entry);
7788 txb = &tnapi->tx_buffers[entry];
7789 }
7790 }
7791 }
7792
7793 /* Workaround 4GB and 40-bit hardware DMA bugs. */
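/* The workaround copies the SKB into a fresh linear buffer (with
 * realigned headroom on the 5701) so that the new DMA mapping no
 * longer trips the boundary tests above.
 */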
7794 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7795 struct sk_buff **pskb,
7796 u32 *entry, u32 *budget,
7797 u32 base_flags, u32 mss, u32 vlan)
7798 {
7799 struct tg3 *tp = tnapi->tp;
7800 struct sk_buff *new_skb, *skb = *pskb;
7801 dma_addr_t new_addr = 0;
7802 int ret = 0;
7803
7804 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7805 new_skb = skb_copy(skb, GFP_ATOMIC);
7806 else {
7807 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7808
7809 new_skb = skb_copy_expand(skb,
7810 skb_headroom(skb) + more_headroom,
7811 skb_tailroom(skb), GFP_ATOMIC);
7812 }
7813
7814 if (!new_skb) {
7815 ret = -1;
7816 } else {
7817 /* New SKB is guaranteed to be linear. */
7818 new_addr = dma_map_single(&tp->pdev->dev, new_skb->data,
7819 new_skb->len, DMA_TO_DEVICE);
7820 /* Make sure the mapping succeeded */
7821 if (dma_mapping_error(&tp->pdev->dev, new_addr)) {
7822 dev_kfree_skb_any(new_skb);
7823 ret = -1;
7824 } else {
7825 u32 save_entry = *entry;
7826
7827 base_flags |= TXD_FLAG_END;
7828
7829 tnapi->tx_buffers[*entry].skb = new_skb;
7830 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7831 mapping, new_addr);
7832
7833 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7834 new_skb->len, base_flags,
7835 mss, vlan)) {
7836 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7837 dev_kfree_skb_any(new_skb);
7838 ret = -1;
7839 }
7840 }
7841 }
7842
7843 dev_consume_skb_any(skb);
7844 *pskb = new_skb;
7845 return ret;
7846 }
7847
7848 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7849 {
7850 /* Check if we will never have enough descriptors,
7851 * as gso_segs can be more than the current ring size.
7852 */
7853 return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7854 }
7855
7856 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7857
7858 /* Use GSO to workaround all TSO packets that meet HW bug conditions
7859 * indicated in tg3_tx_frag_set()
7860 */
7861 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7862 struct netdev_queue *txq, struct sk_buff *skb)
7863 {
7864 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
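/* Assumes at most three descriptors per GSO segment as a
 * conservative worst-case estimate.
 */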
7865 struct sk_buff *segs, *seg, *next;
7866
7867 /* Estimate the number of fragments in the worst case */
7868 if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7869 netif_tx_stop_queue(txq);
7870
7871 /* netif_tx_stop_queue() must be done before checking
7872 * tx index in tg3_tx_avail() below, because in
7873 * tg3_tx(), we update tx index before checking for
7874 * netif_tx_queue_stopped().
7875 */
7876 smp_mb();
7877 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7878 return NETDEV_TX_BUSY;
7879
7880 netif_tx_wake_queue(txq);
7881 }
7882
7883 segs = skb_gso_segment(skb, tp->dev->features &
7884 ~(NETIF_F_TSO | NETIF_F_TSO6));
7885 if (IS_ERR(segs) || !segs) {
7886 tnapi->tx_dropped++;
7887 goto tg3_tso_bug_end;
7888 }
7889
7890 skb_list_walk_safe(segs, seg, next) {
7891 skb_mark_not_on_list(seg);
7892 tg3_start_xmit(seg, tp->dev);
7893 }
7894
7895 tg3_tso_bug_end:
7896 dev_consume_skb_any(skb);
7897
7898 return NETDEV_TX_OK;
7899 }
7900
7901 /* hard_start_xmit for all devices */
7902 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7903 {
7904 struct tg3 *tp = netdev_priv(dev);
7905 u32 len, entry, base_flags, mss, vlan = 0;
7906 u32 budget;
7907 int i = -1, would_hit_hwbug;
7908 dma_addr_t mapping;
7909 struct tg3_napi *tnapi;
7910 struct netdev_queue *txq;
7911 unsigned int last;
7912 struct iphdr *iph = NULL;
7913 struct tcphdr *tcph = NULL;
7914 __sum16 tcp_csum = 0, ip_csum = 0;
7915 __be16 ip_tot_len = 0;
7916
7917 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7918 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7919 if (tg3_flag(tp, ENABLE_TSS))
7920 tnapi++;
7921
7922 budget = tg3_tx_avail(tnapi);
7923
7924 /* We are running in BH disabled context with netif_tx_lock
7925 * and TX reclaim runs via tp->napi.poll inside of a software
7926 * interrupt. Furthermore, IRQ processing runs lockless so we have
7927 * no IRQ context deadlocks to worry about either. Rejoice!
7928 */
7929 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7930 if (!netif_tx_queue_stopped(txq)) {
7931 netif_tx_stop_queue(txq);
7932
7933 /* This is a hard error, log it. */
7934 netdev_err(dev,
7935 "BUG! Tx Ring full when queue awake!\n");
7936 }
7937 return NETDEV_TX_BUSY;
7938 }
7939
7940 entry = tnapi->tx_prod;
7941 base_flags = 0;
7942
7943 mss = skb_shinfo(skb)->gso_size;
7944 if (mss) {
7945 u32 tcp_opt_len, hdr_len;
7946
7947 if (skb_cow_head(skb, 0))
7948 goto drop;
7949
7950 iph = ip_hdr(skb);
7951 tcp_opt_len = tcp_optlen(skb);
7952
7953 hdr_len = skb_tcp_all_headers(skb) - ETH_HLEN;
7954
7955 /* HW/FW can not correctly segment packets that have been
7956 * vlan encapsulated.
7957 */
7958 if (skb->protocol == htons(ETH_P_8021Q) ||
7959 skb->protocol == htons(ETH_P_8021AD)) {
7960 if (tg3_tso_bug_gso_check(tnapi, skb))
7961 return tg3_tso_bug(tp, tnapi, txq, skb);
7962 goto drop;
7963 }
7964
7965 if (!skb_is_gso_v6(skb)) {
7966 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7967 tg3_flag(tp, TSO_BUG)) {
7968 if (tg3_tso_bug_gso_check(tnapi, skb))
7969 return tg3_tso_bug(tp, tnapi, txq, skb);
7970 goto drop;
7971 }
7972 ip_csum = iph->check;
7973 ip_tot_len = iph->tot_len;
7974 iph->check = 0;
7975 iph->tot_len = htons(mss + hdr_len);
7976 }
7977
7978 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7979 TXD_FLAG_CPU_POST_DMA);
7980
7981 tcph = tcp_hdr(skb);
7982 tcp_csum = tcph->check;
7983
7984 if (tg3_flag(tp, HW_TSO_1) ||
7985 tg3_flag(tp, HW_TSO_2) ||
7986 tg3_flag(tp, HW_TSO_3)) {
7987 tcph->check = 0;
7988 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7989 } else {
7990 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7991 0, IPPROTO_TCP, 0);
7992 }
7993
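/* The hardware TSO engines take the header length in spare bits of
 * the mss word and/or base_flags; the exact encoding differs per
 * chip generation, hence the cases below.
 */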
7994 if (tg3_flag(tp, HW_TSO_3)) {
7995 mss |= (hdr_len & 0xc) << 12;
7996 if (hdr_len & 0x10)
7997 base_flags |= 0x00000010;
7998 base_flags |= (hdr_len & 0x3e0) << 5;
7999 } else if (tg3_flag(tp, HW_TSO_2))
8000 mss |= hdr_len << 9;
8001 else if (tg3_flag(tp, HW_TSO_1) ||
8002 tg3_asic_rev(tp) == ASIC_REV_5705) {
8003 if (tcp_opt_len || iph->ihl > 5) {
8004 int tsflags;
8005
8006 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8007 mss |= (tsflags << 11);
8008 }
8009 } else {
8010 if (tcp_opt_len || iph->ihl > 5) {
8011 int tsflags;
8012
8013 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8014 base_flags |= tsflags << 12;
8015 }
8016 }
8017 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8018 /* HW/FW can not correctly checksum packets that have been
8019 * vlan encapsulated.
8020 */
8021 if (skb->protocol == htons(ETH_P_8021Q) ||
8022 skb->protocol == htons(ETH_P_8021AD)) {
8023 if (skb_checksum_help(skb))
8024 goto drop;
8025 } else {
8026 base_flags |= TXD_FLAG_TCPUDP_CSUM;
8027 }
8028 }
8029
8030 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8031 !mss && skb->len > VLAN_ETH_FRAME_LEN)
8032 base_flags |= TXD_FLAG_JMB_PKT;
8033
8034 if (skb_vlan_tag_present(skb)) {
8035 base_flags |= TXD_FLAG_VLAN;
8036 vlan = skb_vlan_tag_get(skb);
8037 }
8038
8039 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8040 tg3_flag(tp, TX_TSTAMP_EN)) {
8041 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8042 base_flags |= TXD_FLAG_HWTSTAMP;
8043 }
8044
8045 len = skb_headlen(skb);
8046
8047 mapping = dma_map_single(&tp->pdev->dev, skb->data, len,
8048 DMA_TO_DEVICE);
8049 if (dma_mapping_error(&tp->pdev->dev, mapping))
8050 goto drop;
8051
8052
8053 tnapi->tx_buffers[entry].skb = skb;
8054 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8055
8056 would_hit_hwbug = 0;
8057
8058 if (tg3_flag(tp, 5701_DMA_BUG))
8059 would_hit_hwbug = 1;
8060
8061 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8062 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8063 mss, vlan)) {
8064 would_hit_hwbug = 1;
8065 } else if (skb_shinfo(skb)->nr_frags > 0) {
8066 u32 tmp_mss = mss;
8067
8068 if (!tg3_flag(tp, HW_TSO_1) &&
8069 !tg3_flag(tp, HW_TSO_2) &&
8070 !tg3_flag(tp, HW_TSO_3))
8071 tmp_mss = 0;
8072
8073 /* Now loop through additional data
8074 * fragments, and queue them.
8075 */
8076 last = skb_shinfo(skb)->nr_frags - 1;
8077 for (i = 0; i <= last; i++) {
8078 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8079
8080 len = skb_frag_size(frag);
8081 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8082 len, DMA_TO_DEVICE);
8083
8084 tnapi->tx_buffers[entry].skb = NULL;
8085 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8086 mapping);
8087 if (dma_mapping_error(&tp->pdev->dev, mapping))
8088 goto dma_error;
8089
8090 if (!budget ||
8091 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8092 len, base_flags |
8093 ((i == last) ? TXD_FLAG_END : 0),
8094 tmp_mss, vlan)) {
8095 would_hit_hwbug = 1;
8096 break;
8097 }
8098 }
8099 }
8100
8101 if (would_hit_hwbug) {
8102 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8103
8104 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8105 /* If it's a TSO packet, do GSO instead of
8106 * allocating and copying to a large linear SKB
8107 */
8108 if (ip_tot_len) {
8109 iph->check = ip_csum;
8110 iph->tot_len = ip_tot_len;
8111 }
8112 tcph->check = tcp_csum;
8113 return tg3_tso_bug(tp, tnapi, txq, skb);
8114 }
8115
8116 /* If the workaround fails due to memory/mapping
8117 * failure, silently drop this packet.
8118 */
8119 entry = tnapi->tx_prod;
8120 budget = tg3_tx_avail(tnapi);
8121 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8122 base_flags, mss, vlan))
8123 goto drop_nofree;
8124 }
8125
8126 skb_tx_timestamp(skb);
8127 netdev_tx_sent_queue(txq, skb->len);
8128
8129 /* Sync BD data before updating mailbox */
8130 wmb();
8131
8132 tnapi->tx_prod = entry;
8133 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8134 netif_tx_stop_queue(txq);
8135
8136 /* netif_tx_stop_queue() must be done before checking
8137 * tx index in tg3_tx_avail() below, because in
8138 * tg3_tx(), we update tx index before checking for
8139 * netif_tx_queue_stopped().
8140 */
8141 smp_mb();
8142 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8143 netif_tx_wake_queue(txq);
8144 }
8145
8146 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
8147 /* Packets are ready, update Tx producer idx on card. */
8148 tw32_tx_mbox(tnapi->prodmbox, entry);
8149 }
8150
8151 return NETDEV_TX_OK;
8152
8153 dma_error:
8154 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8155 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8156 drop:
8157 dev_kfree_skb_any(skb);
8158 drop_nofree:
8159 tnapi->tx_dropped++;
8160 return NETDEV_TX_OK;
8161 }
8162
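/* Switch the MAC's internal loopback path on or off; driven by the
 * NETIF_F_LOOPBACK feature via tg3_set_loopback() below.
 */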
8163 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8164 {
8165 if (enable) {
8166 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8167 MAC_MODE_PORT_MODE_MASK);
8168
8169 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8170
8171 if (!tg3_flag(tp, 5705_PLUS))
8172 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8173
8174 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8175 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8176 else
8177 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8178 } else {
8179 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8180
8181 if (tg3_flag(tp, 5705_PLUS) ||
8182 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8183 tg3_asic_rev(tp) == ASIC_REV_5700)
8184 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8185 }
8186
8187 tw32(MAC_MODE, tp->mac_mode);
8188 udelay(40);
8189 }
8190
8191 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8192 {
8193 u32 val, bmcr, mac_mode, ptest = 0;
8194
8195 tg3_phy_toggle_apd(tp, false);
8196 tg3_phy_toggle_automdix(tp, false);
8197
8198 if (extlpbk && tg3_phy_set_extloopbk(tp))
8199 return -EIO;
8200
8201 bmcr = BMCR_FULLDPLX;
8202 switch (speed) {
8203 case SPEED_10:
8204 break;
8205 case SPEED_100:
8206 bmcr |= BMCR_SPEED100;
8207 break;
8208 case SPEED_1000:
8209 default:
8210 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8211 speed = SPEED_100;
8212 bmcr |= BMCR_SPEED100;
8213 } else {
8214 speed = SPEED_1000;
8215 bmcr |= BMCR_SPEED1000;
8216 }
8217 }
8218
8219 if (extlpbk) {
8220 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8221 tg3_readphy(tp, MII_CTRL1000, &val);
8222 val |= CTL1000_AS_MASTER |
8223 CTL1000_ENABLE_MASTER;
8224 tg3_writephy(tp, MII_CTRL1000, val);
8225 } else {
8226 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8227 MII_TG3_FET_PTEST_TRIM_2;
8228 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8229 }
8230 } else
8231 bmcr |= BMCR_LOOPBACK;
8232
8233 tg3_writephy(tp, MII_BMCR, bmcr);
8234
8235 /* The write needs to be flushed for the FETs */
8236 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8237 tg3_readphy(tp, MII_BMCR, &bmcr);
8238
8239 udelay(40);
8240
8241 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8242 tg3_asic_rev(tp) == ASIC_REV_5785) {
8243 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8244 MII_TG3_FET_PTEST_FRC_TX_LINK |
8245 MII_TG3_FET_PTEST_FRC_TX_LOCK);
8246
8247 /* The write needs to be flushed for the AC131 */
8248 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8249 }
8250
8251 /* Reset to prevent losing 1st rx packet intermittently */
8252 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8253 tg3_flag(tp, 5780_CLASS)) {
8254 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8255 udelay(10);
8256 tw32_f(MAC_RX_MODE, tp->rx_mode);
8257 }
8258
8259 mac_mode = tp->mac_mode &
8260 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8261 if (speed == SPEED_1000)
8262 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8263 else
8264 mac_mode |= MAC_MODE_PORT_MODE_MII;
8265
8266 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8267 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8268
8269 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8270 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8271 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8272 mac_mode |= MAC_MODE_LINK_POLARITY;
8273
8274 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8275 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8276 }
8277
8278 tw32(MAC_MODE, mac_mode);
8279 udelay(40);
8280
8281 return 0;
8282 }
8283
8284 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8285 {
8286 struct tg3 *tp = netdev_priv(dev);
8287
8288 if (features & NETIF_F_LOOPBACK) {
8289 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8290 return;
8291
8292 spin_lock_bh(&tp->lock);
8293 tg3_mac_loopback(tp, true);
8294 netif_carrier_on(tp->dev);
8295 spin_unlock_bh(&tp->lock);
8296 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8297 } else {
8298 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8299 return;
8300
8301 spin_lock_bh(&tp->lock);
8302 tg3_mac_loopback(tp, false);
8303 /* Force link status check */
8304 tg3_setup_phy(tp, true);
8305 spin_unlock_bh(&tp->lock);
8306 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8307 }
8308 }
8309
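/* 5780-class chips cannot use TSO together with jumbo frames, so
 * mask the TSO feature bits whenever the MTU exceeds the standard
 * Ethernet frame size.
 */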
8310 static netdev_features_t tg3_fix_features(struct net_device *dev,
8311 netdev_features_t features)
8312 {
8313 struct tg3 *tp = netdev_priv(dev);
8314
8315 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8316 features &= ~NETIF_F_ALL_TSO;
8317
8318 return features;
8319 }
8320
8321 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8322 {
8323 netdev_features_t changed = dev->features ^ features;
8324
8325 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8326 tg3_set_loopback(dev, features);
8327
8328 return 0;
8329 }
8330
8331 static void tg3_rx_prodring_free(struct tg3 *tp,
8332 struct tg3_rx_prodring_set *tpr)
8333 {
8334 int i;
8335
8336 if (tpr != &tp->napi[0].prodring) {
8337 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8338 i = (i + 1) & tp->rx_std_ring_mask)
8339 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8340 tp->rx_pkt_map_sz);
8341
8342 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8343 for (i = tpr->rx_jmb_cons_idx;
8344 i != tpr->rx_jmb_prod_idx;
8345 i = (i + 1) & tp->rx_jmb_ring_mask) {
8346 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8347 TG3_RX_JMB_MAP_SZ);
8348 }
8349 }
8350
8351 return;
8352 }
8353
8354 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8355 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8356 tp->rx_pkt_map_sz);
8357
8358 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8359 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8360 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8361 TG3_RX_JMB_MAP_SZ);
8362 }
8363 }
8364
8365 /* Initialize rx rings for packet processing.
8366 *
8367 * The chip has been shut down and the driver detached from
8368 * the networking, so no interrupts or new tx packets will
8369 * end up in the driver. tp->{tx,}lock are held and thus
8370 * we may not sleep.
8371 */
8372 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8373 struct tg3_rx_prodring_set *tpr)
8374 {
8375 u32 i, rx_pkt_dma_sz;
8376
8377 tpr->rx_std_cons_idx = 0;
8378 tpr->rx_std_prod_idx = 0;
8379 tpr->rx_jmb_cons_idx = 0;
8380 tpr->rx_jmb_prod_idx = 0;
8381
8382 if (tpr != &tp->napi[0].prodring) {
8383 memset(&tpr->rx_std_buffers[0], 0,
8384 TG3_RX_STD_BUFF_RING_SIZE(tp));
8385 if (tpr->rx_jmb_buffers)
8386 memset(&tpr->rx_jmb_buffers[0], 0,
8387 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8388 goto done;
8389 }
8390
8391 /* Zero out all descriptors. */
8392 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8393
8394 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8395 if (tg3_flag(tp, 5780_CLASS) &&
8396 tp->dev->mtu > ETH_DATA_LEN)
8397 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8398 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8399
8400 /* Initialize invariants of the rings; we only set this
8401 * stuff once. This works because the card does not
8402 * write into the rx buffer posting rings.
8403 */
8404 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8405 struct tg3_rx_buffer_desc *rxd;
8406
8407 rxd = &tpr->rx_std[i];
8408 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8409 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8410 rxd->opaque = (RXD_OPAQUE_RING_STD |
8411 (i << RXD_OPAQUE_INDEX_SHIFT));
8412 }
8413
8414 /* Now allocate fresh SKBs for each rx ring. */
8415 for (i = 0; i < tp->rx_pending; i++) {
8416 unsigned int frag_size;
8417
8418 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8419 &frag_size) < 0) {
8420 netdev_warn(tp->dev,
8421 "Using a smaller RX standard ring. Only "
8422 "%d out of %d buffers were allocated "
8423 "successfully\n", i, tp->rx_pending);
8424 if (i == 0)
8425 goto initfail;
8426 tp->rx_pending = i;
8427 break;
8428 }
8429 }
8430
8431 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8432 goto done;
8433
8434 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8435
8436 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8437 goto done;
8438
8439 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8440 struct tg3_rx_buffer_desc *rxd;
8441
8442 rxd = &tpr->rx_jmb[i].std;
8443 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8444 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8445 RXD_FLAG_JUMBO;
8446 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8447 (i << RXD_OPAQUE_INDEX_SHIFT));
8448 }
8449
8450 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8451 unsigned int frag_size;
8452
8453 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8454 &frag_size) < 0) {
8455 netdev_warn(tp->dev,
8456 "Using a smaller RX jumbo ring. Only %d "
8457 "out of %d buffers were allocated "
8458 "successfully\n", i, tp->rx_jumbo_pending);
8459 if (i == 0)
8460 goto initfail;
8461 tp->rx_jumbo_pending = i;
8462 break;
8463 }
8464 }
8465
8466 done:
8467 return 0;
8468
8469 initfail:
8470 tg3_rx_prodring_free(tp, tpr);
8471 return -ENOMEM;
8472 }
8473
8474 static void tg3_rx_prodring_fini(struct tg3 *tp,
8475 struct tg3_rx_prodring_set *tpr)
8476 {
8477 kfree(tpr->rx_std_buffers);
8478 tpr->rx_std_buffers = NULL;
8479 kfree(tpr->rx_jmb_buffers);
8480 tpr->rx_jmb_buffers = NULL;
8481 if (tpr->rx_std) {
8482 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8483 tpr->rx_std, tpr->rx_std_mapping);
8484 tpr->rx_std = NULL;
8485 }
8486 if (tpr->rx_jmb) {
8487 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8488 tpr->rx_jmb, tpr->rx_jmb_mapping);
8489 tpr->rx_jmb = NULL;
8490 }
8491 }
8492
8493 static int tg3_rx_prodring_init(struct tg3 *tp,
8494 struct tg3_rx_prodring_set *tpr)
8495 {
8496 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8497 GFP_KERNEL);
8498 if (!tpr->rx_std_buffers)
8499 return -ENOMEM;
8500
8501 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8502 TG3_RX_STD_RING_BYTES(tp),
8503 &tpr->rx_std_mapping,
8504 GFP_KERNEL);
8505 if (!tpr->rx_std)
8506 goto err_out;
8507
8508 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8509 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8510 GFP_KERNEL);
8511 if (!tpr->rx_jmb_buffers)
8512 goto err_out;
8513
8514 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8515 TG3_RX_JMB_RING_BYTES(tp),
8516 &tpr->rx_jmb_mapping,
8517 GFP_KERNEL);
8518 if (!tpr->rx_jmb)
8519 goto err_out;
8520 }
8521
8522 return 0;
8523
8524 err_out:
8525 tg3_rx_prodring_fini(tp, tpr);
8526 return -ENOMEM;
8527 }
8528
8529 /* Free up pending packets in all rx/tx rings.
8530 *
8531 * The chip has been shut down and the driver detached from
8532 * the networking, so no interrupts or new tx packets will
8533 * end up in the driver. tp->{tx,}lock is not held and we are not
8534 * in an interrupt context and thus may sleep.
8535 */
8536 static void tg3_free_rings(struct tg3 *tp)
8537 {
8538 int i, j;
8539
8540 for (j = 0; j < tp->irq_cnt; j++) {
8541 struct tg3_napi *tnapi = &tp->napi[j];
8542
8543 tg3_rx_prodring_free(tp, &tnapi->prodring);
8544
8545 if (!tnapi->tx_buffers)
8546 continue;
8547
8548 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8549 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8550
8551 if (!skb)
8552 continue;
8553
8554 tg3_tx_skb_unmap(tnapi, i,
8555 skb_shinfo(skb)->nr_frags - 1);
8556
8557 dev_consume_skb_any(skb);
8558 }
8559 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8560 }
8561 }
8562
8563 /* Initialize tx/rx rings for packet processing.
8564 *
8565 * The chip has been shut down and the driver detached from
8566 * the networking, so no interrupts or new tx packets will
8567 * end up in the driver. tp->{tx,}lock are held and thus
8568 * we may not sleep.
8569 */
8570 static int tg3_init_rings(struct tg3 *tp)
8571 {
8572 int i;
8573
8574 /* Free up all the SKBs. */
8575 tg3_free_rings(tp);
8576
8577 for (i = 0; i < tp->irq_cnt; i++) {
8578 struct tg3_napi *tnapi = &tp->napi[i];
8579
8580 tnapi->last_tag = 0;
8581 tnapi->last_irq_tag = 0;
8582 tnapi->hw_status->status = 0;
8583 tnapi->hw_status->status_tag = 0;
8584 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8585
8586 tnapi->tx_prod = 0;
8587 tnapi->tx_cons = 0;
8588 if (tnapi->tx_ring)
8589 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8590
8591 tnapi->rx_rcb_ptr = 0;
8592 if (tnapi->rx_rcb)
8593 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8594
8595 if (tnapi->prodring.rx_std &&
8596 tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8597 tg3_free_rings(tp);
8598 return -ENOMEM;
8599 }
8600 }
8601
8602 return 0;
8603 }
8604
8605 static void tg3_mem_tx_release(struct tg3 *tp)
8606 {
8607 int i;
8608
8609 for (i = 0; i < tp->irq_max; i++) {
8610 struct tg3_napi *tnapi = &tp->napi[i];
8611
8612 if (tnapi->tx_ring) {
8613 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8614 tnapi->tx_ring, tnapi->tx_desc_mapping);
8615 tnapi->tx_ring = NULL;
8616 }
8617
8618 kfree(tnapi->tx_buffers);
8619 tnapi->tx_buffers = NULL;
8620 }
8621 }
8622
8623 static int tg3_mem_tx_acquire(struct tg3 *tp)
8624 {
8625 int i;
8626 struct tg3_napi *tnapi = &tp->napi[0];
8627
8628 /* If multivector TSS is enabled, vector 0 does not handle
8629 * tx interrupts. Don't allocate any resources for it.
8630 */
8631 if (tg3_flag(tp, ENABLE_TSS))
8632 tnapi++;
8633
8634 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8635 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8636 sizeof(struct tg3_tx_ring_info),
8637 GFP_KERNEL);
8638 if (!tnapi->tx_buffers)
8639 goto err_out;
8640
8641 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8642 TG3_TX_RING_BYTES,
8643 &tnapi->tx_desc_mapping,
8644 GFP_KERNEL);
8645 if (!tnapi->tx_ring)
8646 goto err_out;
8647 }
8648
8649 return 0;
8650
8651 err_out:
8652 tg3_mem_tx_release(tp);
8653 return -ENOMEM;
8654 }
8655
8656 static void tg3_mem_rx_release(struct tg3 *tp)
8657 {
8658 int i;
8659
8660 for (i = 0; i < tp->irq_max; i++) {
8661 struct tg3_napi *tnapi = &tp->napi[i];
8662
8663 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8664
8665 if (!tnapi->rx_rcb)
8666 continue;
8667
8668 dma_free_coherent(&tp->pdev->dev,
8669 TG3_RX_RCB_RING_BYTES(tp),
8670 tnapi->rx_rcb,
8671 tnapi->rx_rcb_mapping);
8672 tnapi->rx_rcb = NULL;
8673 }
8674 }
8675
8676 static int tg3_mem_rx_acquire(struct tg3 *tp)
8677 {
8678 unsigned int i, limit;
8679
8680 limit = tp->rxq_cnt;
8681
8682 /* If RSS is enabled, we need a (dummy) producer ring
8683 * set on vector zero. This is the true hw prodring.
8684 */
8685 if (tg3_flag(tp, ENABLE_RSS))
8686 limit++;
8687
8688 for (i = 0; i < limit; i++) {
8689 struct tg3_napi *tnapi = &tp->napi[i];
8690
8691 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8692 goto err_out;
8693
8694 /* If multivector RSS is enabled, vector 0
8695 * does not handle rx or tx interrupts.
8696 * Don't allocate any resources for it.
8697 */
8698 if (!i && tg3_flag(tp, ENABLE_RSS))
8699 continue;
8700
8701 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8702 TG3_RX_RCB_RING_BYTES(tp),
8703 &tnapi->rx_rcb_mapping,
8704 GFP_KERNEL);
8705 if (!tnapi->rx_rcb)
8706 goto err_out;
8707 }
8708
8709 return 0;
8710
8711 err_out:
8712 tg3_mem_rx_release(tp);
8713 return -ENOMEM;
8714 }
8715
8716 /*
8717 * Must not be invoked with interrupt sources disabled and
8718 * the hardware shut down.
8719 */
8720 static void tg3_free_consistent(struct tg3 *tp)
8721 {
8722 int i;
8723
8724 for (i = 0; i < tp->irq_cnt; i++) {
8725 struct tg3_napi *tnapi = &tp->napi[i];
8726
8727 if (tnapi->hw_status) {
8728 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8729 tnapi->hw_status,
8730 tnapi->status_mapping);
8731 tnapi->hw_status = NULL;
8732 }
8733 }
8734
8735 tg3_mem_rx_release(tp);
8736 tg3_mem_tx_release(tp);
8737
8738 /* tp->hw_stats can be referenced safely:
8739 * 1. under rtnl_lock
8740 * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8741 */
8742 if (tp->hw_stats) {
8743 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8744 tp->hw_stats, tp->stats_mapping);
8745 tp->hw_stats = NULL;
8746 }
8747 }
8748
8749 /*
8750 * Must not be invoked with interrupt sources disabled and
8751 * the hardware shut down. Can sleep.
8752 */
8753 static int tg3_alloc_consistent(struct tg3 *tp)
8754 {
8755 int i;
8756
8757 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8758 sizeof(struct tg3_hw_stats),
8759 &tp->stats_mapping, GFP_KERNEL);
8760 if (!tp->hw_stats)
8761 goto err_out;
8762
8763 for (i = 0; i < tp->irq_cnt; i++) {
8764 struct tg3_napi *tnapi = &tp->napi[i];
8765 struct tg3_hw_status *sblk;
8766
8767 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8768 TG3_HW_STATUS_SIZE,
8769 &tnapi->status_mapping,
8770 GFP_KERNEL);
8771 if (!tnapi->hw_status)
8772 goto err_out;
8773
8774 sblk = tnapi->hw_status;
8775
8776 if (tg3_flag(tp, ENABLE_RSS)) {
8777 u16 *prodptr = NULL;
8778
8779 /*
8780 * When RSS is enabled, the status block format changes
8781 * slightly. The "rx_jumbo_consumer", "reserved",
8782 * and "rx_mini_consumer" members get mapped to the
8783 * other three rx return ring producer indexes.
8784 */
8785 switch (i) {
8786 case 1:
8787 prodptr = &sblk->idx[0].rx_producer;
8788 break;
8789 case 2:
8790 prodptr = &sblk->rx_jumbo_consumer;
8791 break;
8792 case 3:
8793 prodptr = &sblk->reserved;
8794 break;
8795 case 4:
8796 prodptr = &sblk->rx_mini_consumer;
8797 break;
8798 }
8799 tnapi->rx_rcb_prod_idx = prodptr;
8800 } else {
8801 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8802 }
8803 }
8804
8805 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8806 goto err_out;
8807
8808 return 0;
8809
8810 err_out:
8811 tg3_free_consistent(tp);
8812 return -ENOMEM;
8813 }
8814
8815 #define MAX_WAIT_CNT 1000
8816
8817 /* To stop a block, clear the enable bit and poll till it
8818 * clears. tp->lock is held.
8819 */
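/* Each poll iteration below waits 100us, so MAX_WAIT_CNT bounds the
 * total wait at roughly 100ms per block.
 */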
8820 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8821 {
8822 unsigned int i;
8823 u32 val;
8824
8825 if (tg3_flag(tp, 5705_PLUS)) {
8826 switch (ofs) {
8827 case RCVLSC_MODE:
8828 case DMAC_MODE:
8829 case MBFREE_MODE:
8830 case BUFMGR_MODE:
8831 case MEMARB_MODE:
8832 /* We can't enable/disable these bits on the
8833 * 5705/5750, so just report success.
8834 */
8835 return 0;
8836
8837 default:
8838 break;
8839 }
8840 }
8841
8842 val = tr32(ofs);
8843 val &= ~enable_bit;
8844 tw32_f(ofs, val);
8845
8846 for (i = 0; i < MAX_WAIT_CNT; i++) {
8847 if (pci_channel_offline(tp->pdev)) {
8848 dev_err(&tp->pdev->dev,
8849 "tg3_stop_block device offline, "
8850 "ofs=%lx enable_bit=%x\n",
8851 ofs, enable_bit);
8852 return -ENODEV;
8853 }
8854
8855 udelay(100);
8856 val = tr32(ofs);
8857 if ((val & enable_bit) == 0)
8858 break;
8859 }
8860
8861 if (i == MAX_WAIT_CNT && !silent) {
8862 dev_err(&tp->pdev->dev,
8863 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8864 ofs, enable_bit);
8865 return -ENODEV;
8866 }
8867
8868 return 0;
8869 }
8870
8871 /* tp->lock is held. */
8872 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8873 {
8874 int i, err;
8875
8876 tg3_disable_ints(tp);
8877
8878 if (pci_channel_offline(tp->pdev)) {
8879 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8880 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8881 err = -ENODEV;
8882 goto err_no_dev;
8883 }
8884
8885 tp->rx_mode &= ~RX_MODE_ENABLE;
8886 tw32_f(MAC_RX_MODE, tp->rx_mode);
8887 udelay(10);
8888
8889 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8890 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8891 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8892 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8893 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8894 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8895
8896 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8897 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8898 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8899 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8900 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8901 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8902 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8903
8904 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8905 tw32_f(MAC_MODE, tp->mac_mode);
8906 udelay(40);
8907
8908 tp->tx_mode &= ~TX_MODE_ENABLE;
8909 tw32_f(MAC_TX_MODE, tp->tx_mode);
8910
8911 for (i = 0; i < MAX_WAIT_CNT; i++) {
8912 udelay(100);
8913 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8914 break;
8915 }
8916 if (i >= MAX_WAIT_CNT) {
8917 dev_err(&tp->pdev->dev,
8918 "%s timed out, TX_MODE_ENABLE will not clear "
8919 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8920 err |= -ENODEV;
8921 }
8922
8923 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8924 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8925 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8926
8927 tw32(FTQ_RESET, 0xffffffff);
8928 tw32(FTQ_RESET, 0x00000000);
8929
8930 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8931 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8932
8933 err_no_dev:
8934 for (i = 0; i < tp->irq_cnt; i++) {
8935 struct tg3_napi *tnapi = &tp->napi[i];
8936 if (tnapi->hw_status)
8937 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8938 }
8939
8940 return err;
8941 }
8942
8943 /* Save PCI command register before chip reset */
8944 static void tg3_save_pci_state(struct tg3 *tp)
8945 {
8946 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8947 }
8948
8949 /* Restore PCI state after chip reset */
8950 static void tg3_restore_pci_state(struct tg3 *tp)
8951 {
8952 u32 val;
8953
8954 /* Re-enable indirect register accesses. */
8955 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8956 tp->misc_host_ctrl);
8957
8958 /* Set MAX PCI retry to zero. */
8959 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8960 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8961 tg3_flag(tp, PCIX_MODE))
8962 val |= PCISTATE_RETRY_SAME_DMA;
8963 /* Allow reads and writes to the APE register and memory space. */
8964 if (tg3_flag(tp, ENABLE_APE))
8965 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8966 PCISTATE_ALLOW_APE_SHMEM_WR |
8967 PCISTATE_ALLOW_APE_PSPACE_WR;
8968 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8969
8970 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8971
8972 if (!tg3_flag(tp, PCI_EXPRESS)) {
8973 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8974 tp->pci_cacheline_sz);
8975 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8976 tp->pci_lat_timer);
8977 }
8978
8979 /* Make sure PCI-X relaxed ordering bit is clear. */
8980 if (tg3_flag(tp, PCIX_MODE)) {
8981 u16 pcix_cmd;
8982
8983 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8984 &pcix_cmd);
8985 pcix_cmd &= ~PCI_X_CMD_ERO;
8986 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8987 pcix_cmd);
8988 }
8989
8990 if (tg3_flag(tp, 5780_CLASS)) {
8991
8992 /* Chip reset on 5780 will reset MSI enable bit,
8993 * so need to restore it.
8994 */
8995 if (tg3_flag(tp, USING_MSI)) {
8996 u16 ctrl;
8997
8998 pci_read_config_word(tp->pdev,
8999 tp->msi_cap + PCI_MSI_FLAGS,
9000 &ctrl);
9001 pci_write_config_word(tp->pdev,
9002 tp->msi_cap + PCI_MSI_FLAGS,
9003 ctrl | PCI_MSI_FLAGS_ENABLE);
9004 val = tr32(MSGINT_MODE);
9005 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
9006 }
9007 }
9008 }
9009
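/* Force the MAC core clock to its highest rate so that bootcode can
 * finish promptly after a chip reset (see tg3_chip_reset());
 * tg3_restore_clk() undoes the override afterwards.
 */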
9010 static void tg3_override_clk(struct tg3 *tp)
9011 {
9012 u32 val;
9013
9014 switch (tg3_asic_rev(tp)) {
9015 case ASIC_REV_5717:
9016 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9017 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9018 TG3_CPMU_MAC_ORIDE_ENABLE);
9019 break;
9020
9021 case ASIC_REV_5719:
9022 case ASIC_REV_5720:
9023 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9024 break;
9025
9026 default:
9027 return;
9028 }
9029 }
9030
9031 static void tg3_restore_clk(struct tg3 *tp)
9032 {
9033 u32 val;
9034
9035 switch (tg3_asic_rev(tp)) {
9036 case ASIC_REV_5717:
9037 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9038 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9039 val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9040 break;
9041
9042 case ASIC_REV_5719:
9043 case ASIC_REV_5720:
9044 val = tr32(TG3_CPMU_CLCK_ORIDE);
9045 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9046 break;
9047
9048 default:
9049 return;
9050 }
9051 }
9052
9053 /* tp->lock is held. */
9054 static int tg3_chip_reset(struct tg3 *tp)
9055 __releases(tp->lock)
9056 __acquires(tp->lock)
9057 {
9058 u32 val;
9059 void (*write_op)(struct tg3 *, u32, u32);
9060 int i, err;
9061
9062 if (!pci_device_is_present(tp->pdev))
9063 return -ENODEV;
9064
9065 tg3_nvram_lock(tp);
9066
9067 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9068
9069 /* No matching tg3_nvram_unlock() after this because
9070 * chip reset below will undo the nvram lock.
9071 */
9072 tp->nvram_lock_cnt = 0;
9073
9074 /* GRC_MISC_CFG core clock reset will clear the memory
9075 * enable bit in PCI register 4 and the MSI enable bit
9076 * on some chips, so we save relevant registers here.
9077 */
9078 tg3_save_pci_state(tp);
9079
9080 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9081 tg3_flag(tp, 5755_PLUS))
9082 tw32(GRC_FASTBOOT_PC, 0);
9083
9084 /*
9085 * We must avoid the readl() that normally takes place.
9086 * It locks machines, causes machine checks, and triggers other
9087 * fun things. So, temporarily disable the 5701
9088 * hardware workaround, while we do the reset.
9089 */
9090 write_op = tp->write32;
9091 if (write_op == tg3_write_flush_reg32)
9092 tp->write32 = tg3_write32;
9093
9094 /* Prevent the irq handler from reading or writing PCI registers
9095 * during chip reset when the memory enable bit in the PCI command
9096 * register may be cleared. The chip does not generate interrupt
9097 * at this time, but the irq handler may still be called due to irq
9098 * sharing or irqpoll.
9099 */
9100 tg3_flag_set(tp, CHIP_RESETTING);
9101 for (i = 0; i < tp->irq_cnt; i++) {
9102 struct tg3_napi *tnapi = &tp->napi[i];
9103 if (tnapi->hw_status) {
9104 tnapi->hw_status->status = 0;
9105 tnapi->hw_status->status_tag = 0;
9106 }
9107 tnapi->last_tag = 0;
9108 tnapi->last_irq_tag = 0;
9109 }
9110 smp_mb();
9111
9112 tg3_full_unlock(tp);
9113
9114 for (i = 0; i < tp->irq_cnt; i++)
9115 synchronize_irq(tp->napi[i].irq_vec);
9116
9117 tg3_full_lock(tp, 0);
9118
9119 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9120 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9121 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9122 }
9123
9124 /* do the reset */
9125 val = GRC_MISC_CFG_CORECLK_RESET;
9126
9127 if (tg3_flag(tp, PCI_EXPRESS)) {
9128 /* Force PCIe 1.0a mode */
9129 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9130 !tg3_flag(tp, 57765_PLUS) &&
9131 tr32(TG3_PCIE_PHY_TSTCTL) ==
9132 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9133 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9134
9135 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9136 tw32(GRC_MISC_CFG, (1 << 29));
9137 val |= (1 << 29);
9138 }
9139 }
9140
9141 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9142 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9143 tw32(GRC_VCPU_EXT_CTRL,
9144 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9145 }
9146
9147 /* Set the clock to the highest frequency to avoid timeouts. With link
9148 * aware mode, the clock speed could be slow and bootcode does not
9149 * complete within the expected time. Override the clock to allow the
9150 * bootcode to finish sooner and then restore it.
9151 */
9152 tg3_override_clk(tp);
9153
9154 /* Manage gphy power for all CPMU absent PCIe devices. */
9155 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9156 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9157
9158 tw32(GRC_MISC_CFG, val);
9159
9160 /* restore 5701 hardware bug workaround write method */
9161 tp->write32 = write_op;
9162
9163 /* Unfortunately, we have to delay before the PCI read back.
9164 * Some 575X chips will not even respond to a PCI cfg access
9165 * when the reset command is given to the chip.
9166 *
9167 * How do these hardware designers expect things to work
9168 * properly if the PCI write is posted for a long period
9169 * of time? It is always necessary to have some method by
9170 * which a register read back can occur to push out the
9171 * write that performs the reset.
9172 *
9173 * For most tg3 variants the trick below was working.
9174 * Ho hum...
9175 */
9176 udelay(120);
9177
9178 /* Flush PCI posted writes. The normal MMIO registers
9179 * are inaccessible at this time so this is the only
9180 * way to do this reliably (actually, this is no longer
9181 * the case, see above). I tried to use indirect
9182 * register read/write but this upset some 5701 variants.
9183 */
9184 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9185
9186 udelay(120);
9187
9188 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9189 u16 val16;
9190
9191 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9192 int j;
9193 u32 cfg_val;
9194
9195 /* Wait for link training to complete. */
9196 for (j = 0; j < 5000; j++)
9197 udelay(100);
9198
9199 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9200 pci_write_config_dword(tp->pdev, 0xc4,
9201 cfg_val | (1 << 15));
9202 }
9203
9204 /* Clear the "no snoop" and "relaxed ordering" bits. */
9205 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9206 /*
9207 * Older PCIe devices only support the 128 byte
9208 * MPS setting. Enforce the restriction.
9209 */
9210 if (!tg3_flag(tp, CPMU_PRESENT))
9211 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9212 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9213
9214 /* Clear error status */
9215 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9216 PCI_EXP_DEVSTA_CED |
9217 PCI_EXP_DEVSTA_NFED |
9218 PCI_EXP_DEVSTA_FED |
9219 PCI_EXP_DEVSTA_URD);
9220 }
9221
9222 tg3_restore_pci_state(tp);
9223
9224 tg3_flag_clear(tp, CHIP_RESETTING);
9225 tg3_flag_clear(tp, ERROR_PROCESSED);
9226
9227 val = 0;
9228 if (tg3_flag(tp, 5780_CLASS))
9229 val = tr32(MEMARB_MODE);
9230 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9231
9232 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9233 tg3_stop_fw(tp);
9234 tw32(0x5000, 0x400);
9235 }
9236
9237 if (tg3_flag(tp, IS_SSB_CORE)) {
9238 /*
9239 * BCM4785: In order to avoid repercussions from using
9240 * potentially defective internal ROM, stop the Rx RISC CPU,
9241 * which is not required for operation.
9242 */
9243 tg3_stop_fw(tp);
9244 tg3_halt_cpu(tp, RX_CPU_BASE);
9245 }
9246
9247 err = tg3_poll_fw(tp);
9248 if (err)
9249 return err;
9250
9251 tw32(GRC_MODE, tp->grc_mode);
9252
9253 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9254 val = tr32(0xc4);
9255
9256 tw32(0xc4, val | (1 << 15));
9257 }
9258
9259 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9260 tg3_asic_rev(tp) == ASIC_REV_5705) {
9261 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9262 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9263 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9264 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9265 }
9266
9267 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9268 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9269 val = tp->mac_mode;
9270 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9271 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9272 val = tp->mac_mode;
9273 } else
9274 val = 0;
9275
9276 tw32_f(MAC_MODE, val);
9277 udelay(40);
9278
9279 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9280
9281 tg3_mdio_start(tp);
9282
9283 if (tg3_flag(tp, PCI_EXPRESS) &&
9284 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9285 tg3_asic_rev(tp) != ASIC_REV_5785 &&
9286 !tg3_flag(tp, 57765_PLUS)) {
9287 val = tr32(0x7c00);
9288
9289 tw32(0x7c00, val | (1 << 25));
9290 }
9291
9292 tg3_restore_clk(tp);
9293
9294 /* Increase the core clock speed to fix tx timeout issue for 5762
9295 * with 100Mbps link speed.
9296 */
9297 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9298 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9299 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9300 TG3_CPMU_MAC_ORIDE_ENABLE);
9301 }
9302
9303 /* Reprobe ASF enable state. */
9304 tg3_flag_clear(tp, ENABLE_ASF);
9305 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9306 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9307
9308 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9309 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9310 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9311 u32 nic_cfg;
9312
9313 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9314 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9315 tg3_flag_set(tp, ENABLE_ASF);
9316 tp->last_event_jiffies = jiffies;
9317 if (tg3_flag(tp, 5750_PLUS))
9318 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9319
9320 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9321 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9322 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9323 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9324 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9325 }
9326 }
9327
9328 return 0;
9329 }
9330
9331 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9332 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9333 static void __tg3_set_rx_mode(struct net_device *);
9334
9335 /* tp->lock is held. */
9336 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9337 {
9338 int err, i;
9339
9340 tg3_stop_fw(tp);
9341
9342 tg3_write_sig_pre_reset(tp, kind);
9343
9344 tg3_abort_hw(tp, silent);
9345 err = tg3_chip_reset(tp);
9346
9347 __tg3_set_mac_addr(tp, false);
9348
9349 tg3_write_sig_legacy(tp, kind);
9350 tg3_write_sig_post_reset(tp, kind);
9351
9352 if (tp->hw_stats) {
9353 /* Save the stats across chip resets... */
9354 tg3_get_nstats(tp, &tp->net_stats_prev);
9355 tg3_get_estats(tp, &tp->estats_prev);
9356
9357 /* And make sure the next sample is new data */
9358 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9359
9360 for (i = 0; i < TG3_IRQ_MAX_VECS; ++i) {
9361 struct tg3_napi *tnapi = &tp->napi[i];
9362
9363 tnapi->rx_dropped = 0;
9364 tnapi->tx_dropped = 0;
9365 }
9366 }
9367
9368 return err;
9369 }
9370
9371 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9372 {
9373 struct tg3 *tp = netdev_priv(dev);
9374 struct sockaddr *addr = p;
9375 int err = 0;
9376 bool skip_mac_1 = false;
9377
9378 if (!is_valid_ether_addr(addr->sa_data))
9379 return -EADDRNOTAVAIL;
9380
9381 eth_hw_addr_set(dev, addr->sa_data);
9382
9383 if (!netif_running(dev))
9384 return 0;
9385
9386 if (tg3_flag(tp, ENABLE_ASF)) {
9387 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9388
9389 addr0_high = tr32(MAC_ADDR_0_HIGH);
9390 addr0_low = tr32(MAC_ADDR_0_LOW);
9391 addr1_high = tr32(MAC_ADDR_1_HIGH);
9392 addr1_low = tr32(MAC_ADDR_1_LOW);
9393
9394 /* Skip MAC addr 1 if ASF is using it. */
9395 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9396 !(addr1_high == 0 && addr1_low == 0))
9397 skip_mac_1 = true;
9398 }
9399 spin_lock_bh(&tp->lock);
9400 __tg3_set_mac_addr(tp, skip_mac_1);
9401 __tg3_set_rx_mode(dev);
9402 spin_unlock_bh(&tp->lock);
9403
9404 return err;
9405 }
9406
9407 /* tp->lock is held. */
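/* Program one buffer-descriptor info block in NIC SRAM: the host
 * ring DMA address, the maxlen/flags word and, on pre-5705 chips,
 * the NIC-local ring address.
 */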
9408 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9409 dma_addr_t mapping, u32 maxlen_flags,
9410 u32 nic_addr)
9411 {
9412 tg3_write_mem(tp,
9413 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9414 ((u64) mapping >> 32));
9415 tg3_write_mem(tp,
9416 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9417 ((u64) mapping & 0xffffffff));
9418 tg3_write_mem(tp,
9419 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9420 maxlen_flags);
9421
9422 if (!tg3_flag(tp, 5705_PLUS))
9423 tg3_write_mem(tp,
9424 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9425 nic_addr);
9426 }
9427
9428
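/* Apply the ethtool tx coalescing parameters, either to the single
 * default vector or to each TSS vector's register block; vectors
 * beyond those in use are zeroed.
 */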
9429 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9430 {
9431 int i = 0;
9432
9433 if (!tg3_flag(tp, ENABLE_TSS)) {
9434 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9435 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9436 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9437 } else {
9438 tw32(HOSTCC_TXCOL_TICKS, 0);
9439 tw32(HOSTCC_TXMAX_FRAMES, 0);
9440 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9441
9442 for (; i < tp->txq_cnt; i++) {
9443 u32 reg;
9444
9445 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9446 tw32(reg, ec->tx_coalesce_usecs);
9447 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9448 tw32(reg, ec->tx_max_coalesced_frames);
9449 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9450 tw32(reg, ec->tx_max_coalesced_frames_irq);
9451 }
9452 }
9453
9454 for (; i < tp->irq_max - 1; i++) {
9455 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9456 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9457 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9458 }
9459 }
9460
9461 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9462 {
9463 int i = 0;
9464 u32 limit = tp->rxq_cnt;
9465
9466 if (!tg3_flag(tp, ENABLE_RSS)) {
9467 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9468 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9469 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9470 limit--;
9471 } else {
9472 tw32(HOSTCC_RXCOL_TICKS, 0);
9473 tw32(HOSTCC_RXMAX_FRAMES, 0);
9474 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9475 }
9476
9477 for (; i < limit; i++) {
9478 u32 reg;
9479
9480 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9481 tw32(reg, ec->rx_coalesce_usecs);
9482 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9483 tw32(reg, ec->rx_max_coalesced_frames);
9484 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9485 tw32(reg, ec->rx_max_coalesced_frames_irq);
9486 }
9487
9488 for (; i < tp->irq_max - 1; i++) {
9489 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9490 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9491 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9492 }
9493 }
9494
9495 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9496 {
9497 tg3_coal_tx_init(tp, ec);
9498 tg3_coal_rx_init(tp, ec);
9499
9500 if (!tg3_flag(tp, 5705_PLUS)) {
9501 u32 val = ec->stats_block_coalesce_usecs;
9502
9503 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9504 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9505
9506 if (!tp->link_up)
9507 val = 0;
9508
9509 tw32(HOSTCC_STAT_COAL_TICKS, val);
9510 }
9511 }
9512
9513 /* tp->lock is held. */
9514 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9515 {
9516 u32 txrcb, limit;
9517
9518 /* Disable all transmit rings but the first. */
9519 if (!tg3_flag(tp, 5705_PLUS))
9520 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9521 else if (tg3_flag(tp, 5717_PLUS))
9522 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9523 else if (tg3_flag(tp, 57765_CLASS) ||
9524 tg3_asic_rev(tp) == ASIC_REV_5762)
9525 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9526 else
9527 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9528
9529 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9530 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9531 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9532 BDINFO_FLAGS_DISABLED);
9533 }
9534
9535 /* tp->lock is held. */
9536 static void tg3_tx_rcbs_init(struct tg3 *tp)
9537 {
9538 int i = 0;
9539 u32 txrcb = NIC_SRAM_SEND_RCB;
9540
9541 if (tg3_flag(tp, ENABLE_TSS))
9542 i++;
9543
9544 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9545 struct tg3_napi *tnapi = &tp->napi[i];
9546
9547 if (!tnapi->tx_ring)
9548 continue;
9549
9550 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9551 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9552 NIC_SRAM_TX_BUFFER_DESC);
9553 }
9554 }
9555
9556 /* tp->lock is held. */
9557 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9558 {
9559 u32 rxrcb, limit;
9560
9561 /* Disable all receive return rings but the first. */
9562 if (tg3_flag(tp, 5717_PLUS))
9563 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9564 else if (!tg3_flag(tp, 5705_PLUS))
9565 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9566 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9567 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9568 tg3_flag(tp, 57765_CLASS))
9569 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9570 else
9571 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9572
9573 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9574 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9575 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9576 BDINFO_FLAGS_DISABLED);
9577 }
9578
9579 /* tp->lock is held. */
9580 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9581 {
9582 int i = 0;
9583 u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9584
9585 if (tg3_flag(tp, ENABLE_RSS))
9586 i++;
9587
9588 for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9589 struct tg3_napi *tnapi = &tp->napi[i];
9590
9591 if (!tnapi->rx_rcb)
9592 continue;
9593
9594 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9595 (tp->rx_ret_ring_mask + 1) <<
9596 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9597 }
9598 }
9599
9600 /* tp->lock is held. */
9601 static void tg3_rings_reset(struct tg3 *tp)
9602 {
9603 int i;
9604 u32 stblk;
9605 struct tg3_napi *tnapi = &tp->napi[0];
9606
9607 tg3_tx_rcbs_disable(tp);
9608
9609 tg3_rx_ret_rcbs_disable(tp);
9610
9611 /* Disable interrupts */
9612 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9613 tp->napi[0].chk_msi_cnt = 0;
9614 tp->napi[0].last_rx_cons = 0;
9615 tp->napi[0].last_tx_cons = 0;
9616
9617 /* Zero mailbox registers. */
9618 if (tg3_flag(tp, SUPPORT_MSIX)) {
9619 for (i = 1; i < tp->irq_max; i++) {
9620 tp->napi[i].tx_prod = 0;
9621 tp->napi[i].tx_cons = 0;
9622 if (tg3_flag(tp, ENABLE_TSS))
9623 tw32_mailbox(tp->napi[i].prodmbox, 0);
9624 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9625 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9626 tp->napi[i].chk_msi_cnt = 0;
9627 tp->napi[i].last_rx_cons = 0;
9628 tp->napi[i].last_tx_cons = 0;
9629 }
9630 if (!tg3_flag(tp, ENABLE_TSS))
9631 tw32_mailbox(tp->napi[0].prodmbox, 0);
9632 } else {
9633 tp->napi[0].tx_prod = 0;
9634 tp->napi[0].tx_cons = 0;
9635 tw32_mailbox(tp->napi[0].prodmbox, 0);
9636 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9637 }
9638
9639 /* Make sure the NIC-based send BD rings are disabled. */
9640 if (!tg3_flag(tp, 5705_PLUS)) {
9641 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9642 for (i = 0; i < 16; i++)
9643 tw32_tx_mbox(mbox + i * 8, 0);
9644 }
9645
9646 /* Clear status block in ram. */
9647 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9648
9649 /* Set status block DMA address */
9650 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9651 ((u64) tnapi->status_mapping >> 32));
9652 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9653 ((u64) tnapi->status_mapping & 0xffffffff));
9654
9655 stblk = HOSTCC_STATBLCK_RING1;
9656
9657 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9658 u64 mapping = (u64)tnapi->status_mapping;
9659 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9660 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9661 stblk += 8;
9662
9663 /* Clear status block in ram. */
9664 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9665 }
9666
9667 tg3_tx_rcbs_init(tp);
9668 tg3_rx_ret_rcbs_init(tp);
9669 }
9670
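/* Choose RX buffer descriptor replenish thresholds: the NIC-side limit
 * is half the on-chip BD cache (capped at rx_std_max_post), the host-side
 * limit is one eighth of the configured ring size (at least 1), and the
 * hardware is given the smaller of the two.
 */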
9671 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9672 {
9673 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9674
9675 if (!tg3_flag(tp, 5750_PLUS) ||
9676 tg3_flag(tp, 5780_CLASS) ||
9677 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9678 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9679 tg3_flag(tp, 57765_PLUS))
9680 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9681 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9682 tg3_asic_rev(tp) == ASIC_REV_5787)
9683 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9684 else
9685 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9686
9687 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9688 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9689
9690 val = min(nic_rep_thresh, host_rep_thresh);
9691 tw32(RCVBDI_STD_THRESH, val);
9692
9693 if (tg3_flag(tp, 57765_PLUS))
9694 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9695
9696 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9697 return;
9698
9699 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9700
9701 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9702
9703 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9704 tw32(RCVBDI_JUMBO_THRESH, val);
9705
9706 if (tg3_flag(tp, 57765_PLUS))
9707 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9708 }
9709
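/* Bitwise (LSB-first) CRC-32 over buf using the little-endian Ethernet
 * polynomial, seeded with all ones and inverted at the end. Used below
 * to derive the multicast hash filter bits.
 */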
9710 static inline u32 calc_crc(unsigned char *buf, int len)
9711 {
9712 u32 reg;
9713 u32 tmp;
9714 int j, k;
9715
9716 reg = 0xffffffff;
9717
9718 for (j = 0; j < len; j++) {
9719 reg ^= buf[j];
9720
9721 for (k = 0; k < 8; k++) {
9722 tmp = reg & 0x01;
9723
9724 reg >>= 1;
9725
9726 if (tmp)
9727 reg ^= CRC32_POLY_LE;
9728 }
9729 }
9730
9731 return ~reg;
9732 }
9733
9734 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9735 {
9736 /* accept or reject all multicast frames */
9737 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9738 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9739 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9740 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9741 }
9742
9743 static void __tg3_set_rx_mode(struct net_device *dev)
9744 {
9745 struct tg3 *tp = netdev_priv(dev);
9746 u32 rx_mode;
9747
9748 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9749 RX_MODE_KEEP_VLAN_TAG);
9750
9751 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9752 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9753 * flag clear.
9754 */
9755 if (!tg3_flag(tp, ENABLE_ASF))
9756 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9757 #endif
9758
9759 if (dev->flags & IFF_PROMISC) {
9760 /* Promiscuous mode. */
9761 rx_mode |= RX_MODE_PROMISC;
9762 } else if (dev->flags & IFF_ALLMULTI) {
9763 /* Accept all multicast. */
9764 tg3_set_multi(tp, 1);
9765 } else if (netdev_mc_empty(dev)) {
9766 /* Reject all multicast. */
9767 tg3_set_multi(tp, 0);
9768 } else {
9769 /* Accept one or more multicast(s). */
9770 struct netdev_hw_addr *ha;
9771 u32 mc_filter[4] = { 0, };
9772 u32 regidx;
9773 u32 bit;
9774 u32 crc;
9775
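		/* Hash each address into one of 128 filter bits: the low
		 * 7 bits of the inverted CRC select the bit, with bits 6:5
		 * choosing one of the four hash registers and bits 4:0 the
		 * position within it.
		 */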
9776 netdev_for_each_mc_addr(ha, dev) {
9777 crc = calc_crc(ha->addr, ETH_ALEN);
9778 bit = ~crc & 0x7f;
9779 regidx = (bit & 0x60) >> 5;
9780 bit &= 0x1f;
9781 mc_filter[regidx] |= (1 << bit);
9782 }
9783
9784 tw32(MAC_HASH_REG_0, mc_filter[0]);
9785 tw32(MAC_HASH_REG_1, mc_filter[1]);
9786 tw32(MAC_HASH_REG_2, mc_filter[2]);
9787 tw32(MAC_HASH_REG_3, mc_filter[3]);
9788 }
9789
9790 if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9791 rx_mode |= RX_MODE_PROMISC;
9792 } else if (!(dev->flags & IFF_PROMISC)) {
9793 /* Add all entries to the MAC address filter list */
9794 int i = 0;
9795 struct netdev_hw_addr *ha;
9796
9797 netdev_for_each_uc_addr(ha, dev) {
9798 __tg3_set_one_mac_addr(tp, ha->addr,
9799 i + TG3_UCAST_ADDR_IDX(tp));
9800 i++;
9801 }
9802 }
9803
9804 if (rx_mode != tp->rx_mode) {
9805 tp->rx_mode = rx_mode;
9806 tw32_f(MAC_RX_MODE, rx_mode);
9807 udelay(10);
9808 }
9809 }
9810
9811 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9812 {
9813 int i;
9814
9815 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9816 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9817 }
9818
9819 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9820 {
9821 int i;
9822
9823 if (!tg3_flag(tp, SUPPORT_MSIX))
9824 return;
9825
9826 if (tp->rxq_cnt == 1) {
9827 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9828 return;
9829 }
9830
9831 /* Validate table against current IRQ count */
9832 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9833 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9834 break;
9835 }
9836
9837 if (i != TG3_RSS_INDIR_TBL_SIZE)
9838 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9839 }
9840
9841 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9842 {
9843 int i = 0;
9844 u32 reg = MAC_RSS_INDIR_TBL_0;
9845
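	/* Each 32-bit indirection register packs eight 4-bit table
	 * entries, first entry in the most significant nibble.
	 */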
9846 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9847 u32 val = tp->rss_ind_tbl[i];
9848 i++;
9849 for (; i % 8; i++) {
9850 val <<= 4;
9851 val |= tp->rss_ind_tbl[i];
9852 }
9853 tw32(reg, val);
9854 reg += 4;
9855 }
9856 }
9857
9858 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9859 {
9860 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9861 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9862 else
9863 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9864 }
9865
9866 /* tp->lock is held. */
9867 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9868 {
9869 u32 val, rdmac_mode;
9870 int i, err, limit;
9871 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9872
9873 tg3_disable_ints(tp);
9874
9875 tg3_stop_fw(tp);
9876
9877 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9878
9879 if (tg3_flag(tp, INIT_COMPLETE))
9880 tg3_abort_hw(tp, 1);
9881
9882 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9883 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9884 tg3_phy_pull_config(tp);
9885 tg3_eee_pull_config(tp, NULL);
9886 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9887 }
9888
9889 /* Enable MAC control of LPI */
9890 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9891 tg3_setup_eee(tp);
9892
9893 if (reset_phy)
9894 tg3_phy_reset(tp);
9895
9896 err = tg3_chip_reset(tp);
9897 if (err)
9898 return err;
9899
9900 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9901
9902 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9903 val = tr32(TG3_CPMU_CTRL);
9904 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9905 tw32(TG3_CPMU_CTRL, val);
9906
9907 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9908 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9909 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9910 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9911
9912 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9913 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9914 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9915 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9916
9917 val = tr32(TG3_CPMU_HST_ACC);
9918 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9919 val |= CPMU_HST_ACC_MACCLK_6_25;
9920 tw32(TG3_CPMU_HST_ACC, val);
9921 }
9922
9923 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9924 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9925 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9926 PCIE_PWR_MGMT_L1_THRESH_4MS;
9927 tw32(PCIE_PWR_MGMT_THRESH, val);
9928
9929 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9930 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9931
9932 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9933
9934 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9935 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9936 }
9937
9938 if (tg3_flag(tp, L1PLLPD_EN)) {
9939 u32 grc_mode = tr32(GRC_MODE);
9940
9941 /* Access the lower 1K of PL PCIE block registers. */
9942 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9943 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9944
9945 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9946 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9947 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9948
9949 tw32(GRC_MODE, grc_mode);
9950 }
9951
9952 if (tg3_flag(tp, 57765_CLASS)) {
9953 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9954 u32 grc_mode = tr32(GRC_MODE);
9955
9956 /* Access the lower 1K of PL PCIE block registers. */
9957 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9958 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9959
9960 val = tr32(TG3_PCIE_TLDLPL_PORT +
9961 TG3_PCIE_PL_LO_PHYCTL5);
9962 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9963 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9964
9965 tw32(GRC_MODE, grc_mode);
9966 }
9967
9968 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9969 u32 grc_mode;
9970
9971 /* Fix transmit hangs */
9972 val = tr32(TG3_CPMU_PADRNG_CTL);
9973 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9974 tw32(TG3_CPMU_PADRNG_CTL, val);
9975
9976 grc_mode = tr32(GRC_MODE);
9977
9978 /* Access the lower 1K of DL PCIE block registers. */
9979 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9980 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9981
9982 val = tr32(TG3_PCIE_TLDLPL_PORT +
9983 TG3_PCIE_DL_LO_FTSMAX);
9984 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9985 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9986 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9987
9988 tw32(GRC_MODE, grc_mode);
9989 }
9990
9991 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9992 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9993 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9994 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9995 }
9996
9997 /* This works around an issue with Athlon chipsets on
9998 * B3 tigon3 silicon. This bit has no effect on any
9999 * other revision. But do not set this on PCI Express
10000 * chips and don't even touch the clocks if the CPMU is present.
10001 */
10002 if (!tg3_flag(tp, CPMU_PRESENT)) {
10003 if (!tg3_flag(tp, PCI_EXPRESS))
10004 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
10005 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
10006 }
10007
10008 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
10009 tg3_flag(tp, PCIX_MODE)) {
10010 val = tr32(TG3PCI_PCISTATE);
10011 val |= PCISTATE_RETRY_SAME_DMA;
10012 tw32(TG3PCI_PCISTATE, val);
10013 }
10014
10015 if (tg3_flag(tp, ENABLE_APE)) {
10016 /* Allow reads and writes to the
10017 * APE register and memory space.
10018 */
10019 val = tr32(TG3PCI_PCISTATE);
10020 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10021 PCISTATE_ALLOW_APE_SHMEM_WR |
10022 PCISTATE_ALLOW_APE_PSPACE_WR;
10023 tw32(TG3PCI_PCISTATE, val);
10024 }
10025
10026 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10027 /* Enable some hw fixes. */
10028 val = tr32(TG3PCI_MSI_DATA);
10029 val |= (1 << 26) | (1 << 28) | (1 << 29);
10030 tw32(TG3PCI_MSI_DATA, val);
10031 }
10032
10033 /* Descriptor ring init may make accesses to the
10034 * NIC SRAM area to set up the TX descriptors, so we
10035 * can only do this after the hardware has been
10036 * successfully reset.
10037 */
10038 err = tg3_init_rings(tp);
10039 if (err)
10040 return err;
10041
10042 if (tg3_flag(tp, 57765_PLUS)) {
10043 val = tr32(TG3PCI_DMA_RW_CTRL) &
10044 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10045 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10046 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10047 if (!tg3_flag(tp, 57765_CLASS) &&
10048 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10049 tg3_asic_rev(tp) != ASIC_REV_5762)
10050 val |= DMA_RWCTRL_TAGGED_STAT_WA;
10051 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10052 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10053 tg3_asic_rev(tp) != ASIC_REV_5761) {
10054 /* This value is determined during the probe-time DMA
10055 * engine test, tg3_test_dma.
10056 */
10057 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10058 }
10059
10060 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10061 GRC_MODE_4X_NIC_SEND_RINGS |
10062 GRC_MODE_NO_TX_PHDR_CSUM |
10063 GRC_MODE_NO_RX_PHDR_CSUM);
10064 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10065
10066 /* Pseudo-header checksum is done by hardware logic and not
10067 * the offload processors, so make the chip do the pseudo-
10068 * header checksums on receive. For transmit it is more
10069 * convenient to do the pseudo-header checksum in software
10070 * as Linux does that on transmit for us in all cases.
10071 */
10072 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10073
10074 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10075 if (tp->rxptpctl)
10076 tw32(TG3_RX_PTP_CTL,
10077 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10078
10079 if (tg3_flag(tp, PTP_CAPABLE))
10080 val |= GRC_MODE_TIME_SYNC_ENABLE;
10081
10082 tw32(GRC_MODE, tp->grc_mode | val);
10083
10084 /* On one of the AMD platforms, MRRS is restricted to 4000 because of
10085 * a south bridge limitation. As a workaround, the driver sets MRRS
10086 * to 2048 instead of the default 4096.
10087 */
10088 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10089 tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10090 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10091 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10092 }
10093
10094 /* Set up the timer prescaler. The clock always runs at 66 MHz; the value 65 written below divides it down to a 1 MHz timer tick. */
10095 val = tr32(GRC_MISC_CFG);
10096 val &= ~0xff;
10097 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10098 tw32(GRC_MISC_CFG, val);
10099
10100 /* Initialize MBUF/DESC pool. */
10101 if (tg3_flag(tp, 5750_PLUS)) {
10102 /* Do nothing. */
10103 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10104 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10105 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10106 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10107 else
10108 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10109 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10110 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10111 } else if (tg3_flag(tp, TSO_CAPABLE)) {
10112 int fw_len;
10113
10114 fw_len = tp->fw_len;
10115 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10116 tw32(BUFMGR_MB_POOL_ADDR,
10117 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10118 tw32(BUFMGR_MB_POOL_SIZE,
10119 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10120 }
10121
10122 if (tp->dev->mtu <= ETH_DATA_LEN) {
10123 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10124 tp->bufmgr_config.mbuf_read_dma_low_water);
10125 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10126 tp->bufmgr_config.mbuf_mac_rx_low_water);
10127 tw32(BUFMGR_MB_HIGH_WATER,
10128 tp->bufmgr_config.mbuf_high_water);
10129 } else {
10130 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10131 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10132 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10133 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10134 tw32(BUFMGR_MB_HIGH_WATER,
10135 tp->bufmgr_config.mbuf_high_water_jumbo);
10136 }
10137 tw32(BUFMGR_DMA_LOW_WATER,
10138 tp->bufmgr_config.dma_low_water);
10139 tw32(BUFMGR_DMA_HIGH_WATER,
10140 tp->bufmgr_config.dma_high_water);
10141
10142 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10143 if (tg3_asic_rev(tp) == ASIC_REV_5719)
10144 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10145 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10146 tg3_asic_rev(tp) == ASIC_REV_5762 ||
10147 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10148 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10149 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10150 tw32(BUFMGR_MODE, val);
10151 for (i = 0; i < 2000; i++) {
10152 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10153 break;
10154 udelay(10);
10155 }
10156 if (i >= 2000) {
10157 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10158 return -ENODEV;
10159 }
10160
10161 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10162 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10163
10164 tg3_setup_rxbd_thresholds(tp);
10165
10166 /* Initialize TG3_BDINFO's at:
10167 * RCVDBDI_STD_BD: standard eth size rx ring
10168 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
10169 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
10170 *
10171 * like so:
10172 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
10173 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
10174 * ring attribute flags
10175 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
10176 *
10177 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10178 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10179 *
10180 * The size of each ring is fixed in the firmware, but the location is
10181 * configurable.
10182 */
10183 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10184 ((u64) tpr->rx_std_mapping >> 32));
10185 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10186 ((u64) tpr->rx_std_mapping & 0xffffffff));
10187 if (!tg3_flag(tp, 5717_PLUS))
10188 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10189 NIC_SRAM_RX_BUFFER_DESC);
10190
10191 /* Disable the mini ring */
10192 if (!tg3_flag(tp, 5705_PLUS))
10193 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10194 BDINFO_FLAGS_DISABLED);
10195
10196 /* Program the jumbo buffer descriptor ring control
10197 * blocks on those devices that have them.
10198 */
10199 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10200 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10201
10202 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10203 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10204 ((u64) tpr->rx_jmb_mapping >> 32));
10205 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10206 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10207 val = TG3_RX_JMB_RING_SIZE(tp) <<
10208 BDINFO_FLAGS_MAXLEN_SHIFT;
10209 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10210 val | BDINFO_FLAGS_USE_EXT_RECV);
10211 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10212 tg3_flag(tp, 57765_CLASS) ||
10213 tg3_asic_rev(tp) == ASIC_REV_5762)
10214 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10215 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10216 } else {
10217 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10218 BDINFO_FLAGS_DISABLED);
10219 }
10220
10221 if (tg3_flag(tp, 57765_PLUS)) {
10222 val = TG3_RX_STD_RING_SIZE(tp);
10223 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10224 val |= (TG3_RX_STD_DMA_SZ << 2);
10225 } else
10226 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10227 } else
10228 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10229
10230 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10231
10232 tpr->rx_std_prod_idx = tp->rx_pending;
10233 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10234
10235 tpr->rx_jmb_prod_idx =
10236 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10237 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10238
10239 tg3_rings_reset(tp);
10240
10241 /* Initialize MAC address and backoff seed. */
10242 __tg3_set_mac_addr(tp, false);
10243
10244 /* MTU + ethernet header + FCS + optional VLAN tag */
10245 tw32(MAC_RX_MTU_SIZE,
10246 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10247
10248 /* The slot time is changed by tg3_setup_phy if we
10249 * run at gigabit with half duplex.
10250 */
10251 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10252 (6 << TX_LENGTHS_IPG_SHIFT) |
10253 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10254
10255 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10256 tg3_asic_rev(tp) == ASIC_REV_5762)
10257 val |= tr32(MAC_TX_LENGTHS) &
10258 (TX_LENGTHS_JMB_FRM_LEN_MSK |
10259 TX_LENGTHS_CNT_DWN_VAL_MSK);
10260
10261 tw32(MAC_TX_LENGTHS, val);
10262
10263 /* Receive rules. */
10264 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10265 tw32(RCVLPC_CONFIG, 0x0181);
10266
10267 /* Calculate RDMAC_MODE setting early, we need it to determine
10268 * the RCVLPC_STATE_ENABLE mask.
10269 */
10270 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10271 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10272 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10273 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10274 RDMAC_MODE_LNGREAD_ENAB);
10275
10276 if (tg3_asic_rev(tp) == ASIC_REV_5717)
10277 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10278
10279 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10280 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10281 tg3_asic_rev(tp) == ASIC_REV_57780)
10282 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10283 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10284 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10285
10286 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10287 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10288 if (tg3_flag(tp, TSO_CAPABLE)) {
10289 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10290 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10291 !tg3_flag(tp, IS_5788)) {
10292 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10293 }
10294 }
10295
10296 if (tg3_flag(tp, PCI_EXPRESS))
10297 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10298
10299 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10300 tp->dma_limit = 0;
10301 if (tp->dev->mtu <= ETH_DATA_LEN) {
10302 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10303 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10304 }
10305 }
10306
10307 if (tg3_flag(tp, HW_TSO_1) ||
10308 tg3_flag(tp, HW_TSO_2) ||
10309 tg3_flag(tp, HW_TSO_3))
10310 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10311
10312 if (tg3_flag(tp, 57765_PLUS) ||
10313 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10314 tg3_asic_rev(tp) == ASIC_REV_57780)
10315 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10316
10317 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10318 tg3_asic_rev(tp) == ASIC_REV_5762)
10319 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10320
10321 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10322 tg3_asic_rev(tp) == ASIC_REV_5784 ||
10323 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10324 tg3_asic_rev(tp) == ASIC_REV_57780 ||
10325 tg3_flag(tp, 57765_PLUS)) {
10326 u32 tgtreg;
10327
10328 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10329 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10330 else
10331 tgtreg = TG3_RDMA_RSRVCTRL_REG;
10332
10333 val = tr32(tgtreg);
10334 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10335 tg3_asic_rev(tp) == ASIC_REV_5762) {
10336 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10337 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10338 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10339 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10340 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10341 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10342 }
10343 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10344 }
10345
10346 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10347 tg3_asic_rev(tp) == ASIC_REV_5720 ||
10348 tg3_asic_rev(tp) == ASIC_REV_5762) {
10349 u32 tgtreg;
10350
10351 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10352 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10353 else
10354 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10355
10356 val = tr32(tgtreg);
10357 tw32(tgtreg, val |
10358 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10359 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10360 }
10361
10362 /* Receive/send statistics. */
10363 if (tg3_flag(tp, 5750_PLUS)) {
10364 val = tr32(RCVLPC_STATS_ENABLE);
10365 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10366 tw32(RCVLPC_STATS_ENABLE, val);
10367 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10368 tg3_flag(tp, TSO_CAPABLE)) {
10369 val = tr32(RCVLPC_STATS_ENABLE);
10370 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10371 tw32(RCVLPC_STATS_ENABLE, val);
10372 } else {
10373 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10374 }
10375 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10376 tw32(SNDDATAI_STATSENAB, 0xffffff);
10377 tw32(SNDDATAI_STATSCTRL,
10378 (SNDDATAI_SCTRL_ENABLE |
10379 SNDDATAI_SCTRL_FASTUPD));
10380
10381 /* Setup host coalescing engine. */
10382 tw32(HOSTCC_MODE, 0);
10383 for (i = 0; i < 2000; i++) {
10384 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10385 break;
10386 udelay(10);
10387 }
10388
10389 __tg3_set_coalesce(tp, &tp->coal);
10390
10391 if (!tg3_flag(tp, 5705_PLUS)) {
10392 /* Status/statistics block address. See tg3_timer,
10393 * the tg3_periodic_fetch_stats call there, and
10394 * tg3_get_stats to see how this works for 5705/5750 chips.
10395 */
10396 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10397 ((u64) tp->stats_mapping >> 32));
10398 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10399 ((u64) tp->stats_mapping & 0xffffffff));
10400 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10401
10402 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10403
10404 /* Clear statistics and status block memory areas */
10405 for (i = NIC_SRAM_STATS_BLK;
10406 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10407 i += sizeof(u32)) {
10408 tg3_write_mem(tp, i, 0);
10409 udelay(40);
10410 }
10411 }
10412
10413 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10414
10415 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10416 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10417 if (!tg3_flag(tp, 5705_PLUS))
10418 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10419
10420 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10421 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10422 /* reset to prevent losing 1st rx packet intermittently */
10423 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10424 udelay(10);
10425 }
10426
10427 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10428 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10429 MAC_MODE_FHDE_ENABLE;
10430 if (tg3_flag(tp, ENABLE_APE))
10431 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10432 if (!tg3_flag(tp, 5705_PLUS) &&
10433 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10434 tg3_asic_rev(tp) != ASIC_REV_5700)
10435 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10436 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10437 udelay(40);
10438
10439 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10440 * If TG3_FLAG_IS_NIC is zero, we should read the
10441 * register to preserve the GPIO settings for LOMs. The GPIOs,
10442 * whether used as inputs or outputs, are set by boot code after
10443 * reset.
10444 */
10445 if (!tg3_flag(tp, IS_NIC)) {
10446 u32 gpio_mask;
10447
10448 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10449 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10450 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10451
10452 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10453 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10454 GRC_LCLCTRL_GPIO_OUTPUT3;
10455
10456 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10457 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10458
10459 tp->grc_local_ctrl &= ~gpio_mask;
10460 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10461
10462 /* GPIO1 must be driven high for eeprom write protect */
10463 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10464 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10465 GRC_LCLCTRL_GPIO_OUTPUT1);
10466 }
10467 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10468 udelay(100);
10469
10470 if (tg3_flag(tp, USING_MSIX)) {
10471 val = tr32(MSGINT_MODE);
10472 val |= MSGINT_MODE_ENABLE;
10473 if (tp->irq_cnt > 1)
10474 val |= MSGINT_MODE_MULTIVEC_EN;
10475 if (!tg3_flag(tp, 1SHOT_MSI))
10476 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10477 tw32(MSGINT_MODE, val);
10478 }
10479
10480 if (!tg3_flag(tp, 5705_PLUS)) {
10481 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10482 udelay(40);
10483 }
10484
10485 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10486 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10487 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10488 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10489 WDMAC_MODE_LNGREAD_ENAB);
10490
10491 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10492 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10493 if (tg3_flag(tp, TSO_CAPABLE) &&
10494 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10495 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10496 /* nothing */
10497 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10498 !tg3_flag(tp, IS_5788)) {
10499 val |= WDMAC_MODE_RX_ACCEL;
10500 }
10501 }
10502
10503 /* Enable host coalescing bug fix */
10504 if (tg3_flag(tp, 5755_PLUS))
10505 val |= WDMAC_MODE_STATUS_TAG_FIX;
10506
10507 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10508 val |= WDMAC_MODE_BURST_ALL_DATA;
10509
10510 tw32_f(WDMAC_MODE, val);
10511 udelay(40);
10512
10513 if (tg3_flag(tp, PCIX_MODE)) {
10514 u16 pcix_cmd;
10515
10516 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10517 &pcix_cmd);
10518 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10519 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10520 pcix_cmd |= PCI_X_CMD_READ_2K;
10521 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10522 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10523 pcix_cmd |= PCI_X_CMD_READ_2K;
10524 }
10525 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10526 pcix_cmd);
10527 }
10528
10529 tw32_f(RDMAC_MODE, rdmac_mode);
10530 udelay(40);
10531
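	/* Apparent RDMA erratum on the 5719/5720 (see the
	 * 5719_5720_RDMA_BUG flag): if any RDMA length register reports a
	 * value larger than the MTU, set the TX length workaround bit.
	 * tg3_periodic_fetch_stats() clears it again once enough frames
	 * have been transmitted.
	 */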
10532 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10533 tg3_asic_rev(tp) == ASIC_REV_5720) {
10534 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10535 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10536 break;
10537 }
10538 if (i < TG3_NUM_RDMA_CHANNELS) {
10539 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10540 val |= tg3_lso_rd_dma_workaround_bit(tp);
10541 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10542 tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10543 }
10544 }
10545
10546 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10547 if (!tg3_flag(tp, 5705_PLUS))
10548 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10549
10550 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10551 tw32(SNDDATAC_MODE,
10552 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10553 else
10554 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10555
10556 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10557 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10558 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10559 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10560 val |= RCVDBDI_MODE_LRG_RING_SZ;
10561 tw32(RCVDBDI_MODE, val);
10562 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10563 if (tg3_flag(tp, HW_TSO_1) ||
10564 tg3_flag(tp, HW_TSO_2) ||
10565 tg3_flag(tp, HW_TSO_3))
10566 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10567 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10568 if (tg3_flag(tp, ENABLE_TSS))
10569 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10570 tw32(SNDBDI_MODE, val);
10571 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10572
10573 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10574 err = tg3_load_5701_a0_firmware_fix(tp);
10575 if (err)
10576 return err;
10577 }
10578
10579 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10580 /* Ignore any errors for the firmware download. If download
10581 * fails, the device will operate with EEE disabled.
10582 */
10583 tg3_load_57766_firmware(tp);
10584 }
10585
10586 if (tg3_flag(tp, TSO_CAPABLE)) {
10587 err = tg3_load_tso_firmware(tp);
10588 if (err)
10589 return err;
10590 }
10591
10592 tp->tx_mode = TX_MODE_ENABLE;
10593
10594 if (tg3_flag(tp, 5755_PLUS) ||
10595 tg3_asic_rev(tp) == ASIC_REV_5906)
10596 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10597
10598 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10599 tg3_asic_rev(tp) == ASIC_REV_5762) {
10600 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10601 tp->tx_mode &= ~val;
10602 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10603 }
10604
10605 tw32_f(MAC_TX_MODE, tp->tx_mode);
10606 udelay(100);
10607
10608 if (tg3_flag(tp, ENABLE_RSS)) {
10609 u32 rss_key[10];
10610
10611 tg3_rss_write_indir_tbl(tp);
10612
10613 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10614
10615 for (i = 0; i < 10 ; i++)
10616 tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10617 }
10618
10619 tp->rx_mode = RX_MODE_ENABLE;
10620 if (tg3_flag(tp, 5755_PLUS))
10621 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10622
10623 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10624 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10625
10626 if (tg3_flag(tp, ENABLE_RSS))
10627 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10628 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10629 RX_MODE_RSS_IPV6_HASH_EN |
10630 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10631 RX_MODE_RSS_IPV4_HASH_EN |
10632 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10633
10634 tw32_f(MAC_RX_MODE, tp->rx_mode);
10635 udelay(10);
10636
10637 tw32(MAC_LED_CTRL, tp->led_ctrl);
10638
10639 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10640 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10641 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10642 udelay(10);
10643 }
10644 tw32_f(MAC_RX_MODE, tp->rx_mode);
10645 udelay(10);
10646
10647 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10648 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10649 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10650 /* Set drive transmission level to 1.2V */
10651 /* only if the signal pre-emphasis bit is not set */
10652 val = tr32(MAC_SERDES_CFG);
10653 val &= 0xfffff000;
10654 val |= 0x880;
10655 tw32(MAC_SERDES_CFG, val);
10656 }
10657 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10658 tw32(MAC_SERDES_CFG, 0x616000);
10659 }
10660
10661 /* Prevent chip from dropping frames when flow control
10662 * is enabled.
10663 */
10664 if (tg3_flag(tp, 57765_CLASS))
10665 val = 1;
10666 else
10667 val = 2;
10668 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10669
10670 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10671 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10672 /* Use hardware link auto-negotiation */
10673 tg3_flag_set(tp, HW_AUTONEG);
10674 }
10675
10676 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10677 tg3_asic_rev(tp) == ASIC_REV_5714) {
10678 u32 tmp;
10679
10680 tmp = tr32(SERDES_RX_CTRL);
10681 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10682 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10683 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10684 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10685 }
10686
10687 if (!tg3_flag(tp, USE_PHYLIB)) {
10688 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10689 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10690
10691 err = tg3_setup_phy(tp, false);
10692 if (err)
10693 return err;
10694
10695 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10696 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10697 u32 tmp;
10698
10699 /* Clear CRC stats. */
10700 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10701 tg3_writephy(tp, MII_TG3_TEST1,
10702 tmp | MII_TG3_TEST1_CRC_EN);
10703 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10704 }
10705 }
10706 }
10707
10708 __tg3_set_rx_mode(tp->dev);
10709
10710 /* Initialize receive rules. */
10711 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10712 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10713 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10714 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10715
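	/* Zero the remaining receive rule slots: 5705+ parts (other than
	 * the 5780 class) have 8 slots, older parts have 16, and with ASF
	 * enabled the last 4 slots are left untouched, presumably for the
	 * firmware's own use.
	 */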
10716 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10717 limit = 8;
10718 else
10719 limit = 16;
10720 if (tg3_flag(tp, ENABLE_ASF))
10721 limit -= 4;
10722 switch (limit) {
10723 case 16:
10724 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10725 fallthrough;
10726 case 15:
10727 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10728 fallthrough;
10729 case 14:
10730 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10731 fallthrough;
10732 case 13:
10733 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10734 fallthrough;
10735 case 12:
10736 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10737 fallthrough;
10738 case 11:
10739 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10740 fallthrough;
10741 case 10:
10742 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10743 fallthrough;
10744 case 9:
10745 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10746 fallthrough;
10747 case 8:
10748 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10749 fallthrough;
10750 case 7:
10751 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10752 fallthrough;
10753 case 6:
10754 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10755 fallthrough;
10756 case 5:
10757 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10758 fallthrough;
10759 case 4:
10760 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10761 case 3:
10762 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10763 case 2:
10764 case 1:
10765
10766 default:
10767 break;
10768 }
10769
10770 if (tg3_flag(tp, ENABLE_APE))
10771 /* Write our heartbeat update interval to APE. */
10772 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10773 APE_HOST_HEARTBEAT_INT_5SEC);
10774
10775 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10776
10777 return 0;
10778 }
10779
10780 /* Called at device open time to get the chip ready for
10781 * packet processing. Invoked with tp->lock held.
10782 */
10783 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10784 {
10785 /* Chip may have been just powered on. If so, the boot code may still
10786 * be running initialization. Wait for it to finish to avoid races in
10787 * accessing the hardware.
10788 */
10789 tg3_enable_register_access(tp);
10790 tg3_poll_fw(tp);
10791
10792 tg3_switch_clocks(tp);
10793
10794 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10795
10796 return tg3_reset_hw(tp, reset_phy);
10797 }
10798
10799 #ifdef CONFIG_TIGON3_HWMON
10800 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10801 {
10802 u32 off, len = TG3_OCIR_LEN;
10803 int i;
10804
10805 for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) {
10806 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10807
10808 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10809 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10810 memset(ocir, 0, len);
10811 }
10812 }
10813
10814 /* sysfs attributes for hwmon */
10815 static ssize_t tg3_show_temp(struct device *dev,
10816 struct device_attribute *devattr, char *buf)
10817 {
10818 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10819 struct tg3 *tp = dev_get_drvdata(dev);
10820 u32 temperature;
10821
10822 spin_lock_bh(&tp->lock);
10823 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10824 sizeof(temperature));
10825 spin_unlock_bh(&tp->lock);
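	/* hwmon expects temperatures in millidegrees Celsius, hence the
	 * multiplication by 1000.
	 */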
10826 return sprintf(buf, "%u\n", temperature * 1000);
10827 }
10828
10829
10830 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
10831 TG3_TEMP_SENSOR_OFFSET);
10832 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
10833 TG3_TEMP_CAUTION_OFFSET);
10834 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
10835 TG3_TEMP_MAX_OFFSET);
10836
10837 static struct attribute *tg3_attrs[] = {
10838 &sensor_dev_attr_temp1_input.dev_attr.attr,
10839 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10840 &sensor_dev_attr_temp1_max.dev_attr.attr,
10841 NULL
10842 };
10843 ATTRIBUTE_GROUPS(tg3);
10844
10845 static void tg3_hwmon_close(struct tg3 *tp)
10846 {
10847 if (tp->hwmon_dev) {
10848 hwmon_device_unregister(tp->hwmon_dev);
10849 tp->hwmon_dev = NULL;
10850 }
10851 }
10852
10853 static void tg3_hwmon_open(struct tg3 *tp)
10854 {
10855 int i;
10856 u32 size = 0;
10857 struct pci_dev *pdev = tp->pdev;
10858 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10859
10860 tg3_sd_scan_scratchpad(tp, ocirs);
10861
10862 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10863 if (!ocirs[i].src_data_length)
10864 continue;
10865
10866 size += ocirs[i].src_hdr_length;
10867 size += ocirs[i].src_data_length;
10868 }
10869
10870 if (!size)
10871 return;
10872
10873 tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10874 tp, tg3_groups);
10875 if (IS_ERR(tp->hwmon_dev)) {
10876 tp->hwmon_dev = NULL;
10877 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10878 }
10879 }
10880 #else
10881 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10882 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10883 #endif /* CONFIG_TIGON3_HWMON */
10884
10885
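/* Accumulate a 32-bit hardware counter into a 64-bit {high, low} software
 * counter: if the new low word is smaller than the value just added, the
 * addition wrapped, so carry one into the high word.
 */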
10886 #define TG3_STAT_ADD32(PSTAT, REG) \
10887 do { u32 __val = tr32(REG); \
10888 (PSTAT)->low += __val; \
10889 if ((PSTAT)->low < __val) \
10890 (PSTAT)->high += 1; \
10891 } while (0)
10892
10893 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10894 {
10895 struct tg3_hw_stats *sp = tp->hw_stats;
10896
10897 if (!tp->link_up)
10898 return;
10899
10900 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10901 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10902 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10903 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10904 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10905 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10906 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10907 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10908 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10909 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10910 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10911 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10912 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10913 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10914 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10915 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10916 u32 val;
10917
10918 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10919 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10920 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10921 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10922 }
10923
10924 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10925 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10926 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10927 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10928 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10929 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10930 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10931 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10932 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10933 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10934 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10935 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10936 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10937 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10938
10939 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10940 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10941 tg3_asic_rev(tp) != ASIC_REV_5762 &&
10942 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10943 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10944 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10945 } else {
10946 u32 val = tr32(HOSTCC_FLOW_ATTN);
10947 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10948 if (val) {
10949 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10950 sp->rx_discards.low += val;
10951 if (sp->rx_discards.low < val)
10952 sp->rx_discards.high += 1;
10953 }
10954 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10955 }
10956 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10957 }
10958
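/* Work around occasionally missed MSIs: if a vector still has work
 * pending and its consumer indices have not moved since the previous
 * check, invoke the MSI handler by hand to restart processing.
 */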
10959 static void tg3_chk_missed_msi(struct tg3 *tp)
10960 {
10961 u32 i;
10962
10963 for (i = 0; i < tp->irq_cnt; i++) {
10964 struct tg3_napi *tnapi = &tp->napi[i];
10965
10966 if (tg3_has_work(tnapi)) {
10967 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10968 tnapi->last_tx_cons == tnapi->tx_cons) {
10969 if (tnapi->chk_msi_cnt < 1) {
10970 tnapi->chk_msi_cnt++;
10971 return;
10972 }
10973 tg3_msi(0, tnapi);
10974 }
10975 }
10976 tnapi->chk_msi_cnt = 0;
10977 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10978 tnapi->last_tx_cons = tnapi->tx_cons;
10979 }
10980 }
10981
10982 static void tg3_timer(struct timer_list *t)
10983 {
10984 struct tg3 *tp = from_timer(tp, t, timer);
10985
10986 spin_lock(&tp->lock);
10987
10988 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10989 spin_unlock(&tp->lock);
10990 goto restart_timer;
10991 }
10992
10993 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10994 tg3_flag(tp, 57765_CLASS))
10995 tg3_chk_missed_msi(tp);
10996
10997 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10998 /* BCM4785: Flush posted writes from GbE to host memory. */
10999 tr32(HOSTCC_MODE);
11000 }
11001
11002 if (!tg3_flag(tp, TAGGED_STATUS)) {
11003 /* All of this garbage is necessary because, when using
11004 * non-tagged IRQ status, the mailbox/status_block protocol
11005 * the chip uses with the CPU is race prone.
11006 */
11007 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
11008 tw32(GRC_LOCAL_CTRL,
11009 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
11010 } else {
11011 tw32(HOSTCC_MODE, tp->coalesce_mode |
11012 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
11013 }
11014
11015 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11016 spin_unlock(&tp->lock);
11017 tg3_reset_task_schedule(tp);
11018 goto restart_timer;
11019 }
11020 }
11021
11022 /* This part only runs once per second. */
11023 if (!--tp->timer_counter) {
11024 if (tg3_flag(tp, 5705_PLUS))
11025 tg3_periodic_fetch_stats(tp);
11026
11027 if (tp->setlpicnt && !--tp->setlpicnt)
11028 tg3_phy_eee_enable(tp);
11029
11030 if (tg3_flag(tp, USE_LINKCHG_REG)) {
11031 u32 mac_stat;
11032 int phy_event;
11033
11034 mac_stat = tr32(MAC_STATUS);
11035
11036 phy_event = 0;
11037 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11038 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11039 phy_event = 1;
11040 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11041 phy_event = 1;
11042
11043 if (phy_event)
11044 tg3_setup_phy(tp, false);
11045 } else if (tg3_flag(tp, POLL_SERDES)) {
11046 u32 mac_stat = tr32(MAC_STATUS);
11047 int need_setup = 0;
11048
11049 if (tp->link_up &&
11050 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11051 need_setup = 1;
11052 }
11053 if (!tp->link_up &&
11054 (mac_stat & (MAC_STATUS_PCS_SYNCED |
11055 MAC_STATUS_SIGNAL_DET))) {
11056 need_setup = 1;
11057 }
11058 if (need_setup) {
11059 if (!tp->serdes_counter) {
11060 tw32_f(MAC_MODE,
11061 (tp->mac_mode &
11062 ~MAC_MODE_PORT_MODE_MASK));
11063 udelay(40);
11064 tw32_f(MAC_MODE, tp->mac_mode);
11065 udelay(40);
11066 }
11067 tg3_setup_phy(tp, false);
11068 }
11069 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11070 tg3_flag(tp, 5780_CLASS)) {
11071 tg3_serdes_parallel_detect(tp);
11072 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11073 u32 cpmu = tr32(TG3_CPMU_STATUS);
11074 bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11075 TG3_CPMU_STATUS_LINK_MASK);
11076
11077 if (link_up != tp->link_up)
11078 tg3_setup_phy(tp, false);
11079 }
11080
11081 tp->timer_counter = tp->timer_multiplier;
11082 }
11083
11084 /* Heartbeat is only sent once every 2 seconds.
11085 *
11086 * The heartbeat is to tell the ASF firmware that the host
11087 * driver is still alive. In the event that the OS crashes,
11088 * ASF needs to reset the hardware to free up the FIFO space
11089 * that may be filled with rx packets destined for the host.
11090 * If the FIFO is full, ASF will no longer function properly.
11091 *
11092 * Unintended resets have been reported on real time kernels
11093 * where the timer doesn't run on time. Netpoll will also have
11094 * the same problem.
11095 *
11096 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11097 * to check the ring condition when the heartbeat is expiring
11098 * before doing the reset. This will prevent most unintended
11099 * resets.
11100 */
11101 if (!--tp->asf_counter) {
11102 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11103 tg3_wait_for_event_ack(tp);
11104
11105 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11106 FWCMD_NICDRV_ALIVE3);
11107 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11108 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11109 TG3_FW_UPDATE_TIMEOUT_SEC);
11110
11111 tg3_generate_fw_event(tp);
11112 }
11113 tp->asf_counter = tp->asf_multiplier;
11114 }
11115
11116 /* Update the APE heartbeat every 5 seconds. */
11117 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11118
11119 spin_unlock(&tp->lock);
11120
11121 restart_timer:
11122 tp->timer.expires = jiffies + tp->timer_offset;
11123 add_timer(&tp->timer);
11124 }
11125
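/* The driver timer ticks every HZ / 10 jiffies, or every HZ jiffies on
 * tagged-status devices that don't need the missed-MSI check;
 * timer_multiplier converts that tick rate back into the one-second
 * cadence used for the slow-path work in tg3_timer().
 */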
11126 static void tg3_timer_init(struct tg3 *tp)
11127 {
11128 if (tg3_flag(tp, TAGGED_STATUS) &&
11129 tg3_asic_rev(tp) != ASIC_REV_5717 &&
11130 !tg3_flag(tp, 57765_CLASS))
11131 tp->timer_offset = HZ;
11132 else
11133 tp->timer_offset = HZ / 10;
11134
11135 BUG_ON(tp->timer_offset > HZ);
11136
11137 tp->timer_multiplier = (HZ / tp->timer_offset);
11138 tp->asf_multiplier = (HZ / tp->timer_offset) *
11139 TG3_FW_UPDATE_FREQ_SEC;
11140
11141 timer_setup(&tp->timer, tg3_timer, 0);
11142 }
11143
11144 static void tg3_timer_start(struct tg3 *tp)
11145 {
11146 tp->asf_counter = tp->asf_multiplier;
11147 tp->timer_counter = tp->timer_multiplier;
11148
11149 tp->timer.expires = jiffies + tp->timer_offset;
11150 add_timer(&tp->timer);
11151 }
11152
11153 static void tg3_timer_stop(struct tg3 *tp)
11154 {
11155 del_timer_sync(&tp->timer);
11156 }
11157
11158 /* Restart hardware after configuration changes, self-test, etc.
11159 * Invoked with tp->lock held.
11160 */
11161 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11162 __releases(tp->lock)
11163 __acquires(tp->lock)
11164 {
11165 int err;
11166
11167 err = tg3_init_hw(tp, reset_phy);
11168 if (err) {
11169 netdev_err(tp->dev,
11170 "Failed to re-initialize device, aborting\n");
11171 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11172 tg3_full_unlock(tp);
11173 tg3_timer_stop(tp);
11174 tp->irq_sync = 0;
11175 tg3_napi_enable(tp);
11176 dev_close(tp->dev);
11177 tg3_full_lock(tp, 0);
11178 }
11179 return err;
11180 }
11181
11182 static void tg3_reset_task(struct work_struct *work)
11183 {
11184 struct tg3 *tp = container_of(work, struct tg3, reset_task);
11185 int err;
11186
11187 rtnl_lock();
11188 tg3_full_lock(tp, 0);
11189
11190 if (tp->pcierr_recovery || !netif_running(tp->dev) ||
11191 tp->pdev->error_state != pci_channel_io_normal) {
11192 tg3_flag_clear(tp, RESET_TASK_PENDING);
11193 tg3_full_unlock(tp);
11194 rtnl_unlock();
11195 return;
11196 }
11197
11198 tg3_full_unlock(tp);
11199
11200 tg3_phy_stop(tp);
11201
11202 tg3_netif_stop(tp);
11203
11204 tg3_full_lock(tp, 1);
11205
11206 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11207 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11208 tp->write32_rx_mbox = tg3_write_flush_reg32;
11209 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11210 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11211 }
11212
11213 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11214 err = tg3_init_hw(tp, true);
11215 if (err) {
11216 tg3_full_unlock(tp);
11217 tp->irq_sync = 0;
11218 tg3_napi_enable(tp);
11219 /* Clear this flag so that tg3_reset_task_cancel() will not
11220 * call cancel_work_sync() and wait forever.
11221 */
11222 tg3_flag_clear(tp, RESET_TASK_PENDING);
11223 dev_close(tp->dev);
11224 goto out;
11225 }
11226
11227 tg3_netif_start(tp);
11228 tg3_full_unlock(tp);
11229 tg3_phy_start(tp);
11230 tg3_flag_clear(tp, RESET_TASK_PENDING);
11231 out:
11232 rtnl_unlock();
11233 }
11234
11235 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11236 {
11237 irq_handler_t fn;
11238 unsigned long flags;
11239 char *name;
11240 struct tg3_napi *tnapi = &tp->napi[irq_num];
11241
11242 if (tp->irq_cnt == 1)
11243 name = tp->dev->name;
11244 else {
11245 name = &tnapi->irq_lbl[0];
11246 if (tnapi->tx_buffers && tnapi->rx_rcb)
11247 snprintf(name, IFNAMSIZ,
11248 "%s-txrx-%d", tp->dev->name, irq_num);
11249 else if (tnapi->tx_buffers)
11250 snprintf(name, IFNAMSIZ,
11251 "%s-tx-%d", tp->dev->name, irq_num);
11252 else if (tnapi->rx_rcb)
11253 snprintf(name, IFNAMSIZ,
11254 "%s-rx-%d", tp->dev->name, irq_num);
11255 else
11256 snprintf(name, IFNAMSIZ,
11257 "%s-%d", tp->dev->name, irq_num);
11258 name[IFNAMSIZ-1] = 0;
11259 }
11260
11261 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11262 fn = tg3_msi;
11263 if (tg3_flag(tp, 1SHOT_MSI))
11264 fn = tg3_msi_1shot;
11265 flags = 0;
11266 } else {
11267 fn = tg3_interrupt;
11268 if (tg3_flag(tp, TAGGED_STATUS))
11269 fn = tg3_interrupt_tagged;
11270 flags = IRQF_SHARED;
11271 }
11272
11273 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11274 }
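/* Naming example, following the snprintf() formats above: on a device
 * named "eth0", a vector that services both a TX and an RX ring as
 * vector 1 is requested as "eth0-txrx-1"; when only one IRQ is in use
 * the bare device name is used instead.
 */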
11275
11276 static int tg3_test_interrupt(struct tg3 *tp)
11277 {
11278 struct tg3_napi *tnapi = &tp->napi[0];
11279 struct net_device *dev = tp->dev;
11280 int err, i, intr_ok = 0;
11281 u32 val;
11282
11283 if (!netif_running(dev))
11284 return -ENODEV;
11285
11286 tg3_disable_ints(tp);
11287
11288 free_irq(tnapi->irq_vec, tnapi);
11289
11290 /*
11291 * Turn off MSI one shot mode. Otherwise this test has no
11292 * observable way to know whether the interrupt was delivered.
11293 */
11294 if (tg3_flag(tp, 57765_PLUS)) {
11295 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11296 tw32(MSGINT_MODE, val);
11297 }
11298
11299 err = request_irq(tnapi->irq_vec, tg3_test_isr,
11300 IRQF_SHARED, dev->name, tnapi);
11301 if (err)
11302 return err;
11303
11304 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11305 tg3_enable_ints(tp);
11306
11307 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11308 tnapi->coal_now);
11309
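	/* The coal_now kick above should make the host coalescing engine
	 * post an immediate status-block update and interrupt, so a
	 * working interrupt path ought to be observable within the
	 * roughly 50 ms polling window below.
	 */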
11310 for (i = 0; i < 5; i++) {
11311 u32 int_mbox, misc_host_ctrl;
11312
11313 int_mbox = tr32_mailbox(tnapi->int_mbox);
11314 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11315
11316 if ((int_mbox != 0) ||
11317 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11318 intr_ok = 1;
11319 break;
11320 }
11321
11322 if (tg3_flag(tp, 57765_PLUS) &&
11323 tnapi->hw_status->status_tag != tnapi->last_tag)
11324 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11325
11326 msleep(10);
11327 }
11328
11329 tg3_disable_ints(tp);
11330
11331 free_irq(tnapi->irq_vec, tnapi);
11332
11333 err = tg3_request_irq(tp, 0);
11334
11335 if (err)
11336 return err;
11337
11338 if (intr_ok) {
11339 /* Reenable MSI one shot mode. */
11340 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11341 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11342 tw32(MSGINT_MODE, val);
11343 }
11344 return 0;
11345 }
11346
11347 return -EIO;
11348 }
11349
11350 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
11351  * INTx mode is successfully restored.
11352  */
11353 static int tg3_test_msi(struct tg3 *tp)
11354 {
11355 int err;
11356 u16 pci_cmd;
11357
11358 if (!tg3_flag(tp, USING_MSI))
11359 return 0;
11360
11361 /* Turn off SERR reporting in case MSI terminates with Master
11362 * Abort.
11363 */
11364 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11365 pci_write_config_word(tp->pdev, PCI_COMMAND,
11366 pci_cmd & ~PCI_COMMAND_SERR);
11367
11368 err = tg3_test_interrupt(tp);
11369
11370 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11371
11372 if (!err)
11373 return 0;
11374
11375 /* other failures */
11376 if (err != -EIO)
11377 return err;
11378
11379 /* MSI test failed, go back to INTx mode */
11380 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11381 "to INTx mode. Please report this failure to the PCI "
11382 "maintainer and include system chipset information\n");
11383
11384 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11385
11386 pci_disable_msi(tp->pdev);
11387
11388 tg3_flag_clear(tp, USING_MSI);
11389 tp->napi[0].irq_vec = tp->pdev->irq;
11390
11391 err = tg3_request_irq(tp, 0);
11392 if (err)
11393 return err;
11394
11395 /* Need to reset the chip because the MSI cycle may have terminated
11396 * with Master Abort.
11397 */
11398 tg3_full_lock(tp, 1);
11399
11400 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11401 err = tg3_init_hw(tp, true);
11402
11403 tg3_full_unlock(tp);
11404
11405 if (err)
11406 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11407
11408 return err;
11409 }
11410
11411 static int tg3_request_firmware(struct tg3 *tp)
11412 {
11413 const struct tg3_firmware_hdr *fw_hdr;
11414
11415 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11416 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11417 tp->fw_needed);
11418 return -ENOENT;
11419 }
11420
11421 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11422
11423 /* Firmware blob starts with version numbers, followed by
11424 * start address and _full_ length including BSS sections
11425 	 * (which must be longer than the actual data, of course).
11426 */
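	/* A rough sketch of the header layout assumed by the code below
	 * (field names hypothetical; tg3.h has the real definition):
	 *
	 *	struct tg3_firmware_hdr {
	 *		__be32 version;
	 *		__be32 base_addr;
	 *		__be32 len;	(full length including BSS)
	 *	};
	 *
	 * followed by the image data, which is why fw_len may legitimately
	 * exceed fw->size - TG3_FW_HDR_LEN.
	 */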
11427
11428 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
11429 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11430 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11431 tp->fw_len, tp->fw_needed);
11432 release_firmware(tp->fw);
11433 tp->fw = NULL;
11434 return -EINVAL;
11435 }
11436
11437 /* We no longer need firmware; we have it. */
11438 tp->fw_needed = NULL;
11439 return 0;
11440 }
11441
11442 static u32 tg3_irq_count(struct tg3 *tp)
11443 {
11444 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11445
11446 if (irq_cnt > 1) {
11447 /* We want as many rx rings enabled as there are cpus.
11448 * In multiqueue MSI-X mode, the first MSI-X vector
11449 * only deals with link interrupts, etc, so we add
11450 * one to the number of vectors we are requesting.
11451 */
11452 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11453 }
11454
11455 return irq_cnt;
11456 }
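/* Worked example: rxq_cnt = 4 and txq_cnt = 1 give max(4, 1) = 4, so
 * (assuming irq_max >= 5) we request 4 + 1 = 5 vectors: one dedicated
 * to link and other misc events, plus one per RX ring.
 */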
11457
11458 static bool tg3_enable_msix(struct tg3 *tp)
11459 {
11460 int i, rc;
11461 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11462
11463 tp->txq_cnt = tp->txq_req;
11464 tp->rxq_cnt = tp->rxq_req;
11465 if (!tp->rxq_cnt)
11466 tp->rxq_cnt = netif_get_num_default_rss_queues();
11467 if (tp->rxq_cnt > tp->rxq_max)
11468 tp->rxq_cnt = tp->rxq_max;
11469
11470 /* Disable multiple TX rings by default. Simple round-robin hardware
11471 * scheduling of the TX rings can cause starvation of rings with
11472 * small packets when other rings have TSO or jumbo packets.
11473 */
11474 if (!tp->txq_req)
11475 tp->txq_cnt = 1;
11476
11477 tp->irq_cnt = tg3_irq_count(tp);
11478
11479 for (i = 0; i < tp->irq_max; i++) {
11480 msix_ent[i].entry = i;
11481 msix_ent[i].vector = 0;
11482 }
11483
11484 rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11485 if (rc < 0) {
11486 return false;
11487 } else if (rc < tp->irq_cnt) {
11488 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11489 tp->irq_cnt, rc);
11490 tp->irq_cnt = rc;
11491 tp->rxq_cnt = max(rc - 1, 1);
11492 if (tp->txq_cnt)
11493 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11494 }
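	/* Note: pci_enable_msix_range(..., 1, tp->irq_cnt) succeeds with
	 * any vector count in [1, tp->irq_cnt]; on a partial grant of rc
	 * vectors, rc - 1 remain for RX rings because vector 0 is
	 * reserved for link/misc events.
	 */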
11495
11496 for (i = 0; i < tp->irq_max; i++)
11497 tp->napi[i].irq_vec = msix_ent[i].vector;
11498
11499 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11500 pci_disable_msix(tp->pdev);
11501 return false;
11502 }
11503
11504 if (tp->irq_cnt == 1)
11505 return true;
11506
11507 tg3_flag_set(tp, ENABLE_RSS);
11508
11509 if (tp->txq_cnt > 1)
11510 tg3_flag_set(tp, ENABLE_TSS);
11511
11512 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11513
11514 return true;
11515 }
11516
11517 static void tg3_ints_init(struct tg3 *tp)
11518 {
11519 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11520 !tg3_flag(tp, TAGGED_STATUS)) {
11521 		/* All MSI-supporting chips should also provide tagged status;
11522 		 * warn and fall back to legacy INTx if this one does not.
11523 		 */
11524 netdev_warn(tp->dev,
11525 "MSI without TAGGED_STATUS? Not using MSI\n");
11526 goto defcfg;
11527 }
11528
11529 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11530 tg3_flag_set(tp, USING_MSIX);
11531 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11532 tg3_flag_set(tp, USING_MSI);
11533
11534 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11535 u32 msi_mode = tr32(MSGINT_MODE);
11536 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11537 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11538 if (!tg3_flag(tp, 1SHOT_MSI))
11539 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11540 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11541 }
11542 defcfg:
11543 if (!tg3_flag(tp, USING_MSIX)) {
11544 tp->irq_cnt = 1;
11545 tp->napi[0].irq_vec = tp->pdev->irq;
11546 }
11547
11548 if (tp->irq_cnt == 1) {
11549 tp->txq_cnt = 1;
11550 tp->rxq_cnt = 1;
11551 netif_set_real_num_tx_queues(tp->dev, 1);
11552 netif_set_real_num_rx_queues(tp->dev, 1);
11553 }
11554 }
11555
11556 static void tg3_ints_fini(struct tg3 *tp)
11557 {
11558 if (tg3_flag(tp, USING_MSIX))
11559 pci_disable_msix(tp->pdev);
11560 else if (tg3_flag(tp, USING_MSI))
11561 pci_disable_msi(tp->pdev);
11562 tg3_flag_clear(tp, USING_MSI);
11563 tg3_flag_clear(tp, USING_MSIX);
11564 tg3_flag_clear(tp, ENABLE_RSS);
11565 tg3_flag_clear(tp, ENABLE_TSS);
11566 }
11567
11568 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11569 bool init)
11570 {
11571 struct net_device *dev = tp->dev;
11572 int i, err;
11573
11574 /*
11575 * Setup interrupts first so we know how
11576 * many NAPI resources to allocate
11577 */
11578 tg3_ints_init(tp);
11579
11580 tg3_rss_check_indir_tbl(tp);
11581
11582 /* The placement of this call is tied
11583 * to the setup and use of Host TX descriptors.
11584 */
11585 err = tg3_alloc_consistent(tp);
11586 if (err)
11587 goto out_ints_fini;
11588
11589 tg3_napi_init(tp);
11590
11591 tg3_napi_enable(tp);
11592
11593 for (i = 0; i < tp->irq_cnt; i++) {
11594 err = tg3_request_irq(tp, i);
11595 if (err) {
11596 for (i--; i >= 0; i--) {
11597 struct tg3_napi *tnapi = &tp->napi[i];
11598
11599 free_irq(tnapi->irq_vec, tnapi);
11600 }
11601 goto out_napi_fini;
11602 }
11603 }
11604
11605 tg3_full_lock(tp, 0);
11606
11607 if (init)
11608 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11609
11610 err = tg3_init_hw(tp, reset_phy);
11611 if (err) {
11612 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11613 tg3_free_rings(tp);
11614 }
11615
11616 tg3_full_unlock(tp);
11617
11618 if (err)
11619 goto out_free_irq;
11620
11621 if (test_irq && tg3_flag(tp, USING_MSI)) {
11622 err = tg3_test_msi(tp);
11623
11624 if (err) {
11625 tg3_full_lock(tp, 0);
11626 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11627 tg3_free_rings(tp);
11628 tg3_full_unlock(tp);
11629
11630 goto out_napi_fini;
11631 }
11632
11633 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11634 u32 val = tr32(PCIE_TRANSACTION_CFG);
11635
11636 tw32(PCIE_TRANSACTION_CFG,
11637 val | PCIE_TRANS_CFG_1SHOT_MSI);
11638 }
11639 }
11640
11641 tg3_phy_start(tp);
11642
11643 tg3_hwmon_open(tp);
11644
11645 tg3_full_lock(tp, 0);
11646
11647 tg3_timer_start(tp);
11648 tg3_flag_set(tp, INIT_COMPLETE);
11649 tg3_enable_ints(tp);
11650
11651 tg3_ptp_resume(tp);
11652
11653 tg3_full_unlock(tp);
11654
11655 netif_tx_start_all_queues(dev);
11656
11657 /*
11658 	 * Reset the loopback feature if it was turned on while the device
11659 	 * was down, to make sure it is configured properly now.
11660 */
11661 if (dev->features & NETIF_F_LOOPBACK)
11662 tg3_set_loopback(dev, dev->features);
11663
11664 return 0;
11665
11666 out_free_irq:
11667 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11668 struct tg3_napi *tnapi = &tp->napi[i];
11669 free_irq(tnapi->irq_vec, tnapi);
11670 }
11671
11672 out_napi_fini:
11673 tg3_napi_disable(tp);
11674 tg3_napi_fini(tp);
11675 tg3_free_consistent(tp);
11676
11677 out_ints_fini:
11678 tg3_ints_fini(tp);
11679
11680 return err;
11681 }
11682
11683 static void tg3_stop(struct tg3 *tp)
11684 {
11685 int i;
11686
11687 tg3_reset_task_cancel(tp);
11688 tg3_netif_stop(tp);
11689
11690 tg3_timer_stop(tp);
11691
11692 tg3_hwmon_close(tp);
11693
11694 tg3_phy_stop(tp);
11695
11696 tg3_full_lock(tp, 1);
11697
11698 tg3_disable_ints(tp);
11699
11700 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11701 tg3_free_rings(tp);
11702 tg3_flag_clear(tp, INIT_COMPLETE);
11703
11704 tg3_full_unlock(tp);
11705
11706 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11707 struct tg3_napi *tnapi = &tp->napi[i];
11708 free_irq(tnapi->irq_vec, tnapi);
11709 }
11710
11711 tg3_ints_fini(tp);
11712
11713 tg3_napi_fini(tp);
11714
11715 tg3_free_consistent(tp);
11716 }
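/* The teardown above mirrors tg3_start() in reverse: cancel deferred
 * work, quiesce the data path and timer, halt the hardware under the
 * full lock, then release IRQs, NAPI contexts and DMA memory.
 */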
11717
11718 static int tg3_open(struct net_device *dev)
11719 {
11720 struct tg3 *tp = netdev_priv(dev);
11721 int err;
11722
11723 if (tp->pcierr_recovery) {
11724 netdev_err(dev, "Failed to open device. PCI error recovery "
11725 "in progress\n");
11726 return -EAGAIN;
11727 }
11728
11729 if (tp->fw_needed) {
11730 err = tg3_request_firmware(tp);
11731 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11732 if (err) {
11733 netdev_warn(tp->dev, "EEE capability disabled\n");
11734 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11735 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11736 netdev_warn(tp->dev, "EEE capability restored\n");
11737 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11738 }
11739 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11740 if (err)
11741 return err;
11742 } else if (err) {
11743 netdev_warn(tp->dev, "TSO capability disabled\n");
11744 tg3_flag_clear(tp, TSO_CAPABLE);
11745 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11746 netdev_notice(tp->dev, "TSO capability restored\n");
11747 tg3_flag_set(tp, TSO_CAPABLE);
11748 }
11749 }
11750
11751 tg3_carrier_off(tp);
11752
11753 err = tg3_power_up(tp);
11754 if (err)
11755 return err;
11756
11757 tg3_full_lock(tp, 0);
11758
11759 tg3_disable_ints(tp);
11760 tg3_flag_clear(tp, INIT_COMPLETE);
11761
11762 tg3_full_unlock(tp);
11763
11764 err = tg3_start(tp,
11765 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11766 true, true);
11767 if (err) {
11768 tg3_frob_aux_power(tp, false);
11769 pci_set_power_state(tp->pdev, PCI_D3hot);
11770 }
11771
11772 return err;
11773 }
11774
11775 static int tg3_close(struct net_device *dev)
11776 {
11777 struct tg3 *tp = netdev_priv(dev);
11778
11779 if (tp->pcierr_recovery) {
11780 netdev_err(dev, "Failed to close device. PCI error recovery "
11781 "in progress\n");
11782 return -EAGAIN;
11783 }
11784
11785 tg3_stop(tp);
11786
11787 if (pci_device_is_present(tp->pdev)) {
11788 tg3_power_down_prepare(tp);
11789
11790 tg3_carrier_off(tp);
11791 }
11792 return 0;
11793 }
11794
11795 static inline u64 get_stat64(tg3_stat64_t *val)
11796 {
11797 return ((u64)val->high << 32) | ((u64)val->low);
11798 }
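/* Example: high = 0x00000001 and low = 0x23456789 combine to the
 * 64-bit value 0x0000000123456789.
 */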
11799
11800 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11801 {
11802 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11803
11804 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11805 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11806 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11807 u32 val;
11808
11809 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11810 tg3_writephy(tp, MII_TG3_TEST1,
11811 val | MII_TG3_TEST1_CRC_EN);
11812 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11813 } else
11814 val = 0;
11815
11816 tp->phy_crc_errors += val;
11817
11818 return tp->phy_crc_errors;
11819 }
11820
11821 return get_stat64(&hw_stats->rx_fcs_errors);
11822 }
11823
11824 #define ESTAT_ADD(member) \
11825 estats->member = old_estats->member + \
11826 get_stat64(&hw_stats->member)
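/* For instance, ESTAT_ADD(rx_octets) expands to
 *	estats->rx_octets = old_estats->rx_octets +
 *			    get_stat64(&hw_stats->rx_octets);
 * i.e. each cumulative counter is the pre-reset total plus the live
 * hardware count.
 */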
11827
11828 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11829 {
11830 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11831 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11832
11833 ESTAT_ADD(rx_octets);
11834 ESTAT_ADD(rx_fragments);
11835 ESTAT_ADD(rx_ucast_packets);
11836 ESTAT_ADD(rx_mcast_packets);
11837 ESTAT_ADD(rx_bcast_packets);
11838 ESTAT_ADD(rx_fcs_errors);
11839 ESTAT_ADD(rx_align_errors);
11840 ESTAT_ADD(rx_xon_pause_rcvd);
11841 ESTAT_ADD(rx_xoff_pause_rcvd);
11842 ESTAT_ADD(rx_mac_ctrl_rcvd);
11843 ESTAT_ADD(rx_xoff_entered);
11844 ESTAT_ADD(rx_frame_too_long_errors);
11845 ESTAT_ADD(rx_jabbers);
11846 ESTAT_ADD(rx_undersize_packets);
11847 ESTAT_ADD(rx_in_length_errors);
11848 ESTAT_ADD(rx_out_length_errors);
11849 ESTAT_ADD(rx_64_or_less_octet_packets);
11850 ESTAT_ADD(rx_65_to_127_octet_packets);
11851 ESTAT_ADD(rx_128_to_255_octet_packets);
11852 ESTAT_ADD(rx_256_to_511_octet_packets);
11853 ESTAT_ADD(rx_512_to_1023_octet_packets);
11854 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11855 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11856 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11857 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11858 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11859
11860 ESTAT_ADD(tx_octets);
11861 ESTAT_ADD(tx_collisions);
11862 ESTAT_ADD(tx_xon_sent);
11863 ESTAT_ADD(tx_xoff_sent);
11864 ESTAT_ADD(tx_flow_control);
11865 ESTAT_ADD(tx_mac_errors);
11866 ESTAT_ADD(tx_single_collisions);
11867 ESTAT_ADD(tx_mult_collisions);
11868 ESTAT_ADD(tx_deferred);
11869 ESTAT_ADD(tx_excessive_collisions);
11870 ESTAT_ADD(tx_late_collisions);
11871 ESTAT_ADD(tx_collide_2times);
11872 ESTAT_ADD(tx_collide_3times);
11873 ESTAT_ADD(tx_collide_4times);
11874 ESTAT_ADD(tx_collide_5times);
11875 ESTAT_ADD(tx_collide_6times);
11876 ESTAT_ADD(tx_collide_7times);
11877 ESTAT_ADD(tx_collide_8times);
11878 ESTAT_ADD(tx_collide_9times);
11879 ESTAT_ADD(tx_collide_10times);
11880 ESTAT_ADD(tx_collide_11times);
11881 ESTAT_ADD(tx_collide_12times);
11882 ESTAT_ADD(tx_collide_13times);
11883 ESTAT_ADD(tx_collide_14times);
11884 ESTAT_ADD(tx_collide_15times);
11885 ESTAT_ADD(tx_ucast_packets);
11886 ESTAT_ADD(tx_mcast_packets);
11887 ESTAT_ADD(tx_bcast_packets);
11888 ESTAT_ADD(tx_carrier_sense_errors);
11889 ESTAT_ADD(tx_discards);
11890 ESTAT_ADD(tx_errors);
11891
11892 ESTAT_ADD(dma_writeq_full);
11893 ESTAT_ADD(dma_write_prioq_full);
11894 ESTAT_ADD(rxbds_empty);
11895 ESTAT_ADD(rx_discards);
11896 ESTAT_ADD(rx_errors);
11897 ESTAT_ADD(rx_threshold_hit);
11898
11899 ESTAT_ADD(dma_readq_full);
11900 ESTAT_ADD(dma_read_prioq_full);
11901 ESTAT_ADD(tx_comp_queue_full);
11902
11903 ESTAT_ADD(ring_set_send_prod_index);
11904 ESTAT_ADD(ring_status_update);
11905 ESTAT_ADD(nic_irqs);
11906 ESTAT_ADD(nic_avoided_irqs);
11907 ESTAT_ADD(nic_tx_threshold_hit);
11908
11909 ESTAT_ADD(mbuf_lwm_thresh_hit);
11910 }
11911
11912 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11913 {
11914 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11915 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11916 unsigned long rx_dropped;
11917 unsigned long tx_dropped;
11918 int i;
11919
11920 stats->rx_packets = old_stats->rx_packets +
11921 get_stat64(&hw_stats->rx_ucast_packets) +
11922 get_stat64(&hw_stats->rx_mcast_packets) +
11923 get_stat64(&hw_stats->rx_bcast_packets);
11924
11925 stats->tx_packets = old_stats->tx_packets +
11926 get_stat64(&hw_stats->tx_ucast_packets) +
11927 get_stat64(&hw_stats->tx_mcast_packets) +
11928 get_stat64(&hw_stats->tx_bcast_packets);
11929
11930 stats->rx_bytes = old_stats->rx_bytes +
11931 get_stat64(&hw_stats->rx_octets);
11932 stats->tx_bytes = old_stats->tx_bytes +
11933 get_stat64(&hw_stats->tx_octets);
11934
11935 stats->rx_errors = old_stats->rx_errors +
11936 get_stat64(&hw_stats->rx_errors);
11937 stats->tx_errors = old_stats->tx_errors +
11938 get_stat64(&hw_stats->tx_errors) +
11939 get_stat64(&hw_stats->tx_mac_errors) +
11940 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11941 get_stat64(&hw_stats->tx_discards);
11942
11943 stats->multicast = old_stats->multicast +
11944 get_stat64(&hw_stats->rx_mcast_packets);
11945 stats->collisions = old_stats->collisions +
11946 get_stat64(&hw_stats->tx_collisions);
11947
11948 stats->rx_length_errors = old_stats->rx_length_errors +
11949 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11950 get_stat64(&hw_stats->rx_undersize_packets);
11951
11952 stats->rx_frame_errors = old_stats->rx_frame_errors +
11953 get_stat64(&hw_stats->rx_align_errors);
11954 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11955 get_stat64(&hw_stats->tx_discards);
11956 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11957 get_stat64(&hw_stats->tx_carrier_sense_errors);
11958
11959 stats->rx_crc_errors = old_stats->rx_crc_errors +
11960 tg3_calc_crc_errors(tp);
11961
11962 stats->rx_missed_errors = old_stats->rx_missed_errors +
11963 get_stat64(&hw_stats->rx_discards);
11964
11965 /* Aggregate per-queue counters. The per-queue counters are updated
11966 * by a single writer, race-free. The result computed by this loop
11967 * might not be 100% accurate (counters can be updated in the middle of
11968 * the loop) but the next tg3_get_nstats() will recompute the current
11969 * value so it is acceptable.
11970 *
11971 * Note that these counters wrap around at 4G on 32bit machines.
11972 */
11973 rx_dropped = (unsigned long)(old_stats->rx_dropped);
11974 tx_dropped = (unsigned long)(old_stats->tx_dropped);
11975
11976 for (i = 0; i < tp->irq_cnt; i++) {
11977 struct tg3_napi *tnapi = &tp->napi[i];
11978
11979 rx_dropped += tnapi->rx_dropped;
11980 tx_dropped += tnapi->tx_dropped;
11981 }
11982
11983 stats->rx_dropped = rx_dropped;
11984 stats->tx_dropped = tx_dropped;
11985 }
11986
11987 static int tg3_get_regs_len(struct net_device *dev)
11988 {
11989 return TG3_REG_BLK_SIZE;
11990 }
11991
11992 static void tg3_get_regs(struct net_device *dev,
11993 struct ethtool_regs *regs, void *_p)
11994 {
11995 struct tg3 *tp = netdev_priv(dev);
11996
11997 regs->version = 0;
11998
11999 memset(_p, 0, TG3_REG_BLK_SIZE);
12000
12001 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12002 return;
12003
12004 tg3_full_lock(tp, 0);
12005
12006 tg3_dump_legacy_regs(tp, (u32 *)_p);
12007
12008 tg3_full_unlock(tp);
12009 }
12010
12011 static int tg3_get_eeprom_len(struct net_device *dev)
12012 {
12013 struct tg3 *tp = netdev_priv(dev);
12014
12015 return tp->nvram_size;
12016 }
12017
12018 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12019 {
12020 struct tg3 *tp = netdev_priv(dev);
12021 int ret, cpmu_restore = 0;
12022 u8 *pd;
12023 u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
12024 __be32 val;
12025
12026 if (tg3_flag(tp, NO_NVRAM))
12027 return -EINVAL;
12028
12029 offset = eeprom->offset;
12030 len = eeprom->len;
12031 eeprom->len = 0;
12032
12033 eeprom->magic = TG3_EEPROM_MAGIC;
12034
12035 /* Override clock, link aware and link idle modes */
12036 if (tg3_flag(tp, CPMU_PRESENT)) {
12037 cpmu_val = tr32(TG3_CPMU_CTRL);
12038 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
12039 CPMU_CTRL_LINK_IDLE_MODE)) {
12040 tw32(TG3_CPMU_CTRL, cpmu_val &
12041 ~(CPMU_CTRL_LINK_AWARE_MODE |
12042 CPMU_CTRL_LINK_IDLE_MODE));
12043 cpmu_restore = 1;
12044 }
12045 }
12046 tg3_override_clk(tp);
12047
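	/* Example of the alignment handling below: offset = 6, len = 10
	 * gives b_offset = 2 and b_count = 2, so bytes 6-7 are taken from
	 * the 4-byte word read at offset 4, after which the aligned loop
	 * continues at offset 8 with len = 8.
	 */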
12048 if (offset & 3) {
12049 /* adjustments to start on required 4 byte boundary */
12050 b_offset = offset & 3;
12051 b_count = 4 - b_offset;
12052 if (b_count > len) {
12053 /* i.e. offset=1 len=2 */
12054 b_count = len;
12055 }
12056 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12057 if (ret)
12058 goto eeprom_done;
12059 memcpy(data, ((char *)&val) + b_offset, b_count);
12060 len -= b_count;
12061 offset += b_count;
12062 eeprom->len += b_count;
12063 }
12064
12065 /* read bytes up to the last 4 byte boundary */
12066 pd = &data[eeprom->len];
12067 for (i = 0; i < (len - (len & 3)); i += 4) {
12068 ret = tg3_nvram_read_be32(tp, offset + i, &val);
12069 if (ret) {
12070 if (i)
12071 i -= 4;
12072 eeprom->len += i;
12073 goto eeprom_done;
12074 }
12075 memcpy(pd + i, &val, 4);
12076 if (need_resched()) {
12077 if (signal_pending(current)) {
12078 eeprom->len += i;
12079 ret = -EINTR;
12080 goto eeprom_done;
12081 }
12082 cond_resched();
12083 }
12084 }
12085 eeprom->len += i;
12086
12087 if (len & 3) {
12088 /* read last bytes not ending on 4 byte boundary */
12089 pd = &data[eeprom->len];
12090 b_count = len & 3;
12091 b_offset = offset + len - b_count;
12092 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12093 if (ret)
12094 goto eeprom_done;
12095 memcpy(pd, &val, b_count);
12096 eeprom->len += b_count;
12097 }
12098 ret = 0;
12099
12100 eeprom_done:
12101 /* Restore clock, link aware and link idle modes */
12102 tg3_restore_clk(tp);
12103 if (cpmu_restore)
12104 tw32(TG3_CPMU_CTRL, cpmu_val);
12105
12106 return ret;
12107 }
12108
12109 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12110 {
12111 struct tg3 *tp = netdev_priv(dev);
12112 int ret;
12113 u32 offset, len, b_offset, odd_len;
12114 u8 *buf;
12115 __be32 start = 0, end;
12116
12117 if (tg3_flag(tp, NO_NVRAM) ||
12118 eeprom->magic != TG3_EEPROM_MAGIC)
12119 return -EINVAL;
12120
12121 offset = eeprom->offset;
12122 len = eeprom->len;
12123
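	/* Example of the rewrite alignment below: offset = 5, len = 3
	 * gives b_offset = 1; len becomes 4 and offset becomes 4, and the
	 * word preserved in 'start' supplies the untouched byte at
	 * offset 4 when the scratch buffer is assembled.
	 */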
12124 if ((b_offset = (offset & 3))) {
12125 /* adjustments to start on required 4 byte boundary */
12126 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12127 if (ret)
12128 return ret;
12129 len += b_offset;
12130 offset &= ~3;
12131 if (len < 4)
12132 len = 4;
12133 }
12134
12135 odd_len = 0;
12136 if (len & 3) {
12137 /* adjustments to end on required 4 byte boundary */
12138 odd_len = 1;
12139 len = (len + 3) & ~3;
12140 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12141 if (ret)
12142 return ret;
12143 }
12144
12145 buf = data;
12146 if (b_offset || odd_len) {
12147 buf = kmalloc(len, GFP_KERNEL);
12148 if (!buf)
12149 return -ENOMEM;
12150 if (b_offset)
12151 memcpy(buf, &start, 4);
12152 if (odd_len)
12153 memcpy(buf+len-4, &end, 4);
12154 memcpy(buf + b_offset, data, eeprom->len);
12155 }
12156
12157 ret = tg3_nvram_write_block(tp, offset, len, buf);
12158
12159 if (buf != data)
12160 kfree(buf);
12161
12162 return ret;
12163 }
12164
12165 static int tg3_get_link_ksettings(struct net_device *dev,
12166 struct ethtool_link_ksettings *cmd)
12167 {
12168 struct tg3 *tp = netdev_priv(dev);
12169 u32 supported, advertising;
12170
12171 if (tg3_flag(tp, USE_PHYLIB)) {
12172 struct phy_device *phydev;
12173 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12174 return -EAGAIN;
12175 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12176 phy_ethtool_ksettings_get(phydev, cmd);
12177
12178 return 0;
12179 }
12180
12181 supported = (SUPPORTED_Autoneg);
12182
12183 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12184 supported |= (SUPPORTED_1000baseT_Half |
12185 SUPPORTED_1000baseT_Full);
12186
12187 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12188 supported |= (SUPPORTED_100baseT_Half |
12189 SUPPORTED_100baseT_Full |
12190 SUPPORTED_10baseT_Half |
12191 SUPPORTED_10baseT_Full |
12192 SUPPORTED_TP);
12193 cmd->base.port = PORT_TP;
12194 } else {
12195 supported |= SUPPORTED_FIBRE;
12196 cmd->base.port = PORT_FIBRE;
12197 }
12198 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12199 supported);
12200
12201 advertising = tp->link_config.advertising;
12202 if (tg3_flag(tp, PAUSE_AUTONEG)) {
12203 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12204 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12205 advertising |= ADVERTISED_Pause;
12206 } else {
12207 advertising |= ADVERTISED_Pause |
12208 ADVERTISED_Asym_Pause;
12209 }
12210 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12211 advertising |= ADVERTISED_Asym_Pause;
12212 }
12213 }
12214 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12215 advertising);
12216
12217 if (netif_running(dev) && tp->link_up) {
12218 cmd->base.speed = tp->link_config.active_speed;
12219 cmd->base.duplex = tp->link_config.active_duplex;
12220 ethtool_convert_legacy_u32_to_link_mode(
12221 cmd->link_modes.lp_advertising,
12222 tp->link_config.rmt_adv);
12223
12224 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12225 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12226 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12227 else
12228 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12229 }
12230 } else {
12231 cmd->base.speed = SPEED_UNKNOWN;
12232 cmd->base.duplex = DUPLEX_UNKNOWN;
12233 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12234 }
12235 cmd->base.phy_address = tp->phy_addr;
12236 cmd->base.autoneg = tp->link_config.autoneg;
12237 return 0;
12238 }
12239
12240 static int tg3_set_link_ksettings(struct net_device *dev,
12241 const struct ethtool_link_ksettings *cmd)
12242 {
12243 struct tg3 *tp = netdev_priv(dev);
12244 u32 speed = cmd->base.speed;
12245 u32 advertising;
12246
12247 if (tg3_flag(tp, USE_PHYLIB)) {
12248 struct phy_device *phydev;
12249 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12250 return -EAGAIN;
12251 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12252 return phy_ethtool_ksettings_set(phydev, cmd);
12253 }
12254
12255 if (cmd->base.autoneg != AUTONEG_ENABLE &&
12256 cmd->base.autoneg != AUTONEG_DISABLE)
12257 return -EINVAL;
12258
12259 if (cmd->base.autoneg == AUTONEG_DISABLE &&
12260 cmd->base.duplex != DUPLEX_FULL &&
12261 cmd->base.duplex != DUPLEX_HALF)
12262 return -EINVAL;
12263
12264 ethtool_convert_link_mode_to_legacy_u32(&advertising,
12265 cmd->link_modes.advertising);
12266
12267 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12268 u32 mask = ADVERTISED_Autoneg |
12269 ADVERTISED_Pause |
12270 ADVERTISED_Asym_Pause;
12271
12272 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12273 mask |= ADVERTISED_1000baseT_Half |
12274 ADVERTISED_1000baseT_Full;
12275
12276 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12277 mask |= ADVERTISED_100baseT_Half |
12278 ADVERTISED_100baseT_Full |
12279 ADVERTISED_10baseT_Half |
12280 ADVERTISED_10baseT_Full |
12281 ADVERTISED_TP;
12282 else
12283 mask |= ADVERTISED_FIBRE;
12284
12285 if (advertising & ~mask)
12286 return -EINVAL;
12287
12288 mask &= (ADVERTISED_1000baseT_Half |
12289 ADVERTISED_1000baseT_Full |
12290 ADVERTISED_100baseT_Half |
12291 ADVERTISED_100baseT_Full |
12292 ADVERTISED_10baseT_Half |
12293 ADVERTISED_10baseT_Full);
12294
12295 advertising &= mask;
12296 } else {
12297 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12298 if (speed != SPEED_1000)
12299 return -EINVAL;
12300
12301 if (cmd->base.duplex != DUPLEX_FULL)
12302 return -EINVAL;
12303 } else {
12304 if (speed != SPEED_100 &&
12305 speed != SPEED_10)
12306 return -EINVAL;
12307 }
12308 }
12309
12310 tg3_full_lock(tp, 0);
12311
12312 tp->link_config.autoneg = cmd->base.autoneg;
12313 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12314 tp->link_config.advertising = (advertising |
12315 ADVERTISED_Autoneg);
12316 tp->link_config.speed = SPEED_UNKNOWN;
12317 tp->link_config.duplex = DUPLEX_UNKNOWN;
12318 } else {
12319 tp->link_config.advertising = 0;
12320 tp->link_config.speed = speed;
12321 tp->link_config.duplex = cmd->base.duplex;
12322 }
12323
12324 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12325
12326 tg3_warn_mgmt_link_flap(tp);
12327
12328 if (netif_running(dev))
12329 tg3_setup_phy(tp, true);
12330
12331 tg3_full_unlock(tp);
12332
12333 return 0;
12334 }
12335
12336 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12337 {
12338 struct tg3 *tp = netdev_priv(dev);
12339
12340 strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12341 strscpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12342 strscpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12343 }
12344
12345 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12346 {
12347 struct tg3 *tp = netdev_priv(dev);
12348
12349 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12350 wol->supported = WAKE_MAGIC;
12351 else
12352 wol->supported = 0;
12353 wol->wolopts = 0;
12354 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12355 wol->wolopts = WAKE_MAGIC;
12356 memset(&wol->sopass, 0, sizeof(wol->sopass));
12357 }
12358
12359 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12360 {
12361 struct tg3 *tp = netdev_priv(dev);
12362 struct device *dp = &tp->pdev->dev;
12363
12364 if (wol->wolopts & ~WAKE_MAGIC)
12365 return -EINVAL;
12366 if ((wol->wolopts & WAKE_MAGIC) &&
12367 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12368 return -EINVAL;
12369
12370 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12371
12372 if (device_may_wakeup(dp))
12373 tg3_flag_set(tp, WOL_ENABLE);
12374 else
12375 tg3_flag_clear(tp, WOL_ENABLE);
12376
12377 return 0;
12378 }
12379
12380 static u32 tg3_get_msglevel(struct net_device *dev)
12381 {
12382 struct tg3 *tp = netdev_priv(dev);
12383 return tp->msg_enable;
12384 }
12385
12386 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12387 {
12388 struct tg3 *tp = netdev_priv(dev);
12389 tp->msg_enable = value;
12390 }
12391
12392 static int tg3_nway_reset(struct net_device *dev)
12393 {
12394 struct tg3 *tp = netdev_priv(dev);
12395 int r;
12396
12397 if (!netif_running(dev))
12398 return -EAGAIN;
12399
12400 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12401 return -EINVAL;
12402
12403 tg3_warn_mgmt_link_flap(tp);
12404
12405 if (tg3_flag(tp, USE_PHYLIB)) {
12406 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12407 return -EAGAIN;
12408 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12409 } else {
12410 u32 bmcr;
12411
12412 spin_lock_bh(&tp->lock);
12413 r = -EINVAL;
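		/* Only the second BMCR read's status and value are used
		 * below; the preceding read presumably just flushes a
		 * stale value from the PHY before the decision is made.
		 */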
12414 tg3_readphy(tp, MII_BMCR, &bmcr);
12415 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12416 ((bmcr & BMCR_ANENABLE) ||
12417 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12418 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12419 BMCR_ANENABLE);
12420 r = 0;
12421 }
12422 spin_unlock_bh(&tp->lock);
12423 }
12424
12425 return r;
12426 }
12427
12428 static void tg3_get_ringparam(struct net_device *dev,
12429 struct ethtool_ringparam *ering,
12430 struct kernel_ethtool_ringparam *kernel_ering,
12431 struct netlink_ext_ack *extack)
12432 {
12433 struct tg3 *tp = netdev_priv(dev);
12434
12435 ering->rx_max_pending = tp->rx_std_ring_mask;
12436 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12437 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12438 else
12439 ering->rx_jumbo_max_pending = 0;
12440
12441 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12442
12443 ering->rx_pending = tp->rx_pending;
12444 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12445 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12446 else
12447 ering->rx_jumbo_pending = 0;
12448
12449 ering->tx_pending = tp->napi[0].tx_pending;
12450 }
12451
12452 static int tg3_set_ringparam(struct net_device *dev,
12453 struct ethtool_ringparam *ering,
12454 struct kernel_ethtool_ringparam *kernel_ering,
12455 struct netlink_ext_ack *extack)
12456 {
12457 struct tg3 *tp = netdev_priv(dev);
12458 int i, irq_sync = 0, err = 0;
12459 bool reset_phy = false;
12460
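	/* Sanity limits below: a TX ring must hold more descriptors than
	 * one maximally-fragmented skb (MAX_SKB_FRAGS), and three times
	 * that headroom on TSO_BUG chips, where the driver may have to
	 * segment a TSO packet into multiple skbs itself.
	 */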
12461 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12462 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12463 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12464 (ering->tx_pending <= MAX_SKB_FRAGS) ||
12465 (tg3_flag(tp, TSO_BUG) &&
12466 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12467 return -EINVAL;
12468
12469 if (netif_running(dev)) {
12470 tg3_phy_stop(tp);
12471 tg3_netif_stop(tp);
12472 irq_sync = 1;
12473 }
12474
12475 tg3_full_lock(tp, irq_sync);
12476
12477 tp->rx_pending = ering->rx_pending;
12478
12479 if (tg3_flag(tp, MAX_RXPEND_64) &&
12480 tp->rx_pending > 63)
12481 tp->rx_pending = 63;
12482
12483 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12484 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12485
12486 for (i = 0; i < tp->irq_max; i++)
12487 tp->napi[i].tx_pending = ering->tx_pending;
12488
12489 if (netif_running(dev)) {
12490 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12491 /* Reset PHY to avoid PHY lock up */
12492 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12493 tg3_asic_rev(tp) == ASIC_REV_5719 ||
12494 tg3_asic_rev(tp) == ASIC_REV_5720)
12495 reset_phy = true;
12496
12497 err = tg3_restart_hw(tp, reset_phy);
12498 if (!err)
12499 tg3_netif_start(tp);
12500 }
12501
12502 tg3_full_unlock(tp);
12503
12504 if (irq_sync && !err)
12505 tg3_phy_start(tp);
12506
12507 return err;
12508 }
12509
12510 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12511 {
12512 struct tg3 *tp = netdev_priv(dev);
12513
12514 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12515
12516 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12517 epause->rx_pause = 1;
12518 else
12519 epause->rx_pause = 0;
12520
12521 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12522 epause->tx_pause = 1;
12523 else
12524 epause->tx_pause = 0;
12525 }
12526
12527 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12528 {
12529 struct tg3 *tp = netdev_priv(dev);
12530 int err = 0;
12531 bool reset_phy = false;
12532
12533 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12534 tg3_warn_mgmt_link_flap(tp);
12535
12536 if (tg3_flag(tp, USE_PHYLIB)) {
12537 struct phy_device *phydev;
12538
12539 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12540
12541 if (!phy_validate_pause(phydev, epause))
12542 return -EINVAL;
12543
12544 tp->link_config.flowctrl = 0;
12545 phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
12546 if (epause->rx_pause) {
12547 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12548
12549 if (epause->tx_pause) {
12550 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12551 }
12552 } else if (epause->tx_pause) {
12553 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12554 }
12555
12556 if (epause->autoneg)
12557 tg3_flag_set(tp, PAUSE_AUTONEG);
12558 else
12559 tg3_flag_clear(tp, PAUSE_AUTONEG);
12560
12561 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12562 if (phydev->autoneg) {
12563 /* phy_set_asym_pause() will
12564 * renegotiate the link to inform our
12565 * link partner of our flow control
12566 * settings, even if the flow control
12567 * is forced. Let tg3_adjust_link()
12568 * do the final flow control setup.
12569 */
12570 return 0;
12571 }
12572
12573 if (!epause->autoneg)
12574 tg3_setup_flow_control(tp, 0, 0);
12575 }
12576 } else {
12577 int irq_sync = 0;
12578
12579 if (netif_running(dev)) {
12580 tg3_netif_stop(tp);
12581 irq_sync = 1;
12582 }
12583
12584 tg3_full_lock(tp, irq_sync);
12585
12586 if (epause->autoneg)
12587 tg3_flag_set(tp, PAUSE_AUTONEG);
12588 else
12589 tg3_flag_clear(tp, PAUSE_AUTONEG);
12590 if (epause->rx_pause)
12591 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12592 else
12593 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12594 if (epause->tx_pause)
12595 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12596 else
12597 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12598
12599 if (netif_running(dev)) {
12600 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12601 /* Reset PHY to avoid PHY lock up */
12602 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12603 tg3_asic_rev(tp) == ASIC_REV_5719 ||
12604 tg3_asic_rev(tp) == ASIC_REV_5720)
12605 reset_phy = true;
12606
12607 err = tg3_restart_hw(tp, reset_phy);
12608 if (!err)
12609 tg3_netif_start(tp);
12610 }
12611
12612 tg3_full_unlock(tp);
12613 }
12614
12615 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12616
12617 return err;
12618 }
12619
12620 static int tg3_get_sset_count(struct net_device *dev, int sset)
12621 {
12622 switch (sset) {
12623 case ETH_SS_TEST:
12624 return TG3_NUM_TEST;
12625 case ETH_SS_STATS:
12626 return TG3_NUM_STATS;
12627 default:
12628 return -EOPNOTSUPP;
12629 }
12630 }
12631
12632 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12633 u32 *rules __always_unused)
12634 {
12635 struct tg3 *tp = netdev_priv(dev);
12636
12637 if (!tg3_flag(tp, SUPPORT_MSIX))
12638 return -EOPNOTSUPP;
12639
12640 switch (info->cmd) {
12641 case ETHTOOL_GRXRINGS:
12642 if (netif_running(tp->dev))
12643 info->data = tp->rxq_cnt;
12644 else {
12645 info->data = num_online_cpus();
12646 if (info->data > TG3_RSS_MAX_NUM_QS)
12647 info->data = TG3_RSS_MAX_NUM_QS;
12648 }
12649
12650 return 0;
12651
12652 default:
12653 return -EOPNOTSUPP;
12654 }
12655 }
12656
12657 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12658 {
12659 u32 size = 0;
12660 struct tg3 *tp = netdev_priv(dev);
12661
12662 if (tg3_flag(tp, SUPPORT_MSIX))
12663 size = TG3_RSS_INDIR_TBL_SIZE;
12664
12665 return size;
12666 }
12667
12668 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12669 {
12670 struct tg3 *tp = netdev_priv(dev);
12671 int i;
12672
12673 if (hfunc)
12674 *hfunc = ETH_RSS_HASH_TOP;
12675 if (!indir)
12676 return 0;
12677
12678 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12679 indir[i] = tp->rss_ind_tbl[i];
12680
12681 return 0;
12682 }
12683
12684 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12685 const u8 hfunc)
12686 {
12687 struct tg3 *tp = netdev_priv(dev);
12688 size_t i;
12689
12690 /* We require at least one supported parameter to be changed and no
12691 * change in any of the unsupported parameters
12692 */
12693 if (key ||
12694 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12695 return -EOPNOTSUPP;
12696
12697 if (!indir)
12698 return 0;
12699
12700 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12701 tp->rss_ind_tbl[i] = indir[i];
12702
12703 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12704 return 0;
12705
12706 /* It is legal to write the indirection
12707 * table while the device is running.
12708 */
12709 tg3_full_lock(tp, 0);
12710 tg3_rss_write_indir_tbl(tp);
12711 tg3_full_unlock(tp);
12712
12713 return 0;
12714 }
12715
12716 static void tg3_get_channels(struct net_device *dev,
12717 struct ethtool_channels *channel)
12718 {
12719 struct tg3 *tp = netdev_priv(dev);
12720 u32 deflt_qs = netif_get_num_default_rss_queues();
12721
12722 channel->max_rx = tp->rxq_max;
12723 channel->max_tx = tp->txq_max;
12724
12725 if (netif_running(dev)) {
12726 channel->rx_count = tp->rxq_cnt;
12727 channel->tx_count = tp->txq_cnt;
12728 } else {
12729 if (tp->rxq_req)
12730 channel->rx_count = tp->rxq_req;
12731 else
12732 channel->rx_count = min(deflt_qs, tp->rxq_max);
12733
12734 if (tp->txq_req)
12735 channel->tx_count = tp->txq_req;
12736 else
12737 channel->tx_count = min(deflt_qs, tp->txq_max);
12738 }
12739 }
12740
12741 static int tg3_set_channels(struct net_device *dev,
12742 struct ethtool_channels *channel)
12743 {
12744 struct tg3 *tp = netdev_priv(dev);
12745
12746 if (!tg3_flag(tp, SUPPORT_MSIX))
12747 return -EOPNOTSUPP;
12748
12749 if (channel->rx_count > tp->rxq_max ||
12750 channel->tx_count > tp->txq_max)
12751 return -EINVAL;
12752
12753 tp->rxq_req = channel->rx_count;
12754 tp->txq_req = channel->tx_count;
12755
12756 if (!netif_running(dev))
12757 return 0;
12758
12759 tg3_stop(tp);
12760
12761 tg3_carrier_off(tp);
12762
12763 tg3_start(tp, true, false, false);
12764
12765 return 0;
12766 }
12767
12768 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12769 {
12770 switch (stringset) {
12771 case ETH_SS_STATS:
12772 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12773 break;
12774 case ETH_SS_TEST:
12775 		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12776 break;
12777 default:
12778 WARN_ON(1); /* we need a WARN() */
12779 break;
12780 }
12781 }
12782
12783 static int tg3_set_phys_id(struct net_device *dev,
12784 enum ethtool_phys_id_state state)
12785 {
12786 struct tg3 *tp = netdev_priv(dev);
12787
12788 switch (state) {
12789 case ETHTOOL_ID_ACTIVE:
12790 return 1; /* cycle on/off once per second */
12791
12792 case ETHTOOL_ID_ON:
12793 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12794 LED_CTRL_1000MBPS_ON |
12795 LED_CTRL_100MBPS_ON |
12796 LED_CTRL_10MBPS_ON |
12797 LED_CTRL_TRAFFIC_OVERRIDE |
12798 LED_CTRL_TRAFFIC_BLINK |
12799 LED_CTRL_TRAFFIC_LED);
12800 break;
12801
12802 case ETHTOOL_ID_OFF:
12803 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12804 LED_CTRL_TRAFFIC_OVERRIDE);
12805 break;
12806
12807 case ETHTOOL_ID_INACTIVE:
12808 tw32(MAC_LED_CTRL, tp->led_ctrl);
12809 break;
12810 }
12811
12812 return 0;
12813 }
12814
12815 static void tg3_get_ethtool_stats(struct net_device *dev,
12816 struct ethtool_stats *estats, u64 *tmp_stats)
12817 {
12818 struct tg3 *tp = netdev_priv(dev);
12819
12820 if (tp->hw_stats)
12821 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12822 else
12823 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12824 }
12825
12826 static __be32 *tg3_vpd_readblock(struct tg3 *tp, unsigned int *vpdlen)
12827 {
12828 int i;
12829 __be32 *buf;
12830 u32 offset = 0, len = 0;
12831 u32 magic, val;
12832
12833 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12834 return NULL;
12835
12836 if (magic == TG3_EEPROM_MAGIC) {
12837 for (offset = TG3_NVM_DIR_START;
12838 offset < TG3_NVM_DIR_END;
12839 offset += TG3_NVM_DIRENT_SIZE) {
12840 if (tg3_nvram_read(tp, offset, &val))
12841 return NULL;
12842
12843 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12844 TG3_NVM_DIRTYPE_EXTVPD)
12845 break;
12846 }
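		/* Each directory entry begins with a <type|length> word:
		 * the bits above TG3_NVM_DIRTYPE_SHIFT encode the entry
		 * type, the TG3_NVM_DIRTYPE_LENMSK bits its length in
		 * 32-bit words, and the word at entry offset + 4 gives
		 * the data location used below.
		 */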
12847
12848 if (offset != TG3_NVM_DIR_END) {
12849 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12850 if (tg3_nvram_read(tp, offset + 4, &offset))
12851 return NULL;
12852
12853 offset = tg3_nvram_logical_addr(tp, offset);
12854 }
12855
12856 if (!offset || !len) {
12857 offset = TG3_NVM_VPD_OFF;
12858 len = TG3_NVM_VPD_LEN;
12859 }
12860
12861 buf = kmalloc(len, GFP_KERNEL);
12862 if (!buf)
12863 return NULL;
12864
12865 for (i = 0; i < len; i += 4) {
12866 /* The data is in little-endian format in NVRAM.
12867 * Use the big-endian read routines to preserve
12868 * the byte order as it exists in NVRAM.
12869 */
12870 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12871 goto error;
12872 }
12873 *vpdlen = len;
12874 } else {
12875 buf = pci_vpd_alloc(tp->pdev, vpdlen);
12876 if (IS_ERR(buf))
12877 return NULL;
12878 }
12879
12880 return buf;
12881
12882 error:
12883 kfree(buf);
12884 return NULL;
12885 }
12886
12887 #define NVRAM_TEST_SIZE 0x100
12888 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12889 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12890 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12891 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12892 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12893 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12894 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12895 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12896
12897 static int tg3_test_nvram(struct tg3 *tp)
12898 {
12899 u32 csum, magic;
12900 __be32 *buf;
12901 int i, j, k, err = 0, size;
12902 unsigned int len;
12903
12904 if (tg3_flag(tp, NO_NVRAM))
12905 return 0;
12906
12907 if (tg3_nvram_read(tp, 0, &magic) != 0)
12908 return -EIO;
12909
12910 if (magic == TG3_EEPROM_MAGIC)
12911 size = NVRAM_TEST_SIZE;
12912 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12913 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12914 TG3_EEPROM_SB_FORMAT_1) {
12915 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12916 case TG3_EEPROM_SB_REVISION_0:
12917 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12918 break;
12919 case TG3_EEPROM_SB_REVISION_2:
12920 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12921 break;
12922 case TG3_EEPROM_SB_REVISION_3:
12923 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12924 break;
12925 case TG3_EEPROM_SB_REVISION_4:
12926 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12927 break;
12928 case TG3_EEPROM_SB_REVISION_5:
12929 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12930 break;
12931 case TG3_EEPROM_SB_REVISION_6:
12932 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12933 break;
12934 default:
12935 return -EIO;
12936 }
12937 } else
12938 return 0;
12939 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12940 size = NVRAM_SELFBOOT_HW_SIZE;
12941 else
12942 return -EIO;
12943
12944 buf = kmalloc(size, GFP_KERNEL);
12945 if (buf == NULL)
12946 return -ENOMEM;
12947
12948 err = -EIO;
12949 for (i = 0, j = 0; i < size; i += 4, j++) {
12950 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12951 if (err)
12952 break;
12953 }
12954 if (i < size)
12955 goto out;
12956
12957 /* Selfboot format */
12958 magic = be32_to_cpu(buf[0]);
12959 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12960 TG3_EEPROM_MAGIC_FW) {
12961 u8 *buf8 = (u8 *) buf, csum8 = 0;
12962
12963 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12964 TG3_EEPROM_SB_REVISION_2) {
12965 /* For rev 2, the csum doesn't include the MBA. */
12966 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12967 csum8 += buf8[i];
12968 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12969 csum8 += buf8[i];
12970 } else {
12971 for (i = 0; i < size; i++)
12972 csum8 += buf8[i];
12973 }
12974
12975 if (csum8 == 0) {
12976 err = 0;
12977 goto out;
12978 }
12979
12980 err = -EIO;
12981 goto out;
12982 }
12983
12984 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12985 TG3_EEPROM_MAGIC_HW) {
12986 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12987 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12988 u8 *buf8 = (u8 *) buf;
12989
12990 /* Separate the parity bits and the data bytes. */
12991 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12992 if ((i == 0) || (i == 8)) {
12993 int l;
12994 u8 msk;
12995
12996 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12997 parity[k++] = buf8[i] & msk;
12998 i++;
12999 } else if (i == 16) {
13000 int l;
13001 u8 msk;
13002
13003 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
13004 parity[k++] = buf8[i] & msk;
13005 i++;
13006
13007 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
13008 parity[k++] = buf8[i] & msk;
13009 i++;
13010 }
13011 data[j++] = buf8[i];
13012 }
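		/* The bytes at offsets 0, 8, 16 and 17 carry the packed
		 * parity bits for all 0x1c data bytes; the check below
		 * then requires each data byte plus its parity bit to
		 * have an odd population count (odd parity).
		 */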
13013
13014 err = -EIO;
13015 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
13016 u8 hw8 = hweight8(data[i]);
13017
13018 if ((hw8 & 0x1) && parity[i])
13019 goto out;
13020 else if (!(hw8 & 0x1) && !parity[i])
13021 goto out;
13022 }
13023 err = 0;
13024 goto out;
13025 }
13026
13027 err = -EIO;
13028
13029 /* Bootstrap checksum at offset 0x10 */
13030 csum = calc_crc((unsigned char *) buf, 0x10);
13031 if (csum != le32_to_cpu(buf[0x10/4]))
13032 goto out;
13033
13034 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
13035 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
13036 if (csum != le32_to_cpu(buf[0xfc/4]))
13037 goto out;
13038
13039 kfree(buf);
13040
13041 buf = tg3_vpd_readblock(tp, &len);
13042 if (!buf)
13043 return -ENOMEM;
13044
13045 err = pci_vpd_check_csum(buf, len);
13046 /* go on if no checksum found */
13047 if (err == 1)
13048 err = 0;
13049 out:
13050 kfree(buf);
13051 return err;
13052 }
13053
13054 #define TG3_SERDES_TIMEOUT_SEC 2
13055 #define TG3_COPPER_TIMEOUT_SEC 6
13056
13057 static int tg3_test_link(struct tg3 *tp)
13058 {
13059 int i, max;
13060
13061 if (!netif_running(tp->dev))
13062 return -ENODEV;
13063
13064 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13065 max = TG3_SERDES_TIMEOUT_SEC;
13066 else
13067 max = TG3_COPPER_TIMEOUT_SEC;
13068
13069 for (i = 0; i < max; i++) {
13070 if (tp->link_up)
13071 return 0;
13072
13073 if (msleep_interruptible(1000))
13074 break;
13075 }
13076
13077 return -EIO;
13078 }
13079
13080 /* Only test the commonly used registers */
13081 static int tg3_test_registers(struct tg3 *tp)
13082 {
13083 int i, is_5705, is_5750;
13084 u32 offset, read_mask, write_mask, val, save_val, read_val;
13085 static struct {
13086 u16 offset;
13087 u16 flags;
13088 #define TG3_FL_5705 0x1
13089 #define TG3_FL_NOT_5705 0x2
13090 #define TG3_FL_NOT_5788 0x4
13091 #define TG3_FL_NOT_5750 0x8
13092 u32 read_mask;
13093 u32 write_mask;
13094 } reg_tbl[] = {
13095 /* MAC Control Registers */
13096 { MAC_MODE, TG3_FL_NOT_5705,
13097 0x00000000, 0x00ef6f8c },
13098 { MAC_MODE, TG3_FL_5705,
13099 0x00000000, 0x01ef6b8c },
13100 { MAC_STATUS, TG3_FL_NOT_5705,
13101 0x03800107, 0x00000000 },
13102 { MAC_STATUS, TG3_FL_5705,
13103 0x03800100, 0x00000000 },
13104 { MAC_ADDR_0_HIGH, 0x0000,
13105 0x00000000, 0x0000ffff },
13106 { MAC_ADDR_0_LOW, 0x0000,
13107 0x00000000, 0xffffffff },
13108 { MAC_RX_MTU_SIZE, 0x0000,
13109 0x00000000, 0x0000ffff },
13110 { MAC_TX_MODE, 0x0000,
13111 0x00000000, 0x00000070 },
13112 { MAC_TX_LENGTHS, 0x0000,
13113 0x00000000, 0x00003fff },
13114 { MAC_RX_MODE, TG3_FL_NOT_5705,
13115 0x00000000, 0x000007fc },
13116 { MAC_RX_MODE, TG3_FL_5705,
13117 0x00000000, 0x000007dc },
13118 { MAC_HASH_REG_0, 0x0000,
13119 0x00000000, 0xffffffff },
13120 { MAC_HASH_REG_1, 0x0000,
13121 0x00000000, 0xffffffff },
13122 { MAC_HASH_REG_2, 0x0000,
13123 0x00000000, 0xffffffff },
13124 { MAC_HASH_REG_3, 0x0000,
13125 0x00000000, 0xffffffff },
13126
13127 /* Receive Data and Receive BD Initiator Control Registers. */
13128 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13129 0x00000000, 0xffffffff },
13130 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13131 0x00000000, 0xffffffff },
13132 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13133 0x00000000, 0x00000003 },
13134 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13135 0x00000000, 0xffffffff },
13136 { RCVDBDI_STD_BD+0, 0x0000,
13137 0x00000000, 0xffffffff },
13138 { RCVDBDI_STD_BD+4, 0x0000,
13139 0x00000000, 0xffffffff },
13140 { RCVDBDI_STD_BD+8, 0x0000,
13141 0x00000000, 0xffff0002 },
13142 { RCVDBDI_STD_BD+0xc, 0x0000,
13143 0x00000000, 0xffffffff },
13144
13145 /* Receive BD Initiator Control Registers. */
13146 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13147 0x00000000, 0xffffffff },
13148 { RCVBDI_STD_THRESH, TG3_FL_5705,
13149 0x00000000, 0x000003ff },
13150 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13151 0x00000000, 0xffffffff },
13152
13153 /* Host Coalescing Control Registers. */
13154 { HOSTCC_MODE, TG3_FL_NOT_5705,
13155 0x00000000, 0x00000004 },
13156 { HOSTCC_MODE, TG3_FL_5705,
13157 0x00000000, 0x000000f6 },
13158 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13159 0x00000000, 0xffffffff },
13160 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13161 0x00000000, 0x000003ff },
13162 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13163 0x00000000, 0xffffffff },
13164 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13165 0x00000000, 0x000003ff },
13166 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13167 0x00000000, 0xffffffff },
13168 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13169 0x00000000, 0x000000ff },
13170 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13171 0x00000000, 0xffffffff },
13172 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13173 0x00000000, 0x000000ff },
13174 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13175 0x00000000, 0xffffffff },
13176 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13177 0x00000000, 0xffffffff },
13178 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13179 0x00000000, 0xffffffff },
13180 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13181 0x00000000, 0x000000ff },
13182 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13183 0x00000000, 0xffffffff },
13184 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13185 0x00000000, 0x000000ff },
13186 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13187 0x00000000, 0xffffffff },
13188 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13189 0x00000000, 0xffffffff },
13190 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13191 0x00000000, 0xffffffff },
13192 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13193 0x00000000, 0xffffffff },
13194 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13195 0x00000000, 0xffffffff },
13196 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13197 0xffffffff, 0x00000000 },
13198 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13199 0xffffffff, 0x00000000 },
13200
13201 /* Buffer Manager Control Registers. */
13202 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13203 0x00000000, 0x007fff80 },
13204 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13205 0x00000000, 0x007fffff },
13206 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13207 0x00000000, 0x0000003f },
13208 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13209 0x00000000, 0x000001ff },
13210 { BUFMGR_MB_HIGH_WATER, 0x0000,
13211 0x00000000, 0x000001ff },
13212 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13213 0xffffffff, 0x00000000 },
13214 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13215 0xffffffff, 0x00000000 },
13216
13217 /* Mailbox Registers */
13218 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13219 0x00000000, 0x000001ff },
13220 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13221 0x00000000, 0x000001ff },
13222 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13223 0x00000000, 0x000007ff },
13224 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13225 0x00000000, 0x000001ff },
13226
13227 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13228 };
13229
13230 is_5705 = is_5750 = 0;
13231 if (tg3_flag(tp, 5705_PLUS)) {
13232 is_5705 = 1;
13233 if (tg3_flag(tp, 5750_PLUS))
13234 is_5750 = 1;
13235 }
13236
13237 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13238 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13239 continue;
13240
13241 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13242 continue;
13243
13244 if (tg3_flag(tp, IS_5788) &&
13245 (reg_tbl[i].flags & TG3_FL_NOT_5788))
13246 continue;
13247
13248 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13249 continue;
13250
13251 offset = (u32) reg_tbl[i].offset;
13252 read_mask = reg_tbl[i].read_mask;
13253 write_mask = reg_tbl[i].write_mask;
13254
13255 /* Save the original register content */
13256 save_val = tr32(offset);
13257
13258 /* Determine the read-only value. */
13259 read_val = save_val & read_mask;
13260
13261 /* Write zero to the register, then make sure the read-only bits
13262 * are not changed and the read/write bits are all zeros.
13263 */
13264 tw32(offset, 0);
13265
13266 val = tr32(offset);
13267
13268 /* Test the read-only and read/write bits. */
13269 if (((val & read_mask) != read_val) || (val & write_mask))
13270 goto out;
13271
13272 /* Write ones to all the bits defined by RdMask and WrMask, then
13273 * make sure the read-only bits are not changed and the
13274 * read/write bits are all ones.
13275 */
13276 tw32(offset, read_mask | write_mask);
13277
13278 val = tr32(offset);
13279
13280 /* Test the read-only bits. */
13281 if ((val & read_mask) != read_val)
13282 goto out;
13283
13284 /* Test the read/write bits. */
13285 if ((val & write_mask) != write_mask)
13286 goto out;
13287
13288 tw32(offset, save_val);
13289 }
13290
13291 return 0;
13292
13293 out:
13294 if (netif_msg_hw(tp))
13295 netdev_err(tp->dev,
13296 "Register test failed at offset %x\n", offset);
13297 tw32(offset, save_val);
13298 return -EIO;
13299 }
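/* Illustrative sketch (not used by the driver): the probe performed by
 * the loop above, reduced to a single register.  tg3_probe_one_reg() is
 * hypothetical; tr32()/tw32() are the register accessors used throughout
 * this file and expand against the local 'tp'.
 */
static inline int tg3_probe_one_reg(struct tg3 *tp, u32 off,
				    u32 read_mask, u32 write_mask)
{
	u32 save = tr32(off);
	u32 ro = save & read_mask;	/* expected read-only contents */
	u32 val;
	int err = -EIO;

	/* All-zeros write: read-only bits must hold, r/w bits must clear. */
	tw32(off, 0);
	val = tr32(off);
	if ((val & read_mask) != ro || (val & write_mask))
		goto restore;

	/* All-ones write: read-only bits must hold, r/w bits must all set. */
	tw32(off, read_mask | write_mask);
	val = tr32(off);
	if ((val & read_mask) != ro || (val & write_mask) != write_mask)
		goto restore;

	err = 0;
restore:
	tw32(off, save);	/* always restore the original contents */
	return err;
}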
13300
13301 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13302 {
13303 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13304 int i;
13305 u32 j;
13306
13307 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13308 for (j = 0; j < len; j += 4) {
13309 u32 val;
13310
13311 tg3_write_mem(tp, offset + j, test_pattern[i]);
13312 tg3_read_mem(tp, offset + j, &val);
13313 if (val != test_pattern[i])
13314 return -EIO;
13315 }
13316 }
13317 return 0;
13318 }
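/* Note: walking all-zeros, all-ones and the mixed 0xaa55a55a pattern
 * through the window catches stuck-at bits and gross data-line faults;
 * it is not an exhaustive address-aliasing test.
 */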
13319
13320 static int tg3_test_memory(struct tg3 *tp)
13321 {
13322 static struct mem_entry {
13323 u32 offset;
13324 u32 len;
13325 } mem_tbl_570x[] = {
13326 { 0x00000000, 0x00b50},
13327 { 0x00002000, 0x1c000},
13328 { 0xffffffff, 0x00000}
13329 }, mem_tbl_5705[] = {
13330 { 0x00000100, 0x0000c},
13331 { 0x00000200, 0x00008},
13332 { 0x00004000, 0x00800},
13333 { 0x00006000, 0x01000},
13334 { 0x00008000, 0x02000},
13335 { 0x00010000, 0x0e000},
13336 { 0xffffffff, 0x00000}
13337 }, mem_tbl_5755[] = {
13338 { 0x00000200, 0x00008},
13339 { 0x00004000, 0x00800},
13340 { 0x00006000, 0x00800},
13341 { 0x00008000, 0x02000},
13342 { 0x00010000, 0x0c000},
13343 { 0xffffffff, 0x00000}
13344 }, mem_tbl_5906[] = {
13345 { 0x00000200, 0x00008},
13346 { 0x00004000, 0x00400},
13347 { 0x00006000, 0x00400},
13348 { 0x00008000, 0x01000},
13349 { 0x00010000, 0x01000},
13350 { 0xffffffff, 0x00000}
13351 }, mem_tbl_5717[] = {
13352 { 0x00000200, 0x00008},
13353 { 0x00010000, 0x0a000},
13354 { 0x00020000, 0x13c00},
13355 { 0xffffffff, 0x00000}
13356 }, mem_tbl_57765[] = {
13357 { 0x00000200, 0x00008},
13358 { 0x00004000, 0x00800},
13359 { 0x00006000, 0x09800},
13360 { 0x00010000, 0x0a000},
13361 { 0xffffffff, 0x00000}
13362 };
13363 struct mem_entry *mem_tbl;
13364 int err = 0;
13365 int i;
13366
13367 if (tg3_flag(tp, 5717_PLUS))
13368 mem_tbl = mem_tbl_5717;
13369 else if (tg3_flag(tp, 57765_CLASS) ||
13370 tg3_asic_rev(tp) == ASIC_REV_5762)
13371 mem_tbl = mem_tbl_57765;
13372 else if (tg3_flag(tp, 5755_PLUS))
13373 mem_tbl = mem_tbl_5755;
13374 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13375 mem_tbl = mem_tbl_5906;
13376 else if (tg3_flag(tp, 5705_PLUS))
13377 mem_tbl = mem_tbl_5705;
13378 else
13379 mem_tbl = mem_tbl_570x;
13380
13381 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13382 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13383 if (err)
13384 break;
13385 }
13386
13387 return err;
13388 }
13389
13390 #define TG3_TSO_MSS 500
13391
13392 #define TG3_TSO_IP_HDR_LEN 20
13393 #define TG3_TSO_TCP_HDR_LEN 20
13394 #define TG3_TSO_TCP_OPT_LEN 12
13395
13396 static const u8 tg3_tso_header[] = {
13397 0x08, 0x00,
13398 0x45, 0x00, 0x00, 0x00,
13399 0x00, 0x00, 0x40, 0x00,
13400 0x40, 0x06, 0x00, 0x00,
13401 0x0a, 0x00, 0x00, 0x01,
13402 0x0a, 0x00, 0x00, 0x02,
13403 0x0d, 0x00, 0xe0, 0x00,
13404 0x00, 0x00, 0x01, 0x00,
13405 0x00, 0x00, 0x02, 0x00,
13406 0x80, 0x10, 0x10, 0x00,
13407 0x14, 0x09, 0x00, 0x00,
13408 0x01, 0x01, 0x08, 0x0a,
13409 0x11, 0x11, 0x11, 0x11,
13410 0x11, 0x11, 0x11, 0x11,
13411 };
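/* Layout, for reference: the first two bytes are the ETH_P_IP ethertype
 * (the two MAC addresses precede it in the assembled test frame); then a
 * 20-byte IPv4 header (DF, TTL 64, proto TCP, 10.0.0.1 -> 10.0.0.2,
 * tot_len patched at runtime); then a 32-byte TCP header (data offset 8,
 * ACK set, two NOPs plus a timestamp option making up the 12 bytes of
 * TG3_TSO_TCP_OPT_LEN).
 */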
13412
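/* Send one test frame of @pktsz bytes through the currently configured
 * loopback path and verify it returns intact.  The payload is a counting
 * byte pattern, so corruption, truncation and delivery to the wrong ring
 * (std vs. jumbo) are all detectable.  Returns 0 on success or a
 * negative errno.
 */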
13413 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13414 {
13415 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13416 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13417 u32 budget;
13418 struct sk_buff *skb;
13419 u8 *tx_data, *rx_data;
13420 dma_addr_t map;
13421 int num_pkts, tx_len, rx_len, i, err;
13422 struct tg3_rx_buffer_desc *desc;
13423 struct tg3_napi *tnapi, *rnapi;
13424 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13425
13426 tnapi = &tp->napi[0];
13427 rnapi = &tp->napi[0];
13428 if (tp->irq_cnt > 1) {
13429 if (tg3_flag(tp, ENABLE_RSS))
13430 rnapi = &tp->napi[1];
13431 if (tg3_flag(tp, ENABLE_TSS))
13432 tnapi = &tp->napi[1];
13433 }
13434 coal_now = tnapi->coal_now | rnapi->coal_now;
13435
13436 err = -EIO;
13437
13438 tx_len = pktsz;
13439 skb = netdev_alloc_skb(tp->dev, tx_len);
13440 if (!skb)
13441 return -ENOMEM;
13442
13443 tx_data = skb_put(skb, tx_len);
13444 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13445 memset(tx_data + ETH_ALEN, 0x0, 8);
13446
13447 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13448
13449 if (tso_loopback) {
13450 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13451
13452 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13453 TG3_TSO_TCP_OPT_LEN;
13454
13455 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13456 sizeof(tg3_tso_header));
13457 mss = TG3_TSO_MSS;
13458
13459 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13460 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13461
13462 /* Set the total length field in the IP header */
13463 iph->tot_len = htons((u16)(mss + hdr_len));
13464
13465 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13466 TXD_FLAG_CPU_POST_DMA);
13467
13468 if (tg3_flag(tp, HW_TSO_1) ||
13469 tg3_flag(tp, HW_TSO_2) ||
13470 tg3_flag(tp, HW_TSO_3)) {
13471 struct tcphdr *th;
13472 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13473 th = (struct tcphdr *)&tx_data[val];
13474 th->check = 0;
13475 } else
13476 base_flags |= TXD_FLAG_TCPUDP_CSUM;
13477
13478 if (tg3_flag(tp, HW_TSO_3)) {
13479 mss |= (hdr_len & 0xc) << 12;
13480 if (hdr_len & 0x10)
13481 base_flags |= 0x00000010;
13482 base_flags |= (hdr_len & 0x3e0) << 5;
13483 } else if (tg3_flag(tp, HW_TSO_2))
13484 mss |= hdr_len << 9;
13485 else if (tg3_flag(tp, HW_TSO_1) ||
13486 tg3_asic_rev(tp) == ASIC_REV_5705) {
13487 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13488 } else {
13489 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13490 }
13491
13492 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13493 } else {
13494 num_pkts = 1;
13495 data_off = ETH_HLEN;
13496
13497 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13498 tx_len > VLAN_ETH_FRAME_LEN)
13499 base_flags |= TXD_FLAG_JMB_PKT;
13500 }
13501
13502 for (i = data_off; i < tx_len; i++)
13503 tx_data[i] = (u8) (i & 0xff);
13504
13505 map = dma_map_single(&tp->pdev->dev, skb->data, tx_len, DMA_TO_DEVICE);
13506 if (dma_mapping_error(&tp->pdev->dev, map)) {
13507 dev_kfree_skb(skb);
13508 return -EIO;
13509 }
13510
13511 val = tnapi->tx_prod;
13512 tnapi->tx_buffers[val].skb = skb;
13513 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13514
13515 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13516 rnapi->coal_now);
13517
13518 udelay(10);
13519
13520 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13521
13522 budget = tg3_tx_avail(tnapi);
13523 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13524 base_flags | TXD_FLAG_END, mss, 0)) {
13525 tnapi->tx_buffers[val].skb = NULL;
13526 dev_kfree_skb(skb);
13527 return -EIO;
13528 }
13529
13530 tnapi->tx_prod++;
13531
13532 /* Sync BD data before updating mailbox */
13533 wmb();
13534
13535 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13536 tr32_mailbox(tnapi->prodmbox);
13537
13538 udelay(10);
13539
13540 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
13541 for (i = 0; i < 35; i++) {
13542 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13543 coal_now);
13544
13545 udelay(10);
13546
13547 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13548 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13549 if ((tx_idx == tnapi->tx_prod) &&
13550 (rx_idx == (rx_start_idx + num_pkts)))
13551 break;
13552 }
13553
13554 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13555 dev_kfree_skb(skb);
13556
13557 if (tx_idx != tnapi->tx_prod)
13558 goto out;
13559
13560 if (rx_idx != rx_start_idx + num_pkts)
13561 goto out;
13562
13563 val = data_off;
13564 while (rx_idx != rx_start_idx) {
13565 desc = &rnapi->rx_rcb[rx_start_idx++];
13566 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13567 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13568
13569 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13570 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13571 goto out;
13572
13573 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13574 - ETH_FCS_LEN;
13575
13576 if (!tso_loopback) {
13577 if (rx_len != tx_len)
13578 goto out;
13579
13580 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13581 if (opaque_key != RXD_OPAQUE_RING_STD)
13582 goto out;
13583 } else {
13584 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13585 goto out;
13586 }
13587 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13588 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13589 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13590 goto out;
13591 }
13592
13593 if (opaque_key == RXD_OPAQUE_RING_STD) {
13594 rx_data = tpr->rx_std_buffers[desc_idx].data;
13595 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13596 mapping);
13597 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13598 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13599 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13600 mapping);
13601 } else
13602 goto out;
13603
13604 dma_sync_single_for_cpu(&tp->pdev->dev, map, rx_len,
13605 DMA_FROM_DEVICE);
13606
13607 rx_data += TG3_RX_OFFSET(tp);
13608 for (i = data_off; i < rx_len; i++, val++) {
13609 if (*(rx_data + i) != (u8) (val & 0xff))
13610 goto out;
13611 }
13612 }
13613
13614 err = 0;
13615
13616 /* tg3_free_rings will unmap and free the rx_data */
13617 out:
13618 return err;
13619 }
13620
13621 #define TG3_STD_LOOPBACK_FAILED 1
13622 #define TG3_JMB_LOOPBACK_FAILED 2
13623 #define TG3_TSO_LOOPBACK_FAILED 4
13624 #define TG3_LOOPBACK_FAILED \
13625 (TG3_STD_LOOPBACK_FAILED | \
13626 TG3_JMB_LOOPBACK_FAILED | \
13627 TG3_TSO_LOOPBACK_FAILED)
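/* Each (u64) test result slot packs the three bits above, e.g. a value
 * of 5 (STD | TSO) means the standard and TSO loopback runs failed while
 * the jumbo run passed.
 */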
13628
13629 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13630 {
13631 int err = -EIO;
13632 u32 eee_cap;
13633 u32 jmb_pkt_sz = 9000;
13634
13635 if (tp->dma_limit)
13636 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13637
13638 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13639 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13640
13641 if (!netif_running(tp->dev)) {
13642 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13643 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13644 if (do_extlpbk)
13645 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13646 goto done;
13647 }
13648
13649 err = tg3_reset_hw(tp, true);
13650 if (err) {
13651 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13652 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13653 if (do_extlpbk)
13654 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13655 goto done;
13656 }
13657
13658 if (tg3_flag(tp, ENABLE_RSS)) {
13659 int i;
13660
13661 /* Reroute all rx packets to the 1st queue */
13662 for (i = MAC_RSS_INDIR_TBL_0;
13663 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13664 tw32(i, 0x0);
13665 }
13666
13667 /* HW errata - mac loopback fails in some cases on 5780.
13668 	 * Normal traffic and PHY loopback are not affected by the
13669 * errata. Also, the MAC loopback test is deprecated for
13670 * all newer ASIC revisions.
13671 */
13672 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13673 !tg3_flag(tp, CPMU_PRESENT)) {
13674 tg3_mac_loopback(tp, true);
13675
13676 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13677 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13678
13679 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13680 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13681 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13682
13683 tg3_mac_loopback(tp, false);
13684 }
13685
13686 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13687 !tg3_flag(tp, USE_PHYLIB)) {
13688 int i;
13689
13690 tg3_phy_lpbk_set(tp, 0, false);
13691
13692 /* Wait for link */
13693 for (i = 0; i < 100; i++) {
13694 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13695 break;
13696 mdelay(1);
13697 }
13698
13699 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13700 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13701 if (tg3_flag(tp, TSO_CAPABLE) &&
13702 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13703 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13704 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13705 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13706 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13707
13708 if (do_extlpbk) {
13709 tg3_phy_lpbk_set(tp, 0, true);
13710
13711 /* All link indications report up, but the hardware
13712 * isn't really ready for about 20 msec. Double it
13713 * to be sure.
13714 */
13715 mdelay(40);
13716
13717 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13718 data[TG3_EXT_LOOPB_TEST] |=
13719 TG3_STD_LOOPBACK_FAILED;
13720 if (tg3_flag(tp, TSO_CAPABLE) &&
13721 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13722 data[TG3_EXT_LOOPB_TEST] |=
13723 TG3_TSO_LOOPBACK_FAILED;
13724 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13725 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13726 data[TG3_EXT_LOOPB_TEST] |=
13727 TG3_JMB_LOOPBACK_FAILED;
13728 }
13729
13730 /* Re-enable gphy autopowerdown. */
13731 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13732 tg3_phy_toggle_apd(tp, true);
13733 }
13734
13735 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13736 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13737
13738 done:
13739 tp->phy_flags |= eee_cap;
13740
13741 return err;
13742 }
13743
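/* ethtool self-test entry point.  The NVRAM and link tests run online;
 * when ETH_TEST_FL_OFFLINE is requested the chip is additionally halted
 * to run the register, memory, loopback and interrupt tests, and then
 * restarted.
 */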
13744 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13745 u64 *data)
13746 {
13747 struct tg3 *tp = netdev_priv(dev);
13748 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13749
13750 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13751 if (tg3_power_up(tp)) {
13752 etest->flags |= ETH_TEST_FL_FAILED;
13753 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13754 return;
13755 }
13756 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13757 }
13758
13759 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13760
13761 if (tg3_test_nvram(tp) != 0) {
13762 etest->flags |= ETH_TEST_FL_FAILED;
13763 data[TG3_NVRAM_TEST] = 1;
13764 }
13765 if (!doextlpbk && tg3_test_link(tp)) {
13766 etest->flags |= ETH_TEST_FL_FAILED;
13767 data[TG3_LINK_TEST] = 1;
13768 }
13769 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13770 int err, err2 = 0, irq_sync = 0;
13771
13772 if (netif_running(dev)) {
13773 tg3_phy_stop(tp);
13774 tg3_netif_stop(tp);
13775 irq_sync = 1;
13776 }
13777
13778 tg3_full_lock(tp, irq_sync);
13779 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13780 err = tg3_nvram_lock(tp);
13781 tg3_halt_cpu(tp, RX_CPU_BASE);
13782 if (!tg3_flag(tp, 5705_PLUS))
13783 tg3_halt_cpu(tp, TX_CPU_BASE);
13784 if (!err)
13785 tg3_nvram_unlock(tp);
13786
13787 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13788 tg3_phy_reset(tp);
13789
13790 if (tg3_test_registers(tp) != 0) {
13791 etest->flags |= ETH_TEST_FL_FAILED;
13792 data[TG3_REGISTER_TEST] = 1;
13793 }
13794
13795 if (tg3_test_memory(tp) != 0) {
13796 etest->flags |= ETH_TEST_FL_FAILED;
13797 data[TG3_MEMORY_TEST] = 1;
13798 }
13799
13800 if (doextlpbk)
13801 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13802
13803 if (tg3_test_loopback(tp, data, doextlpbk))
13804 etest->flags |= ETH_TEST_FL_FAILED;
13805
13806 tg3_full_unlock(tp);
13807
13808 if (tg3_test_interrupt(tp) != 0) {
13809 etest->flags |= ETH_TEST_FL_FAILED;
13810 data[TG3_INTERRUPT_TEST] = 1;
13811 }
13812
13813 tg3_full_lock(tp, 0);
13814
13815 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13816 if (netif_running(dev)) {
13817 tg3_flag_set(tp, INIT_COMPLETE);
13818 err2 = tg3_restart_hw(tp, true);
13819 if (!err2)
13820 tg3_netif_start(tp);
13821 }
13822
13823 tg3_full_unlock(tp);
13824
13825 if (irq_sync && !err2)
13826 tg3_phy_start(tp);
13827 }
13828 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13829 tg3_power_down_prepare(tp);
13830
13831 }
13832
13833 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13834 {
13835 struct tg3 *tp = netdev_priv(dev);
13836 struct hwtstamp_config stmpconf;
13837
13838 if (!tg3_flag(tp, PTP_CAPABLE))
13839 return -EOPNOTSUPP;
13840
13841 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13842 return -EFAULT;
13843
13844 if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13845 stmpconf.tx_type != HWTSTAMP_TX_OFF)
13846 return -ERANGE;
13847
13848 switch (stmpconf.rx_filter) {
13849 case HWTSTAMP_FILTER_NONE:
13850 tp->rxptpctl = 0;
13851 break;
13852 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13853 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13854 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13855 break;
13856 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13857 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13858 TG3_RX_PTP_CTL_SYNC_EVNT;
13859 break;
13860 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13861 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13862 TG3_RX_PTP_CTL_DELAY_REQ;
13863 break;
13864 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13865 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13866 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13867 break;
13868 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13869 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13870 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13871 break;
13872 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13873 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13874 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13875 break;
13876 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13877 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13878 TG3_RX_PTP_CTL_SYNC_EVNT;
13879 break;
13880 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13881 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13882 TG3_RX_PTP_CTL_SYNC_EVNT;
13883 break;
13884 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13885 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13886 TG3_RX_PTP_CTL_SYNC_EVNT;
13887 break;
13888 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13889 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13890 TG3_RX_PTP_CTL_DELAY_REQ;
13891 break;
13892 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13893 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13894 TG3_RX_PTP_CTL_DELAY_REQ;
13895 break;
13896 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13897 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13898 TG3_RX_PTP_CTL_DELAY_REQ;
13899 break;
13900 default:
13901 return -ERANGE;
13902 }
13903
13904 if (netif_running(dev) && tp->rxptpctl)
13905 tw32(TG3_RX_PTP_CTL,
13906 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13907
13908 if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13909 tg3_flag_set(tp, TX_TSTAMP_EN);
13910 else
13911 tg3_flag_clear(tp, TX_TSTAMP_EN);
13912
13913 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13914 -EFAULT : 0;
13915 }
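/* For reference, a sketch of the userspace side of the SIOCSHWTSTAMP
 * path handled above (illustrative only; "eth0" and the filter choice
 * are arbitrary):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { 0 };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (char *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * The config is copied back to userspace so the caller can see what was
 * actually applied.
 */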
13916
13917 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13918 {
13919 struct tg3 *tp = netdev_priv(dev);
13920 struct hwtstamp_config stmpconf;
13921
13922 if (!tg3_flag(tp, PTP_CAPABLE))
13923 return -EOPNOTSUPP;
13924
13925 stmpconf.flags = 0;
13926 stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13927 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13928
13929 switch (tp->rxptpctl) {
13930 case 0:
13931 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13932 break;
13933 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13934 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13935 break;
13936 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13937 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13938 break;
13939 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13940 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13941 break;
13942 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13943 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13944 break;
13945 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13946 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13947 break;
13948 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13949 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13950 break;
13951 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13952 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13953 break;
13954 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13955 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13956 break;
13957 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13958 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13959 break;
13960 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13961 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13962 break;
13963 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13964 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13965 break;
13966 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13967 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13968 break;
13969 default:
13970 WARN_ON_ONCE(1);
13971 return -ERANGE;
13972 }
13973
13974 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13975 -EFAULT : 0;
13976 }
13977
13978 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13979 {
13980 struct mii_ioctl_data *data = if_mii(ifr);
13981 struct tg3 *tp = netdev_priv(dev);
13982 int err;
13983
13984 if (tg3_flag(tp, USE_PHYLIB)) {
13985 struct phy_device *phydev;
13986 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13987 return -EAGAIN;
13988 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
13989 return phy_mii_ioctl(phydev, ifr, cmd);
13990 }
13991
13992 switch (cmd) {
13993 case SIOCGMIIPHY:
13994 data->phy_id = tp->phy_addr;
13995
13996 fallthrough;
13997 case SIOCGMIIREG: {
13998 u32 mii_regval;
13999
14000 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14001 break; /* We have no PHY */
14002
14003 if (!netif_running(dev))
14004 return -EAGAIN;
14005
14006 spin_lock_bh(&tp->lock);
14007 err = __tg3_readphy(tp, data->phy_id & 0x1f,
14008 data->reg_num & 0x1f, &mii_regval);
14009 spin_unlock_bh(&tp->lock);
14010
14011 data->val_out = mii_regval;
14012
14013 return err;
14014 }
14015
14016 case SIOCSMIIREG:
14017 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14018 break; /* We have no PHY */
14019
14020 if (!netif_running(dev))
14021 return -EAGAIN;
14022
14023 spin_lock_bh(&tp->lock);
14024 err = __tg3_writephy(tp, data->phy_id & 0x1f,
14025 data->reg_num & 0x1f, data->val_in);
14026 spin_unlock_bh(&tp->lock);
14027
14028 return err;
14029
14030 case SIOCSHWTSTAMP:
14031 return tg3_hwtstamp_set(dev, ifr);
14032
14033 case SIOCGHWTSTAMP:
14034 return tg3_hwtstamp_get(dev, ifr);
14035
14036 default:
14037 /* do nothing */
14038 break;
14039 }
14040 return -EOPNOTSUPP;
14041 }
14042
14043 static int tg3_get_coalesce(struct net_device *dev,
14044 struct ethtool_coalesce *ec,
14045 struct kernel_ethtool_coalesce *kernel_coal,
14046 struct netlink_ext_ack *extack)
14047 {
14048 struct tg3 *tp = netdev_priv(dev);
14049
14050 memcpy(ec, &tp->coal, sizeof(*ec));
14051 return 0;
14052 }
14053
14054 static int tg3_set_coalesce(struct net_device *dev,
14055 struct ethtool_coalesce *ec,
14056 struct kernel_ethtool_coalesce *kernel_coal,
14057 struct netlink_ext_ack *extack)
14058 {
14059 struct tg3 *tp = netdev_priv(dev);
14060 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14061 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14062
14063 if (!tg3_flag(tp, 5705_PLUS)) {
14064 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14065 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14066 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14067 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14068 }
14069
14070 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14071 (!ec->rx_coalesce_usecs) ||
14072 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14073 (!ec->tx_coalesce_usecs) ||
14074 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14075 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14076 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14077 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14078 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14079 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14080 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14081 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14082 return -EINVAL;
14083
14084 /* Only copy relevant parameters, ignore all others. */
14085 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14086 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14087 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14088 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14089 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14090 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14091 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14092 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14093 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14094
14095 if (netif_running(dev)) {
14096 tg3_full_lock(tp, 0);
14097 __tg3_set_coalesce(tp, &tp->coal);
14098 tg3_full_unlock(tp);
14099 }
14100 return 0;
14101 }
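/* For reference, the checks above back the standard ethtool coalescing
 * knobs, e.g.:
 *
 *	ethtool -C eth0 rx-usecs 20 rx-frames 5
 *
 * arrives here via the ethtool core with ec->rx_coalesce_usecs == 20 and
 * ec->rx_max_coalesced_frames == 5.
 */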
14102
14103 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14104 {
14105 struct tg3 *tp = netdev_priv(dev);
14106
14107 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14108 netdev_warn(tp->dev, "Board does not support EEE!\n");
14109 return -EOPNOTSUPP;
14110 }
14111
14112 if (edata->advertised != tp->eee.advertised) {
14113 netdev_warn(tp->dev,
14114 "Direct manipulation of EEE advertisement is not supported\n");
14115 return -EINVAL;
14116 }
14117
14118 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14119 netdev_warn(tp->dev,
14120 "Maximal Tx Lpi timer supported is %#x(u)\n",
14121 TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14122 return -EINVAL;
14123 }
14124
14125 tp->eee = *edata;
14126
14127 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14128 tg3_warn_mgmt_link_flap(tp);
14129
14130 if (netif_running(tp->dev)) {
14131 tg3_full_lock(tp, 0);
14132 tg3_setup_eee(tp);
14133 tg3_phy_reset(tp);
14134 tg3_full_unlock(tp);
14135 }
14136
14137 return 0;
14138 }
14139
14140 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14141 {
14142 struct tg3 *tp = netdev_priv(dev);
14143
14144 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14145 netdev_warn(tp->dev,
14146 "Board does not support EEE!\n");
14147 return -EOPNOTSUPP;
14148 }
14149
14150 *edata = tp->eee;
14151 return 0;
14152 }
14153
14154 static const struct ethtool_ops tg3_ethtool_ops = {
14155 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
14156 ETHTOOL_COALESCE_MAX_FRAMES |
14157 ETHTOOL_COALESCE_USECS_IRQ |
14158 ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
14159 ETHTOOL_COALESCE_STATS_BLOCK_USECS,
14160 .get_drvinfo = tg3_get_drvinfo,
14161 .get_regs_len = tg3_get_regs_len,
14162 .get_regs = tg3_get_regs,
14163 .get_wol = tg3_get_wol,
14164 .set_wol = tg3_set_wol,
14165 .get_msglevel = tg3_get_msglevel,
14166 .set_msglevel = tg3_set_msglevel,
14167 .nway_reset = tg3_nway_reset,
14168 .get_link = ethtool_op_get_link,
14169 .get_eeprom_len = tg3_get_eeprom_len,
14170 .get_eeprom = tg3_get_eeprom,
14171 .set_eeprom = tg3_set_eeprom,
14172 .get_ringparam = tg3_get_ringparam,
14173 .set_ringparam = tg3_set_ringparam,
14174 .get_pauseparam = tg3_get_pauseparam,
14175 .set_pauseparam = tg3_set_pauseparam,
14176 .self_test = tg3_self_test,
14177 .get_strings = tg3_get_strings,
14178 .set_phys_id = tg3_set_phys_id,
14179 .get_ethtool_stats = tg3_get_ethtool_stats,
14180 .get_coalesce = tg3_get_coalesce,
14181 .set_coalesce = tg3_set_coalesce,
14182 .get_sset_count = tg3_get_sset_count,
14183 .get_rxnfc = tg3_get_rxnfc,
14184 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
14185 .get_rxfh = tg3_get_rxfh,
14186 .set_rxfh = tg3_set_rxfh,
14187 .get_channels = tg3_get_channels,
14188 .set_channels = tg3_set_channels,
14189 .get_ts_info = tg3_get_ts_info,
14190 .get_eee = tg3_get_eee,
14191 .set_eee = tg3_set_eee,
14192 .get_link_ksettings = tg3_get_link_ksettings,
14193 .set_link_ksettings = tg3_set_link_ksettings,
14194 };
14195
14196 static void tg3_get_stats64(struct net_device *dev,
14197 struct rtnl_link_stats64 *stats)
14198 {
14199 struct tg3 *tp = netdev_priv(dev);
14200
14201 spin_lock_bh(&tp->lock);
14202 if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14203 *stats = tp->net_stats_prev;
14204 spin_unlock_bh(&tp->lock);
14205 return;
14206 }
14207
14208 tg3_get_nstats(tp, stats);
14209 spin_unlock_bh(&tp->lock);
14210 }
14211
14212 static void tg3_set_rx_mode(struct net_device *dev)
14213 {
14214 struct tg3 *tp = netdev_priv(dev);
14215
14216 if (!netif_running(dev))
14217 return;
14218
14219 tg3_full_lock(tp, 0);
14220 __tg3_set_rx_mode(dev);
14221 tg3_full_unlock(tp);
14222 }
14223
14224 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14225 int new_mtu)
14226 {
14227 dev->mtu = new_mtu;
14228
14229 if (new_mtu > ETH_DATA_LEN) {
14230 if (tg3_flag(tp, 5780_CLASS)) {
14231 netdev_update_features(dev);
14232 tg3_flag_clear(tp, TSO_CAPABLE);
14233 } else {
14234 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14235 }
14236 } else {
14237 if (tg3_flag(tp, 5780_CLASS)) {
14238 tg3_flag_set(tp, TSO_CAPABLE);
14239 netdev_update_features(dev);
14240 }
14241 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14242 }
14243 }
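/* The TSO_CAPABLE toggling above reflects a limitation of the 5780-class
 * parts: they perform TSO in firmware, and that firmware cannot handle
 * jumbo frames, so raising the MTU past ETH_DATA_LEN trades TSO away.
 */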
14244
14245 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14246 {
14247 struct tg3 *tp = netdev_priv(dev);
14248 int err;
14249 bool reset_phy = false;
14250
14251 if (!netif_running(dev)) {
14252 /* We'll just catch it later when the
14253 		 * device is brought up.
14254 */
14255 tg3_set_mtu(dev, tp, new_mtu);
14256 return 0;
14257 }
14258
14259 tg3_phy_stop(tp);
14260
14261 tg3_netif_stop(tp);
14262
14263 tg3_set_mtu(dev, tp, new_mtu);
14264
14265 tg3_full_lock(tp, 1);
14266
14267 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14268
14269 /* Reset PHY, otherwise the read DMA engine will be in a mode that
14270 * breaks all requests to 256 bytes.
14271 */
14272 if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14273 tg3_asic_rev(tp) == ASIC_REV_5717 ||
14274 tg3_asic_rev(tp) == ASIC_REV_5719 ||
14275 tg3_asic_rev(tp) == ASIC_REV_5720)
14276 reset_phy = true;
14277
14278 err = tg3_restart_hw(tp, reset_phy);
14279
14280 if (!err)
14281 tg3_netif_start(tp);
14282
14283 tg3_full_unlock(tp);
14284
14285 if (!err)
14286 tg3_phy_start(tp);
14287
14288 return err;
14289 }
14290
14291 static const struct net_device_ops tg3_netdev_ops = {
14292 .ndo_open = tg3_open,
14293 .ndo_stop = tg3_close,
14294 .ndo_start_xmit = tg3_start_xmit,
14295 .ndo_get_stats64 = tg3_get_stats64,
14296 .ndo_validate_addr = eth_validate_addr,
14297 .ndo_set_rx_mode = tg3_set_rx_mode,
14298 .ndo_set_mac_address = tg3_set_mac_addr,
14299 .ndo_eth_ioctl = tg3_ioctl,
14300 .ndo_tx_timeout = tg3_tx_timeout,
14301 .ndo_change_mtu = tg3_change_mtu,
14302 .ndo_fix_features = tg3_fix_features,
14303 .ndo_set_features = tg3_set_features,
14304 #ifdef CONFIG_NET_POLL_CONTROLLER
14305 .ndo_poll_controller = tg3_poll_controller,
14306 #endif
14307 };
14308
14309 static void tg3_get_eeprom_size(struct tg3 *tp)
14310 {
14311 u32 cursize, val, magic;
14312
14313 tp->nvram_size = EEPROM_CHIP_SIZE;
14314
14315 if (tg3_nvram_read(tp, 0, &magic) != 0)
14316 return;
14317
14318 if ((magic != TG3_EEPROM_MAGIC) &&
14319 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14320 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14321 return;
14322
14323 /*
14324 * Size the chip by reading offsets at increasing powers of two.
14325 * When we encounter our validation signature, we know the addressing
14326 * has wrapped around, and thus have our chip size.
14327 */
14328 cursize = 0x10;
14329
14330 while (cursize < tp->nvram_size) {
14331 if (tg3_nvram_read(tp, cursize, &val) != 0)
14332 return;
14333
14334 if (val == magic)
14335 break;
14336
14337 cursize <<= 1;
14338 }
14339
14340 tp->nvram_size = cursize;
14341 }
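/* Worked example of the sizing loop above (part size hypothetical): on a
 * 512-byte EEPROM, reads at 0x10, 0x20, ... return ordinary data until
 * the address wraps, so the read at 0x200 aliases offset 0 and returns
 * the magic value again, leaving cursize == 0x200 as the chip size.
 */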
14342
14343 static void tg3_get_nvram_size(struct tg3 *tp)
14344 {
14345 u32 val;
14346
14347 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14348 return;
14349
14350 /* Selfboot format */
14351 if (val != TG3_EEPROM_MAGIC) {
14352 tg3_get_eeprom_size(tp);
14353 return;
14354 }
14355
14356 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14357 if (val != 0) {
14358 /* This is confusing. We want to operate on the
14359 * 16-bit value at offset 0xf2. The tg3_nvram_read()
14360 * call will read from NVRAM and byteswap the data
14361 * according to the byteswapping settings for all
14362 * other register accesses. This ensures the data we
14363 * want will always reside in the lower 16-bits.
14364 * However, the data in NVRAM is in LE format, which
14365 * means the data from the NVRAM read will always be
14366 * opposite the endianness of the CPU. The 16-bit
14367 * byteswap then brings the data to CPU endianness.
14368 */
14369 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14370 return;
14371 }
14372 }
14373 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14374 }
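/* Worked example for the swab16() above (sizes hypothetical): a 128 KB
 * part stores the little-endian 16-bit value 0x0080 at offset 0xf2.
 * Because tg3_nvram_read() leaves NVRAM data opposite to CPU endianness,
 * the low half of val reads as 0x8000 on any CPU, and
 * swab16(0x8000) * 1024 recovers 128 KB.
 */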
14375
14376 static void tg3_get_nvram_info(struct tg3 *tp)
14377 {
14378 u32 nvcfg1;
14379
14380 nvcfg1 = tr32(NVRAM_CFG1);
14381 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14382 tg3_flag_set(tp, FLASH);
14383 } else {
14384 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14385 tw32(NVRAM_CFG1, nvcfg1);
14386 }
14387
14388 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14389 tg3_flag(tp, 5780_CLASS)) {
14390 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14391 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14392 tp->nvram_jedecnum = JEDEC_ATMEL;
14393 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14394 tg3_flag_set(tp, NVRAM_BUFFERED);
14395 break;
14396 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14397 tp->nvram_jedecnum = JEDEC_ATMEL;
14398 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14399 break;
14400 case FLASH_VENDOR_ATMEL_EEPROM:
14401 tp->nvram_jedecnum = JEDEC_ATMEL;
14402 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14403 tg3_flag_set(tp, NVRAM_BUFFERED);
14404 break;
14405 case FLASH_VENDOR_ST:
14406 tp->nvram_jedecnum = JEDEC_ST;
14407 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14408 tg3_flag_set(tp, NVRAM_BUFFERED);
14409 break;
14410 case FLASH_VENDOR_SAIFUN:
14411 tp->nvram_jedecnum = JEDEC_SAIFUN;
14412 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14413 break;
14414 case FLASH_VENDOR_SST_SMALL:
14415 case FLASH_VENDOR_SST_LARGE:
14416 tp->nvram_jedecnum = JEDEC_SST;
14417 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14418 break;
14419 }
14420 } else {
14421 tp->nvram_jedecnum = JEDEC_ATMEL;
14422 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14423 tg3_flag_set(tp, NVRAM_BUFFERED);
14424 }
14425 }
14426
14427 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14428 {
14429 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14430 case FLASH_5752PAGE_SIZE_256:
14431 tp->nvram_pagesize = 256;
14432 break;
14433 case FLASH_5752PAGE_SIZE_512:
14434 tp->nvram_pagesize = 512;
14435 break;
14436 case FLASH_5752PAGE_SIZE_1K:
14437 tp->nvram_pagesize = 1024;
14438 break;
14439 case FLASH_5752PAGE_SIZE_2K:
14440 tp->nvram_pagesize = 2048;
14441 break;
14442 case FLASH_5752PAGE_SIZE_4K:
14443 tp->nvram_pagesize = 4096;
14444 break;
14445 case FLASH_5752PAGE_SIZE_264:
14446 tp->nvram_pagesize = 264;
14447 break;
14448 case FLASH_5752PAGE_SIZE_528:
14449 tp->nvram_pagesize = 528;
14450 break;
14451 }
14452 }
14453
14454 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14455 {
14456 u32 nvcfg1;
14457
14458 nvcfg1 = tr32(NVRAM_CFG1);
14459
14460 /* NVRAM protection for TPM */
14461 if (nvcfg1 & (1 << 27))
14462 tg3_flag_set(tp, PROTECTED_NVRAM);
14463
14464 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14465 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14466 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14467 tp->nvram_jedecnum = JEDEC_ATMEL;
14468 tg3_flag_set(tp, NVRAM_BUFFERED);
14469 break;
14470 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14471 tp->nvram_jedecnum = JEDEC_ATMEL;
14472 tg3_flag_set(tp, NVRAM_BUFFERED);
14473 tg3_flag_set(tp, FLASH);
14474 break;
14475 case FLASH_5752VENDOR_ST_M45PE10:
14476 case FLASH_5752VENDOR_ST_M45PE20:
14477 case FLASH_5752VENDOR_ST_M45PE40:
14478 tp->nvram_jedecnum = JEDEC_ST;
14479 tg3_flag_set(tp, NVRAM_BUFFERED);
14480 tg3_flag_set(tp, FLASH);
14481 break;
14482 }
14483
14484 if (tg3_flag(tp, FLASH)) {
14485 tg3_nvram_get_pagesize(tp, nvcfg1);
14486 } else {
14487 /* For eeprom, set pagesize to maximum eeprom size */
14488 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14489
14490 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14491 tw32(NVRAM_CFG1, nvcfg1);
14492 }
14493 }
14494
14495 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14496 {
14497 u32 nvcfg1, protect = 0;
14498
14499 nvcfg1 = tr32(NVRAM_CFG1);
14500
14501 /* NVRAM protection for TPM */
14502 if (nvcfg1 & (1 << 27)) {
14503 tg3_flag_set(tp, PROTECTED_NVRAM);
14504 protect = 1;
14505 }
14506
14507 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14508 switch (nvcfg1) {
14509 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14510 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14511 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14512 case FLASH_5755VENDOR_ATMEL_FLASH_5:
14513 tp->nvram_jedecnum = JEDEC_ATMEL;
14514 tg3_flag_set(tp, NVRAM_BUFFERED);
14515 tg3_flag_set(tp, FLASH);
14516 tp->nvram_pagesize = 264;
14517 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14518 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14519 tp->nvram_size = (protect ? 0x3e200 :
14520 TG3_NVRAM_SIZE_512KB);
14521 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14522 tp->nvram_size = (protect ? 0x1f200 :
14523 TG3_NVRAM_SIZE_256KB);
14524 else
14525 tp->nvram_size = (protect ? 0x1f200 :
14526 TG3_NVRAM_SIZE_128KB);
14527 break;
14528 case FLASH_5752VENDOR_ST_M45PE10:
14529 case FLASH_5752VENDOR_ST_M45PE20:
14530 case FLASH_5752VENDOR_ST_M45PE40:
14531 tp->nvram_jedecnum = JEDEC_ST;
14532 tg3_flag_set(tp, NVRAM_BUFFERED);
14533 tg3_flag_set(tp, FLASH);
14534 tp->nvram_pagesize = 256;
14535 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14536 tp->nvram_size = (protect ?
14537 TG3_NVRAM_SIZE_64KB :
14538 TG3_NVRAM_SIZE_128KB);
14539 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14540 tp->nvram_size = (protect ?
14541 TG3_NVRAM_SIZE_64KB :
14542 TG3_NVRAM_SIZE_256KB);
14543 else
14544 tp->nvram_size = (protect ?
14545 TG3_NVRAM_SIZE_128KB :
14546 TG3_NVRAM_SIZE_512KB);
14547 break;
14548 }
14549 }
14550
14551 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14552 {
14553 u32 nvcfg1;
14554
14555 nvcfg1 = tr32(NVRAM_CFG1);
14556
14557 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14558 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14559 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14560 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14561 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14562 tp->nvram_jedecnum = JEDEC_ATMEL;
14563 tg3_flag_set(tp, NVRAM_BUFFERED);
14564 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14565
14566 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14567 tw32(NVRAM_CFG1, nvcfg1);
14568 break;
14569 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14570 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14571 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14572 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14573 tp->nvram_jedecnum = JEDEC_ATMEL;
14574 tg3_flag_set(tp, NVRAM_BUFFERED);
14575 tg3_flag_set(tp, FLASH);
14576 tp->nvram_pagesize = 264;
14577 break;
14578 case FLASH_5752VENDOR_ST_M45PE10:
14579 case FLASH_5752VENDOR_ST_M45PE20:
14580 case FLASH_5752VENDOR_ST_M45PE40:
14581 tp->nvram_jedecnum = JEDEC_ST;
14582 tg3_flag_set(tp, NVRAM_BUFFERED);
14583 tg3_flag_set(tp, FLASH);
14584 tp->nvram_pagesize = 256;
14585 break;
14586 }
14587 }
14588
14589 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14590 {
14591 u32 nvcfg1, protect = 0;
14592
14593 nvcfg1 = tr32(NVRAM_CFG1);
14594
14595 /* NVRAM protection for TPM */
14596 if (nvcfg1 & (1 << 27)) {
14597 tg3_flag_set(tp, PROTECTED_NVRAM);
14598 protect = 1;
14599 }
14600
14601 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14602 switch (nvcfg1) {
14603 case FLASH_5761VENDOR_ATMEL_ADB021D:
14604 case FLASH_5761VENDOR_ATMEL_ADB041D:
14605 case FLASH_5761VENDOR_ATMEL_ADB081D:
14606 case FLASH_5761VENDOR_ATMEL_ADB161D:
14607 case FLASH_5761VENDOR_ATMEL_MDB021D:
14608 case FLASH_5761VENDOR_ATMEL_MDB041D:
14609 case FLASH_5761VENDOR_ATMEL_MDB081D:
14610 case FLASH_5761VENDOR_ATMEL_MDB161D:
14611 tp->nvram_jedecnum = JEDEC_ATMEL;
14612 tg3_flag_set(tp, NVRAM_BUFFERED);
14613 tg3_flag_set(tp, FLASH);
14614 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14615 tp->nvram_pagesize = 256;
14616 break;
14617 case FLASH_5761VENDOR_ST_A_M45PE20:
14618 case FLASH_5761VENDOR_ST_A_M45PE40:
14619 case FLASH_5761VENDOR_ST_A_M45PE80:
14620 case FLASH_5761VENDOR_ST_A_M45PE16:
14621 case FLASH_5761VENDOR_ST_M_M45PE20:
14622 case FLASH_5761VENDOR_ST_M_M45PE40:
14623 case FLASH_5761VENDOR_ST_M_M45PE80:
14624 case FLASH_5761VENDOR_ST_M_M45PE16:
14625 tp->nvram_jedecnum = JEDEC_ST;
14626 tg3_flag_set(tp, NVRAM_BUFFERED);
14627 tg3_flag_set(tp, FLASH);
14628 tp->nvram_pagesize = 256;
14629 break;
14630 }
14631
14632 if (protect) {
14633 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14634 } else {
14635 switch (nvcfg1) {
14636 case FLASH_5761VENDOR_ATMEL_ADB161D:
14637 case FLASH_5761VENDOR_ATMEL_MDB161D:
14638 case FLASH_5761VENDOR_ST_A_M45PE16:
14639 case FLASH_5761VENDOR_ST_M_M45PE16:
14640 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14641 break;
14642 case FLASH_5761VENDOR_ATMEL_ADB081D:
14643 case FLASH_5761VENDOR_ATMEL_MDB081D:
14644 case FLASH_5761VENDOR_ST_A_M45PE80:
14645 case FLASH_5761VENDOR_ST_M_M45PE80:
14646 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14647 break;
14648 case FLASH_5761VENDOR_ATMEL_ADB041D:
14649 case FLASH_5761VENDOR_ATMEL_MDB041D:
14650 case FLASH_5761VENDOR_ST_A_M45PE40:
14651 case FLASH_5761VENDOR_ST_M_M45PE40:
14652 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14653 break;
14654 case FLASH_5761VENDOR_ATMEL_ADB021D:
14655 case FLASH_5761VENDOR_ATMEL_MDB021D:
14656 case FLASH_5761VENDOR_ST_A_M45PE20:
14657 case FLASH_5761VENDOR_ST_M_M45PE20:
14658 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14659 break;
14660 }
14661 }
14662 }
14663
14664 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14665 {
14666 tp->nvram_jedecnum = JEDEC_ATMEL;
14667 tg3_flag_set(tp, NVRAM_BUFFERED);
14668 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14669 }
14670
14671 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14672 {
14673 u32 nvcfg1;
14674
14675 nvcfg1 = tr32(NVRAM_CFG1);
14676
14677 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14678 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14679 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14680 tp->nvram_jedecnum = JEDEC_ATMEL;
14681 tg3_flag_set(tp, NVRAM_BUFFERED);
14682 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14683
14684 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14685 tw32(NVRAM_CFG1, nvcfg1);
14686 return;
14687 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14688 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14689 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14690 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14691 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14692 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14693 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14694 tp->nvram_jedecnum = JEDEC_ATMEL;
14695 tg3_flag_set(tp, NVRAM_BUFFERED);
14696 tg3_flag_set(tp, FLASH);
14697
14698 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14699 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14700 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14701 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14702 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14703 break;
14704 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14705 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14706 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14707 break;
14708 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14709 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14710 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14711 break;
14712 }
14713 break;
14714 case FLASH_5752VENDOR_ST_M45PE10:
14715 case FLASH_5752VENDOR_ST_M45PE20:
14716 case FLASH_5752VENDOR_ST_M45PE40:
14717 tp->nvram_jedecnum = JEDEC_ST;
14718 tg3_flag_set(tp, NVRAM_BUFFERED);
14719 tg3_flag_set(tp, FLASH);
14720
14721 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14722 case FLASH_5752VENDOR_ST_M45PE10:
14723 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14724 break;
14725 case FLASH_5752VENDOR_ST_M45PE20:
14726 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14727 break;
14728 case FLASH_5752VENDOR_ST_M45PE40:
14729 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14730 break;
14731 }
14732 break;
14733 default:
14734 tg3_flag_set(tp, NO_NVRAM);
14735 return;
14736 }
14737
14738 tg3_nvram_get_pagesize(tp, nvcfg1);
14739 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14740 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14741 }
14742
14743
14744 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14745 {
14746 u32 nvcfg1;
14747
14748 nvcfg1 = tr32(NVRAM_CFG1);
14749
14750 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14751 case FLASH_5717VENDOR_ATMEL_EEPROM:
14752 case FLASH_5717VENDOR_MICRO_EEPROM:
14753 tp->nvram_jedecnum = JEDEC_ATMEL;
14754 tg3_flag_set(tp, NVRAM_BUFFERED);
14755 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14756
14757 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14758 tw32(NVRAM_CFG1, nvcfg1);
14759 return;
14760 case FLASH_5717VENDOR_ATMEL_MDB011D:
14761 case FLASH_5717VENDOR_ATMEL_ADB011B:
14762 case FLASH_5717VENDOR_ATMEL_ADB011D:
14763 case FLASH_5717VENDOR_ATMEL_MDB021D:
14764 case FLASH_5717VENDOR_ATMEL_ADB021B:
14765 case FLASH_5717VENDOR_ATMEL_ADB021D:
14766 case FLASH_5717VENDOR_ATMEL_45USPT:
14767 tp->nvram_jedecnum = JEDEC_ATMEL;
14768 tg3_flag_set(tp, NVRAM_BUFFERED);
14769 tg3_flag_set(tp, FLASH);
14770
14771 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14772 case FLASH_5717VENDOR_ATMEL_MDB021D:
14773 			/* Detect size with tg3_get_nvram_size() */
14774 break;
14775 case FLASH_5717VENDOR_ATMEL_ADB021B:
14776 case FLASH_5717VENDOR_ATMEL_ADB021D:
14777 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14778 break;
14779 default:
14780 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14781 break;
14782 }
14783 break;
14784 case FLASH_5717VENDOR_ST_M_M25PE10:
14785 case FLASH_5717VENDOR_ST_A_M25PE10:
14786 case FLASH_5717VENDOR_ST_M_M45PE10:
14787 case FLASH_5717VENDOR_ST_A_M45PE10:
14788 case FLASH_5717VENDOR_ST_M_M25PE20:
14789 case FLASH_5717VENDOR_ST_A_M25PE20:
14790 case FLASH_5717VENDOR_ST_M_M45PE20:
14791 case FLASH_5717VENDOR_ST_A_M45PE20:
14792 case FLASH_5717VENDOR_ST_25USPT:
14793 case FLASH_5717VENDOR_ST_45USPT:
14794 tp->nvram_jedecnum = JEDEC_ST;
14795 tg3_flag_set(tp, NVRAM_BUFFERED);
14796 tg3_flag_set(tp, FLASH);
14797
14798 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14799 case FLASH_5717VENDOR_ST_M_M25PE20:
14800 case FLASH_5717VENDOR_ST_M_M45PE20:
14801 			/* Detect size with tg3_get_nvram_size() */
14802 break;
14803 case FLASH_5717VENDOR_ST_A_M25PE20:
14804 case FLASH_5717VENDOR_ST_A_M45PE20:
14805 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14806 break;
14807 default:
14808 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14809 break;
14810 }
14811 break;
14812 default:
14813 tg3_flag_set(tp, NO_NVRAM);
14814 return;
14815 }
14816
14817 tg3_nvram_get_pagesize(tp, nvcfg1);
14818 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14819 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14820 }
14821
14822 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14823 {
14824 u32 nvcfg1, nvmpinstrp, nv_status;
14825
14826 nvcfg1 = tr32(NVRAM_CFG1);
14827 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14828
14829 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14830 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14831 tg3_flag_set(tp, NO_NVRAM);
14832 return;
14833 }
14834
14835 switch (nvmpinstrp) {
14836 case FLASH_5762_MX25L_100:
14837 case FLASH_5762_MX25L_200:
14838 case FLASH_5762_MX25L_400:
14839 case FLASH_5762_MX25L_800:
14840 case FLASH_5762_MX25L_160_320:
14841 tp->nvram_pagesize = 4096;
14842 tp->nvram_jedecnum = JEDEC_MACRONIX;
14843 tg3_flag_set(tp, NVRAM_BUFFERED);
14844 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14845 tg3_flag_set(tp, FLASH);
14846 nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
14847 tp->nvram_size =
14848 (1 << (nv_status >> AUTOSENSE_DEVID &
14849 AUTOSENSE_DEVID_MASK)
14850 << AUTOSENSE_SIZE_IN_MB);
14851 return;
14852
14853 case FLASH_5762_EEPROM_HD:
14854 nvmpinstrp = FLASH_5720_EEPROM_HD;
14855 break;
14856 case FLASH_5762_EEPROM_LD:
14857 nvmpinstrp = FLASH_5720_EEPROM_LD;
14858 break;
14859 case FLASH_5720VENDOR_M_ST_M45PE20:
14860 /* This pinstrap supports multiple sizes, so force it
14861 * to read the actual size from location 0xf0.
14862 */
14863 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14864 break;
14865 }
14866 }
14867
14868 switch (nvmpinstrp) {
14869 case FLASH_5720_EEPROM_HD:
14870 case FLASH_5720_EEPROM_LD:
14871 tp->nvram_jedecnum = JEDEC_ATMEL;
14872 tg3_flag_set(tp, NVRAM_BUFFERED);
14873
14874 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14875 tw32(NVRAM_CFG1, nvcfg1);
14876 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14877 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14878 else
14879 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14880 return;
14881 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14882 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14883 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14884 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14885 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14886 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14887 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14888 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14889 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14890 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14891 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14892 case FLASH_5720VENDOR_ATMEL_45USPT:
14893 tp->nvram_jedecnum = JEDEC_ATMEL;
14894 tg3_flag_set(tp, NVRAM_BUFFERED);
14895 tg3_flag_set(tp, FLASH);
14896
14897 switch (nvmpinstrp) {
14898 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14899 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14900 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14901 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14902 break;
14903 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14904 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14905 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14906 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14907 break;
14908 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14909 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14910 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14911 break;
14912 default:
14913 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14914 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14915 break;
14916 }
14917 break;
14918 case FLASH_5720VENDOR_M_ST_M25PE10:
14919 case FLASH_5720VENDOR_M_ST_M45PE10:
14920 case FLASH_5720VENDOR_A_ST_M25PE10:
14921 case FLASH_5720VENDOR_A_ST_M45PE10:
14922 case FLASH_5720VENDOR_M_ST_M25PE20:
14923 case FLASH_5720VENDOR_M_ST_M45PE20:
14924 case FLASH_5720VENDOR_A_ST_M25PE20:
14925 case FLASH_5720VENDOR_A_ST_M45PE20:
14926 case FLASH_5720VENDOR_M_ST_M25PE40:
14927 case FLASH_5720VENDOR_M_ST_M45PE40:
14928 case FLASH_5720VENDOR_A_ST_M25PE40:
14929 case FLASH_5720VENDOR_A_ST_M45PE40:
14930 case FLASH_5720VENDOR_M_ST_M25PE80:
14931 case FLASH_5720VENDOR_M_ST_M45PE80:
14932 case FLASH_5720VENDOR_A_ST_M25PE80:
14933 case FLASH_5720VENDOR_A_ST_M45PE80:
14934 case FLASH_5720VENDOR_ST_25USPT:
14935 case FLASH_5720VENDOR_ST_45USPT:
14936 tp->nvram_jedecnum = JEDEC_ST;
14937 tg3_flag_set(tp, NVRAM_BUFFERED);
14938 tg3_flag_set(tp, FLASH);
14939
14940 switch (nvmpinstrp) {
14941 case FLASH_5720VENDOR_M_ST_M25PE20:
14942 case FLASH_5720VENDOR_M_ST_M45PE20:
14943 case FLASH_5720VENDOR_A_ST_M25PE20:
14944 case FLASH_5720VENDOR_A_ST_M45PE20:
14945 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14946 break;
14947 case FLASH_5720VENDOR_M_ST_M25PE40:
14948 case FLASH_5720VENDOR_M_ST_M45PE40:
14949 case FLASH_5720VENDOR_A_ST_M25PE40:
14950 case FLASH_5720VENDOR_A_ST_M45PE40:
14951 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14952 break;
14953 case FLASH_5720VENDOR_M_ST_M25PE80:
14954 case FLASH_5720VENDOR_M_ST_M45PE80:
14955 case FLASH_5720VENDOR_A_ST_M25PE80:
14956 case FLASH_5720VENDOR_A_ST_M45PE80:
14957 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14958 break;
14959 default:
14960 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14961 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14962 break;
14963 }
14964 break;
14965 default:
14966 tg3_flag_set(tp, NO_NVRAM);
14967 return;
14968 }
14969
14970 tg3_nvram_get_pagesize(tp, nvcfg1);
14971 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14972 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14973
14974 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14975 u32 val;
14976
14977 if (tg3_nvram_read(tp, 0, &val))
14978 return;
14979
14980 if (val != TG3_EEPROM_MAGIC &&
14981 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14982 tg3_flag_set(tp, NO_NVRAM);
14983 }
14984 }
14985
14986 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14987 static void tg3_nvram_init(struct tg3 *tp)
14988 {
14989 if (tg3_flag(tp, IS_SSB_CORE)) {
14990 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14991 tg3_flag_clear(tp, NVRAM);
14992 tg3_flag_clear(tp, NVRAM_BUFFERED);
14993 tg3_flag_set(tp, NO_NVRAM);
14994 return;
14995 }
14996
14997 tw32_f(GRC_EEPROM_ADDR,
14998 (EEPROM_ADDR_FSM_RESET |
14999 (EEPROM_DEFAULT_CLOCK_PERIOD <<
15000 EEPROM_ADDR_CLKPERD_SHIFT)));
15001
15002 msleep(1);
15003
15004 /* Enable seeprom accesses. */
15005 tw32_f(GRC_LOCAL_CTRL,
15006 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
15007 udelay(100);
15008
15009 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15010 tg3_asic_rev(tp) != ASIC_REV_5701) {
15011 tg3_flag_set(tp, NVRAM);
15012
15013 if (tg3_nvram_lock(tp)) {
15014 netdev_warn(tp->dev,
15015 "Cannot get nvram lock, %s failed\n",
15016 __func__);
15017 return;
15018 }
15019 tg3_enable_nvram_access(tp);
15020
15021 tp->nvram_size = 0;
15022
15023 if (tg3_asic_rev(tp) == ASIC_REV_5752)
15024 tg3_get_5752_nvram_info(tp);
15025 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
15026 tg3_get_5755_nvram_info(tp);
15027 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
15028 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15029 tg3_asic_rev(tp) == ASIC_REV_5785)
15030 tg3_get_5787_nvram_info(tp);
15031 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
15032 tg3_get_5761_nvram_info(tp);
15033 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
15034 tg3_get_5906_nvram_info(tp);
15035 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
15036 tg3_flag(tp, 57765_CLASS))
15037 tg3_get_57780_nvram_info(tp);
15038 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15039 tg3_asic_rev(tp) == ASIC_REV_5719)
15040 tg3_get_5717_nvram_info(tp);
15041 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15042 tg3_asic_rev(tp) == ASIC_REV_5762)
15043 tg3_get_5720_nvram_info(tp);
15044 else
15045 tg3_get_nvram_info(tp);
15046
15047 if (tp->nvram_size == 0)
15048 tg3_get_nvram_size(tp);
15049
15050 tg3_disable_nvram_access(tp);
15051 tg3_nvram_unlock(tp);
15052
15053 } else {
15054 tg3_flag_clear(tp, NVRAM);
15055 tg3_flag_clear(tp, NVRAM_BUFFERED);
15056
15057 tg3_get_eeprom_size(tp);
15058 }
15059 }
15060
15061 struct subsys_tbl_ent {
15062 u16 subsys_vendor, subsys_devid;
15063 u32 phy_id;
15064 };
15065
15066 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15067 /* Broadcom boards. */
15068 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15069 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15070 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15071 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15072 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15073 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15074 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15075 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15076 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15077 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15078 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15079 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15080 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15081 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15082 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15083 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15084 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15085 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15086 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15087 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15088 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15089 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15090
15091 /* 3com boards. */
15092 { TG3PCI_SUBVENDOR_ID_3COM,
15093 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15094 { TG3PCI_SUBVENDOR_ID_3COM,
15095 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15096 { TG3PCI_SUBVENDOR_ID_3COM,
15097 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15098 { TG3PCI_SUBVENDOR_ID_3COM,
15099 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15100 { TG3PCI_SUBVENDOR_ID_3COM,
15101 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15102
15103 /* DELL boards. */
15104 { TG3PCI_SUBVENDOR_ID_DELL,
15105 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15106 { TG3PCI_SUBVENDOR_ID_DELL,
15107 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15108 { TG3PCI_SUBVENDOR_ID_DELL,
15109 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15110 { TG3PCI_SUBVENDOR_ID_DELL,
15111 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15112
15113 /* Compaq boards. */
15114 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15115 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15116 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15117 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15118 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15119 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15120 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15121 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15122 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15123 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15124
15125 /* IBM boards. */
15126 { TG3PCI_SUBVENDOR_ID_IBM,
15127 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15128 };
15129
15130 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15131 {
15132 int i;
15133
15134 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15135 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15136 tp->pdev->subsystem_vendor) &&
15137 (subsys_id_to_phy_id[i].subsys_devid ==
15138 tp->pdev->subsystem_device))
15139 return &subsys_id_to_phy_id[i];
15140 }
15141 return NULL;
15142 }
15143
15144 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15145 {
15146 u32 val;
15147
15148 tp->phy_id = TG3_PHY_ID_INVALID;
15149 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15150
15151 /* Assume an onboard device and WOL capable by default. */
15152 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15153 tg3_flag_set(tp, WOL_CAP);
15154
15155 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15156 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15157 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15158 tg3_flag_set(tp, IS_NIC);
15159 }
15160 val = tr32(VCPU_CFGSHDW);
15161 if (val & VCPU_CFGSHDW_ASPM_DBNC)
15162 tg3_flag_set(tp, ASPM_WORKAROUND);
15163 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15164 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15165 tg3_flag_set(tp, WOL_ENABLE);
15166 device_set_wakeup_enable(&tp->pdev->dev, true);
15167 }
15168 goto done;
15169 }
15170
15171 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15172 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15173 u32 nic_cfg, led_cfg;
15174 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15175 u32 nic_phy_id, ver, eeprom_phy_id;
15176 int eeprom_phy_serdes = 0;
15177
15178 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15179 tp->nic_sram_data_cfg = nic_cfg;
15180
15181 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15182 ver >>= NIC_SRAM_DATA_VER_SHIFT;
15183 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15184 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15185 tg3_asic_rev(tp) != ASIC_REV_5703 &&
15186 (ver > 0) && (ver < 0x100))
15187 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15188
15189 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15190 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15191
15192 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15193 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15194 tg3_asic_rev(tp) == ASIC_REV_5720)
15195 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15196
15197 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15198 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15199 eeprom_phy_serdes = 1;
15200
15201 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15202 if (nic_phy_id != 0) {
15203 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15204 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15205
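/* Repack the NVRAM PHY ID words into tg3's internal phy_id
 * format (the same layout tg3_phy_probe() builds from
 * MII_PHYSID1/MII_PHYSID2).
 */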
15206 eeprom_phy_id = (id1 >> 16) << 10;
15207 eeprom_phy_id |= (id2 & 0xfc00) << 16;
15208 eeprom_phy_id |= (id2 & 0x03ff) << 0;
15209 } else
15210 eeprom_phy_id = 0;
15211
15212 tp->phy_id = eeprom_phy_id;
15213 if (eeprom_phy_serdes) {
15214 if (!tg3_flag(tp, 5705_PLUS))
15215 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15216 else
15217 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15218 }
15219
15220 if (tg3_flag(tp, 5750_PLUS))
15221 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15222 SHASTA_EXT_LED_MODE_MASK);
15223 else
15224 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15225
15226 switch (led_cfg) {
15227 default:
15228 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15229 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15230 break;
15231
15232 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15233 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15234 break;
15235
15236 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15237 tp->led_ctrl = LED_CTRL_MODE_MAC;
15238
15239 			/* Default to PHY_1_MODE if 0 (MAC_MODE) is read,
15240 			 * which happens with some older 5700/5701 bootcode.
15241 			 */
15242 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15243 tg3_asic_rev(tp) == ASIC_REV_5701)
15244 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15245
15246 break;
15247
15248 case SHASTA_EXT_LED_SHARED:
15249 tp->led_ctrl = LED_CTRL_MODE_SHARED;
15250 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15251 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15252 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15253 LED_CTRL_MODE_PHY_2);
15254
15255 if (tg3_flag(tp, 5717_PLUS) ||
15256 tg3_asic_rev(tp) == ASIC_REV_5762)
15257 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15258 LED_CTRL_BLINK_RATE_MASK;
15259
15260 break;
15261
15262 case SHASTA_EXT_LED_MAC:
15263 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15264 break;
15265
15266 case SHASTA_EXT_LED_COMBO:
15267 tp->led_ctrl = LED_CTRL_MODE_COMBO;
15268 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15269 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15270 LED_CTRL_MODE_PHY_2);
15271 break;
15272
15273 }
15274
15275 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15276 tg3_asic_rev(tp) == ASIC_REV_5701) &&
15277 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15278 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15279
15280 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15281 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15282
15283 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15284 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15285 if ((tp->pdev->subsystem_vendor ==
15286 PCI_VENDOR_ID_ARIMA) &&
15287 (tp->pdev->subsystem_device == 0x205a ||
15288 tp->pdev->subsystem_device == 0x2063))
15289 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15290 } else {
15291 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15292 tg3_flag_set(tp, IS_NIC);
15293 }
15294
15295 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15296 tg3_flag_set(tp, ENABLE_ASF);
15297 if (tg3_flag(tp, 5750_PLUS))
15298 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15299 }
15300
15301 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15302 tg3_flag(tp, 5750_PLUS))
15303 tg3_flag_set(tp, ENABLE_APE);
15304
15305 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15306 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15307 tg3_flag_clear(tp, WOL_CAP);
15308
15309 if (tg3_flag(tp, WOL_CAP) &&
15310 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15311 tg3_flag_set(tp, WOL_ENABLE);
15312 device_set_wakeup_enable(&tp->pdev->dev, true);
15313 }
15314
15315 if (cfg2 & (1 << 17))
15316 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15317
15318 		/* SerDes signal pre-emphasis in register 0x590 is set by */
15319 		/* the bootcode if bit 18 is set. */
15320 if (cfg2 & (1 << 18))
15321 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15322
15323 if ((tg3_flag(tp, 57765_PLUS) ||
15324 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15325 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15326 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15327 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15328
15329 if (tg3_flag(tp, PCI_EXPRESS)) {
15330 u32 cfg3;
15331
15332 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15333 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15334 !tg3_flag(tp, 57765_PLUS) &&
15335 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15336 tg3_flag_set(tp, ASPM_WORKAROUND);
15337 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15338 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15339 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15340 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15341 }
15342
15343 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15344 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15345 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15346 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15347 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15348 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15349
15350 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15351 tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15352 }
15353 done:
15354 if (tg3_flag(tp, WOL_CAP))
15355 device_set_wakeup_enable(&tp->pdev->dev,
15356 tg3_flag(tp, WOL_ENABLE));
15357 else
15358 device_set_wakeup_capable(&tp->pdev->dev, false);
15359 }
15360
tg3_ape_otp_read(struct tg3 * tp,u32 offset,u32 * val)15361 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15362 {
15363 int i, err;
15364 u32 val2, off = offset * 8;
15365
15366 err = tg3_nvram_lock(tp);
15367 if (err)
15368 return err;
15369
15370 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15371 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15372 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15373 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15374 udelay(10);
15375
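	/* Poll for completion for up to 1 ms (100 iterations x 10 us). */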
15376 for (i = 0; i < 100; i++) {
15377 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15378 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15379 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15380 break;
15381 }
15382 udelay(10);
15383 }
15384
15385 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15386
15387 tg3_nvram_unlock(tp);
15388 if (val2 & APE_OTP_STATUS_CMD_DONE)
15389 return 0;
15390
15391 return -EBUSY;
15392 }
15393
tg3_issue_otp_command(struct tg3 * tp,u32 cmd)15394 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15395 {
15396 int i;
15397 u32 val;
15398
15399 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15400 tw32(OTP_CTRL, cmd);
15401
15402 /* Wait for up to 1 ms for command to execute. */
15403 for (i = 0; i < 100; i++) {
15404 val = tr32(OTP_STATUS);
15405 if (val & OTP_STATUS_CMD_DONE)
15406 break;
15407 udelay(10);
15408 }
15409
15410 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15411 }
15412
15413 /* Read the gphy configuration from the OTP region of the chip. The gphy
15414 * configuration is a 32-bit value that straddles the alignment boundary.
15415 * We do two 32-bit reads and then shift and merge the results.
15416 */
tg3_read_otp_phycfg(struct tg3 * tp)15417 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15418 {
15419 u32 bhalf_otp, thalf_otp;
15420
15421 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15422
15423 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15424 return 0;
15425
15426 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15427
15428 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15429 return 0;
15430
15431 thalf_otp = tr32(OTP_READ_DATA);
15432
15433 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15434
15435 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15436 return 0;
15437
15438 bhalf_otp = tr32(OTP_READ_DATA);
15439
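	/* Merge: the low 16 bits of the top half become the high half
	 * of the result, and the high 16 bits of the bottom half
	 * become the low half.
	 */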
15440 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15441 }
15442
tg3_phy_init_link_config(struct tg3 * tp)15443 static void tg3_phy_init_link_config(struct tg3 *tp)
15444 {
15445 u32 adv = ADVERTISED_Autoneg;
15446
15447 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15448 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15449 adv |= ADVERTISED_1000baseT_Half;
15450 adv |= ADVERTISED_1000baseT_Full;
15451 }
15452
15453 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15454 adv |= ADVERTISED_100baseT_Half |
15455 ADVERTISED_100baseT_Full |
15456 ADVERTISED_10baseT_Half |
15457 ADVERTISED_10baseT_Full |
15458 ADVERTISED_TP;
15459 else
15460 adv |= ADVERTISED_FIBRE;
15461
15462 tp->link_config.advertising = adv;
15463 tp->link_config.speed = SPEED_UNKNOWN;
15464 tp->link_config.duplex = DUPLEX_UNKNOWN;
15465 tp->link_config.autoneg = AUTONEG_ENABLE;
15466 tp->link_config.active_speed = SPEED_UNKNOWN;
15467 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15468
15469 tp->old_link = -1;
15470 }
15471
tg3_phy_probe(struct tg3 * tp)15472 static int tg3_phy_probe(struct tg3 *tp)
15473 {
15474 u32 hw_phy_id_1, hw_phy_id_2;
15475 u32 hw_phy_id, hw_phy_id_masked;
15476 int err;
15477
15478 /* flow control autonegotiation is default behavior */
15479 tg3_flag_set(tp, PAUSE_AUTONEG);
15480 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15481
15482 if (tg3_flag(tp, ENABLE_APE)) {
15483 switch (tp->pci_fn) {
15484 case 0:
15485 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15486 break;
15487 case 1:
15488 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15489 break;
15490 case 2:
15491 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15492 break;
15493 case 3:
15494 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15495 break;
15496 }
15497 }
15498
15499 if (!tg3_flag(tp, ENABLE_ASF) &&
15500 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15501 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15502 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15503 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15504
15505 if (tg3_flag(tp, USE_PHYLIB))
15506 return tg3_phy_init(tp);
15507
15508 /* Reading the PHY ID register can conflict with ASF
15509 * firmware access to the PHY hardware.
15510 */
15511 err = 0;
15512 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15513 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15514 } else {
15515 		/* Now read the physical PHY_ID from the chip and verify
15516 		 * that it is sane.  If it doesn't look good, we fall back
15517 		 * to the hard-coded, table-based PHY_ID and, failing
15518 		 * that, the value found in the eeprom area.
15519 		 */
15520 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15521 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15522
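		/* Pack PHYSID1/PHYSID2 into tg3's internal phy_id format,
		 * matching the NVRAM-derived layout built in
		 * tg3_get_eeprom_hw_cfg().
		 */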
15523 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
15524 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15525 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
15526
15527 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15528 }
15529
15530 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15531 tp->phy_id = hw_phy_id;
15532 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15533 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15534 else
15535 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15536 } else {
15537 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15538 /* Do nothing, phy ID already set up in
15539 * tg3_get_eeprom_hw_cfg().
15540 */
15541 } else {
15542 struct subsys_tbl_ent *p;
15543
15544 /* No eeprom signature? Try the hardcoded
15545 * subsys device table.
15546 */
15547 p = tg3_lookup_by_subsys(tp);
15548 if (p) {
15549 tp->phy_id = p->phy_id;
15550 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15551 				/* So far we have seen the IDs 0xbc050cd0,
15552 				 * 0xbc050f80 and 0xbc050c30 on devices
15553 				 * connected to a BCM4785, and there are
15554 				 * probably more.  For now, just assume the
15555 				 * phy is supported when it is connected to
15556 				 * an SSB core.
15557 				 */
15558 return -ENODEV;
15559 }
15560
15561 if (!tp->phy_id ||
15562 tp->phy_id == TG3_PHY_ID_BCM8002)
15563 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15564 }
15565 }
15566
15567 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15568 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15569 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15570 tg3_asic_rev(tp) == ASIC_REV_57766 ||
15571 tg3_asic_rev(tp) == ASIC_REV_5762 ||
15572 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15573 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15574 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15575 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15576 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15577
15578 tp->eee.supported = SUPPORTED_100baseT_Full |
15579 SUPPORTED_1000baseT_Full;
15580 tp->eee.advertised = ADVERTISED_100baseT_Full |
15581 ADVERTISED_1000baseT_Full;
15582 tp->eee.eee_enabled = 1;
15583 tp->eee.tx_lpi_enabled = 1;
15584 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15585 }
15586
15587 tg3_phy_init_link_config(tp);
15588
15589 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15590 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15591 !tg3_flag(tp, ENABLE_APE) &&
15592 !tg3_flag(tp, ENABLE_ASF)) {
15593 u32 bmsr, dummy;
15594
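		/* BMSR's link status bit is latched low; read the register
		 * twice so the second read reflects the current link state.
		 */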
15595 tg3_readphy(tp, MII_BMSR, &bmsr);
15596 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15597 (bmsr & BMSR_LSTATUS))
15598 goto skip_phy_reset;
15599
15600 err = tg3_phy_reset(tp);
15601 if (err)
15602 return err;
15603
15604 tg3_phy_set_wirespeed(tp);
15605
15606 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15607 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15608 tp->link_config.flowctrl);
15609
15610 tg3_writephy(tp, MII_BMCR,
15611 BMCR_ANENABLE | BMCR_ANRESTART);
15612 }
15613 }
15614
15615 skip_phy_reset:
15616 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15617 err = tg3_init_5401phy_dsp(tp);
15618 if (err)
15619 return err;
15620
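		/* Run the 5401 DSP setup a second time; the result of this
		 * second pass is what gets returned.
		 */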
15621 err = tg3_init_5401phy_dsp(tp);
15622 }
15623
15624 return err;
15625 }
15626
15627 static void tg3_read_vpd(struct tg3 *tp)
15628 {
15629 u8 *vpd_data;
15630 unsigned int len, vpdlen;
15631 int i;
15632
15633 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15634 if (!vpd_data)
15635 goto out_no_vpd;
15636
15637 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15638 PCI_VPD_RO_KEYWORD_MFR_ID, &len);
15639 if (i < 0)
15640 goto partno;
15641
15642 if (len != 4 || memcmp(vpd_data + i, "1028", 4))
15643 goto partno;
15644
15645 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15646 PCI_VPD_RO_KEYWORD_VENDOR0, &len);
15647 if (i < 0)
15648 goto partno;
15649
15650 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15651 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, vpd_data + i);
15652
15653 partno:
15654 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15655 PCI_VPD_RO_KEYWORD_PARTNO, &len);
15656 if (i < 0)
15657 goto out_not_found;
15658
15659 if (len > TG3_BPN_SIZE)
15660 goto out_not_found;
15661
15662 memcpy(tp->board_part_number, &vpd_data[i], len);
15663
15664 out_not_found:
15665 kfree(vpd_data);
15666 if (tp->board_part_number[0])
15667 return;
15668
15669 out_no_vpd:
15670 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15671 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15672 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15673 strcpy(tp->board_part_number, "BCM5717");
15674 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15675 strcpy(tp->board_part_number, "BCM5718");
15676 else
15677 goto nomatch;
15678 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15679 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15680 strcpy(tp->board_part_number, "BCM57780");
15681 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15682 strcpy(tp->board_part_number, "BCM57760");
15683 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15684 strcpy(tp->board_part_number, "BCM57790");
15685 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15686 strcpy(tp->board_part_number, "BCM57788");
15687 else
15688 goto nomatch;
15689 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15690 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15691 strcpy(tp->board_part_number, "BCM57761");
15692 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15693 strcpy(tp->board_part_number, "BCM57765");
15694 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15695 strcpy(tp->board_part_number, "BCM57781");
15696 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15697 strcpy(tp->board_part_number, "BCM57785");
15698 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15699 strcpy(tp->board_part_number, "BCM57791");
15700 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15701 strcpy(tp->board_part_number, "BCM57795");
15702 else
15703 goto nomatch;
15704 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15705 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15706 strcpy(tp->board_part_number, "BCM57762");
15707 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15708 strcpy(tp->board_part_number, "BCM57766");
15709 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15710 strcpy(tp->board_part_number, "BCM57782");
15711 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15712 strcpy(tp->board_part_number, "BCM57786");
15713 else
15714 goto nomatch;
15715 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15716 strcpy(tp->board_part_number, "BCM95906");
15717 } else {
15718 nomatch:
15719 strcpy(tp->board_part_number, "none");
15720 }
15721 }
15722
15723 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15724 {
15725 u32 val;
15726
15727 if (tg3_nvram_read(tp, offset, &val) ||
15728 (val & 0xfc000000) != 0x0c000000 ||
15729 tg3_nvram_read(tp, offset + 4, &val) ||
15730 val != 0)
15731 return 0;
15732
15733 return 1;
15734 }
15735
15736 static void tg3_read_bc_ver(struct tg3 *tp)
15737 {
15738 u32 val, offset, start, ver_offset;
15739 int i, dst_off;
15740 bool newver = false;
15741
15742 if (tg3_nvram_read(tp, 0xc, &offset) ||
15743 tg3_nvram_read(tp, 0x4, &start))
15744 return;
15745
15746 offset = tg3_nvram_logical_addr(tp, offset);
15747
15748 if (tg3_nvram_read(tp, offset, &val))
15749 return;
15750
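	/* A "new format" image begins with a word whose top bits match
	 * 0x0c000000 and a zero second word; tg3_fw_img_is_valid()
	 * checks the same signature.
	 */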
15751 if ((val & 0xfc000000) == 0x0c000000) {
15752 if (tg3_nvram_read(tp, offset + 4, &val))
15753 return;
15754
15755 if (val == 0)
15756 newver = true;
15757 }
15758
15759 dst_off = strlen(tp->fw_ver);
15760
15761 if (newver) {
15762 if (TG3_VER_SIZE - dst_off < 16 ||
15763 tg3_nvram_read(tp, offset + 8, &ver_offset))
15764 return;
15765
15766 offset = offset + ver_offset - start;
15767 for (i = 0; i < 16; i += 4) {
15768 __be32 v;
15769 if (tg3_nvram_read_be32(tp, offset + i, &v))
15770 return;
15771
15772 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15773 }
15774 } else {
15775 u32 major, minor;
15776
15777 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15778 return;
15779
15780 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15781 TG3_NVM_BCVER_MAJSFT;
15782 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15783 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15784 "v%d.%02d", major, minor);
15785 }
15786 }
15787
15788 static void tg3_read_hwsb_ver(struct tg3 *tp)
15789 {
15790 u32 val, major, minor;
15791
15792 /* Use native endian representation */
15793 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15794 return;
15795
15796 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15797 TG3_NVM_HWSB_CFG1_MAJSFT;
15798 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15799 TG3_NVM_HWSB_CFG1_MINSFT;
15800
15801 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15802 }
15803
15804 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15805 {
15806 u32 offset, major, minor, build;
15807
15808 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15809
15810 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15811 return;
15812
15813 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15814 case TG3_EEPROM_SB_REVISION_0:
15815 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15816 break;
15817 case TG3_EEPROM_SB_REVISION_2:
15818 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15819 break;
15820 case TG3_EEPROM_SB_REVISION_3:
15821 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15822 break;
15823 case TG3_EEPROM_SB_REVISION_4:
15824 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15825 break;
15826 case TG3_EEPROM_SB_REVISION_5:
15827 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15828 break;
15829 case TG3_EEPROM_SB_REVISION_6:
15830 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15831 break;
15832 default:
15833 return;
15834 }
15835
15836 if (tg3_nvram_read(tp, offset, &val))
15837 return;
15838
15839 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15840 TG3_EEPROM_SB_EDH_BLD_SHFT;
15841 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15842 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15843 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
15844
15845 if (minor > 99 || build > 26)
15846 return;
15847
15848 offset = strlen(tp->fw_ver);
15849 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15850 " v%d.%02d", major, minor);
15851
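	/* Builds 1-26 are appended as a letter 'a'-'z' (larger build
	 * values were rejected above).
	 */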
15852 if (build > 0) {
15853 offset = strlen(tp->fw_ver);
15854 if (offset < TG3_VER_SIZE - 1)
15855 tp->fw_ver[offset] = 'a' + build - 1;
15856 }
15857 }
15858
15859 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15860 {
15861 u32 val, offset, start;
15862 int i, vlen;
15863
15864 for (offset = TG3_NVM_DIR_START;
15865 offset < TG3_NVM_DIR_END;
15866 offset += TG3_NVM_DIRENT_SIZE) {
15867 if (tg3_nvram_read(tp, offset, &val))
15868 return;
15869
15870 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15871 break;
15872 }
15873
15874 if (offset == TG3_NVM_DIR_END)
15875 return;
15876
15877 if (!tg3_flag(tp, 5705_PLUS))
15878 start = 0x08000000;
15879 else if (tg3_nvram_read(tp, offset - 4, &start))
15880 return;
15881
15882 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15883 !tg3_fw_img_is_valid(tp, offset) ||
15884 tg3_nvram_read(tp, offset + 8, &val))
15885 return;
15886
15887 offset += val - start;
15888
15889 vlen = strlen(tp->fw_ver);
15890
15891 tp->fw_ver[vlen++] = ',';
15892 tp->fw_ver[vlen++] = ' ';
15893
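	/* Copy up to four words (16 bytes) of version string,
	 * truncating if fw_ver would overflow.
	 */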
15894 for (i = 0; i < 4; i++) {
15895 __be32 v;
15896 if (tg3_nvram_read_be32(tp, offset, &v))
15897 return;
15898
15899 offset += sizeof(v);
15900
15901 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15902 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15903 break;
15904 }
15905
15906 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15907 vlen += sizeof(v);
15908 }
15909 }
15910
15911 static void tg3_probe_ncsi(struct tg3 *tp)
15912 {
15913 u32 apedata;
15914
15915 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15916 if (apedata != APE_SEG_SIG_MAGIC)
15917 return;
15918
15919 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15920 if (!(apedata & APE_FW_STATUS_READY))
15921 return;
15922
15923 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15924 tg3_flag_set(tp, APE_HAS_NCSI);
15925 }
15926
15927 static void tg3_read_dash_ver(struct tg3 *tp)
15928 {
15929 int vlen;
15930 u32 apedata;
15931 char *fwtype;
15932
15933 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15934
15935 if (tg3_flag(tp, APE_HAS_NCSI))
15936 fwtype = "NCSI";
15937 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15938 fwtype = "SMASH";
15939 else
15940 fwtype = "DASH";
15941
15942 vlen = strlen(tp->fw_ver);
15943
15944 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15945 fwtype,
15946 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15947 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15948 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15949 (apedata & APE_FW_VERSION_BLDMSK));
15950 }
15951
15952 static void tg3_read_otp_ver(struct tg3 *tp)
15953 {
15954 u32 val, val2;
15955
15956 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15957 return;
15958
15959 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15960 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15961 TG3_OTP_MAGIC0_VALID(val)) {
15962 u64 val64 = (u64) val << 32 | val2;
15963 u32 ver = 0;
15964 int i, vlen;
15965
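		/* Walk up from the low byte, stopping at the first zero
		 * byte; the last non-zero byte seen becomes the version.
		 */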
15966 for (i = 0; i < 7; i++) {
15967 if ((val64 & 0xff) == 0)
15968 break;
15969 ver = val64 & 0xff;
15970 val64 >>= 8;
15971 }
15972 vlen = strlen(tp->fw_ver);
15973 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15974 }
15975 }
15976
15977 static void tg3_read_fw_ver(struct tg3 *tp)
15978 {
15979 u32 val;
15980 bool vpd_vers = false;
15981
15982 if (tp->fw_ver[0] != 0)
15983 vpd_vers = true;
15984
15985 if (tg3_flag(tp, NO_NVRAM)) {
15986 strcat(tp->fw_ver, "sb");
15987 tg3_read_otp_ver(tp);
15988 return;
15989 }
15990
15991 if (tg3_nvram_read(tp, 0, &val))
15992 return;
15993
15994 if (val == TG3_EEPROM_MAGIC)
15995 tg3_read_bc_ver(tp);
15996 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15997 tg3_read_sb_ver(tp, val);
15998 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15999 tg3_read_hwsb_ver(tp);
16000
16001 if (tg3_flag(tp, ENABLE_ASF)) {
16002 if (tg3_flag(tp, ENABLE_APE)) {
16003 tg3_probe_ncsi(tp);
16004 if (!vpd_vers)
16005 tg3_read_dash_ver(tp);
16006 } else if (!vpd_vers) {
16007 tg3_read_mgmtfw_ver(tp);
16008 }
16009 }
16010
16011 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
16012 }
16013
16014 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
16015 {
16016 if (tg3_flag(tp, LRG_PROD_RING_CAP))
16017 return TG3_RX_RET_MAX_SIZE_5717;
16018 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
16019 return TG3_RX_RET_MAX_SIZE_5700;
16020 else
16021 return TG3_RX_RET_MAX_SIZE_5705;
16022 }
16023
16024 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
16025 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
16026 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
16027 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
16028 { },
16029 };
16030
16031 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
16032 {
16033 struct pci_dev *peer;
16034 unsigned int func, devnr = tp->pdev->devfn & ~7;
16035
16036 for (func = 0; func < 8; func++) {
16037 peer = pci_get_slot(tp->pdev->bus, devnr | func);
16038 if (peer && peer != tp->pdev)
16039 break;
16040 pci_dev_put(peer);
16041 }
16042 	/* The 5704 can be configured in single-port mode; set peer to
16043 	 * tp->pdev in that case.
16044 	 */
16045 if (!peer) {
16046 peer = tp->pdev;
16047 return peer;
16048 }
16049
16050 /*
16051 * We don't need to keep the refcount elevated; there's no way
16052 	 * to remove one half of this device without removing the other.
16053 */
16054 pci_dev_put(peer);
16055
16056 return peer;
16057 }
16058
16059 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16060 {
16061 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
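	/* Devices that report ASIC_REV_USE_PROD_ID_REG in the misc host
	 * control chip revision field expose their real chip ID through
	 * a separate product-ID config register, selected below.
	 */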
16062 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16063 u32 reg;
16064
16065 /* All devices that use the alternate
16066 * ASIC REV location have a CPMU.
16067 */
16068 tg3_flag_set(tp, CPMU_PRESENT);
16069
16070 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16071 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16072 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16073 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16074 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16075 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16076 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16077 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16078 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16079 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16080 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16081 reg = TG3PCI_GEN2_PRODID_ASICREV;
16082 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16083 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16084 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16085 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16086 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16087 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16088 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16089 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16090 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16091 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16092 reg = TG3PCI_GEN15_PRODID_ASICREV;
16093 else
16094 reg = TG3PCI_PRODID_ASICREV;
16095
16096 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16097 }
16098
16099 /* Wrong chip ID in 5752 A0. This code can be removed later
16100 * as A0 is not in production.
16101 */
16102 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16103 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16104
16105 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16106 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16107
16108 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16109 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16110 tg3_asic_rev(tp) == ASIC_REV_5720)
16111 tg3_flag_set(tp, 5717_PLUS);
16112
16113 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16114 tg3_asic_rev(tp) == ASIC_REV_57766)
16115 tg3_flag_set(tp, 57765_CLASS);
16116
16117 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16118 tg3_asic_rev(tp) == ASIC_REV_5762)
16119 tg3_flag_set(tp, 57765_PLUS);
16120
16121 /* Intentionally exclude ASIC_REV_5906 */
16122 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16123 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16124 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16125 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16126 tg3_asic_rev(tp) == ASIC_REV_5785 ||
16127 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16128 tg3_flag(tp, 57765_PLUS))
16129 tg3_flag_set(tp, 5755_PLUS);
16130
16131 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16132 tg3_asic_rev(tp) == ASIC_REV_5714)
16133 tg3_flag_set(tp, 5780_CLASS);
16134
16135 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16136 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16137 tg3_asic_rev(tp) == ASIC_REV_5906 ||
16138 tg3_flag(tp, 5755_PLUS) ||
16139 tg3_flag(tp, 5780_CLASS))
16140 tg3_flag_set(tp, 5750_PLUS);
16141
16142 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16143 tg3_flag(tp, 5750_PLUS))
16144 tg3_flag_set(tp, 5705_PLUS);
16145 }
16146
16147 static bool tg3_10_100_only_device(struct tg3 *tp,
16148 const struct pci_device_id *ent)
16149 {
16150 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16151
16152 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16153 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16154 (tp->phy_flags & TG3_PHYFLG_IS_FET))
16155 return true;
16156
16157 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16158 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16159 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16160 return true;
16161 } else {
16162 return true;
16163 }
16164 }
16165
16166 return false;
16167 }
16168
16169 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16170 {
16171 u32 misc_ctrl_reg;
16172 u32 pci_state_reg, grc_misc_cfg;
16173 u32 val;
16174 u16 pci_cmd;
16175 int err;
16176
16177 /* Force memory write invalidate off. If we leave it on,
16178 * then on 5700_BX chips we have to enable a workaround.
16179 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16180 	 * to match the cacheline size.  The Broadcom driver has this
16181 	 * workaround but turns MWI off all the time, so it never uses
16182 	 * it.  This seems to suggest that the workaround is insufficient.
16183 */
16184 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16185 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16186 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16187
16188 /* Important! -- Make sure register accesses are byteswapped
16189 * correctly. Also, for those chips that require it, make
16190 * sure that indirect register accesses are enabled before
16191 * the first operation.
16192 */
16193 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16194 &misc_ctrl_reg);
16195 tp->misc_host_ctrl |= (misc_ctrl_reg &
16196 MISC_HOST_CTRL_CHIPREV);
16197 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16198 tp->misc_host_ctrl);
16199
16200 tg3_detect_asic_rev(tp, misc_ctrl_reg);
16201
16202 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16203 * we need to disable memory and use config. cycles
16204 * only to access all registers. The 5702/03 chips
16205 * can mistakenly decode the special cycles from the
16206 * ICH chipsets as memory write cycles, causing corruption
16207 * of register and memory space. Only certain ICH bridges
16208 * will drive special cycles with non-zero data during the
16209 * address phase which can fall within the 5703's address
16210 * range. This is not an ICH bug as the PCI spec allows
16211 * non-zero address during special cycles. However, only
16212 * these ICH bridges are known to drive non-zero addresses
16213 * during special cycles.
16214 *
16215 * Since special cycles do not cross PCI bridges, we only
16216 * enable this workaround if the 5703 is on the secondary
16217 * bus of these ICH bridges.
16218 */
16219 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16220 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16221 static struct tg3_dev_id {
16222 u32 vendor;
16223 u32 device;
16224 u32 rev;
16225 } ich_chipsets[] = {
16226 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16227 PCI_ANY_ID },
16228 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16229 PCI_ANY_ID },
16230 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16231 0xa },
16232 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16233 PCI_ANY_ID },
16234 { },
16235 };
16236 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16237 struct pci_dev *bridge = NULL;
16238
16239 while (pci_id->vendor != 0) {
16240 bridge = pci_get_device(pci_id->vendor, pci_id->device,
16241 bridge);
16242 if (!bridge) {
16243 pci_id++;
16244 continue;
16245 }
16246 if (pci_id->rev != PCI_ANY_ID) {
16247 if (bridge->revision > pci_id->rev)
16248 continue;
16249 }
16250 if (bridge->subordinate &&
16251 (bridge->subordinate->number ==
16252 tp->pdev->bus->number)) {
16253 tg3_flag_set(tp, ICH_WORKAROUND);
16254 pci_dev_put(bridge);
16255 break;
16256 }
16257 }
16258 }
16259
16260 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16261 static struct tg3_dev_id {
16262 u32 vendor;
16263 u32 device;
16264 } bridge_chipsets[] = {
16265 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16266 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16267 { },
16268 };
16269 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16270 struct pci_dev *bridge = NULL;
16271
16272 while (pci_id->vendor != 0) {
16273 bridge = pci_get_device(pci_id->vendor,
16274 pci_id->device,
16275 bridge);
16276 if (!bridge) {
16277 pci_id++;
16278 continue;
16279 }
16280 if (bridge->subordinate &&
16281 (bridge->subordinate->number <=
16282 tp->pdev->bus->number) &&
16283 (bridge->subordinate->busn_res.end >=
16284 tp->pdev->bus->number)) {
16285 tg3_flag_set(tp, 5701_DMA_BUG);
16286 pci_dev_put(bridge);
16287 break;
16288 }
16289 }
16290 }
16291
16292 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16293 * DMA addresses > 40-bit. This bridge may have other additional
16294 * 57xx devices behind it in some 4-port NIC designs for example.
16295 * Any tg3 device found behind the bridge will also need the 40-bit
16296 * DMA workaround.
16297 */
16298 if (tg3_flag(tp, 5780_CLASS)) {
16299 tg3_flag_set(tp, 40BIT_DMA_BUG);
16300 tp->msi_cap = tp->pdev->msi_cap;
16301 } else {
16302 struct pci_dev *bridge = NULL;
16303
16304 do {
16305 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16306 PCI_DEVICE_ID_SERVERWORKS_EPB,
16307 bridge);
16308 if (bridge && bridge->subordinate &&
16309 (bridge->subordinate->number <=
16310 tp->pdev->bus->number) &&
16311 (bridge->subordinate->busn_res.end >=
16312 tp->pdev->bus->number)) {
16313 tg3_flag_set(tp, 40BIT_DMA_BUG);
16314 pci_dev_put(bridge);
16315 break;
16316 }
16317 } while (bridge);
16318 }
16319
16320 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16321 tg3_asic_rev(tp) == ASIC_REV_5714)
16322 tp->pdev_peer = tg3_find_peer(tp);
16323
16324 /* Determine TSO capabilities */
16325 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16326 ; /* Do nothing. HW bug. */
16327 else if (tg3_flag(tp, 57765_PLUS))
16328 tg3_flag_set(tp, HW_TSO_3);
16329 else if (tg3_flag(tp, 5755_PLUS) ||
16330 tg3_asic_rev(tp) == ASIC_REV_5906)
16331 tg3_flag_set(tp, HW_TSO_2);
16332 else if (tg3_flag(tp, 5750_PLUS)) {
16333 tg3_flag_set(tp, HW_TSO_1);
16334 tg3_flag_set(tp, TSO_BUG);
16335 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16336 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16337 tg3_flag_clear(tp, TSO_BUG);
16338 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16339 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16340 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16341 tg3_flag_set(tp, FW_TSO);
16342 tg3_flag_set(tp, TSO_BUG);
16343 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16344 tp->fw_needed = FIRMWARE_TG3TSO5;
16345 else
16346 tp->fw_needed = FIRMWARE_TG3TSO;
16347 }
16348
16349 /* Selectively allow TSO based on operating conditions */
16350 if (tg3_flag(tp, HW_TSO_1) ||
16351 tg3_flag(tp, HW_TSO_2) ||
16352 tg3_flag(tp, HW_TSO_3) ||
16353 tg3_flag(tp, FW_TSO)) {
16354 /* For firmware TSO, assume ASF is disabled.
16355 * We'll disable TSO later if we discover ASF
16356 * is enabled in tg3_get_eeprom_hw_cfg().
16357 */
16358 tg3_flag_set(tp, TSO_CAPABLE);
16359 } else {
16360 tg3_flag_clear(tp, TSO_CAPABLE);
16361 tg3_flag_clear(tp, TSO_BUG);
16362 tp->fw_needed = NULL;
16363 }
16364
16365 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16366 tp->fw_needed = FIRMWARE_TG3;
16367
16368 if (tg3_asic_rev(tp) == ASIC_REV_57766)
16369 tp->fw_needed = FIRMWARE_TG357766;
16370
16371 tp->irq_max = 1;
16372
16373 if (tg3_flag(tp, 5750_PLUS)) {
16374 tg3_flag_set(tp, SUPPORT_MSI);
16375 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16376 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16377 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16378 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16379 tp->pdev_peer == tp->pdev))
16380 tg3_flag_clear(tp, SUPPORT_MSI);
16381
16382 if (tg3_flag(tp, 5755_PLUS) ||
16383 tg3_asic_rev(tp) == ASIC_REV_5906) {
16384 tg3_flag_set(tp, 1SHOT_MSI);
16385 }
16386
16387 if (tg3_flag(tp, 57765_PLUS)) {
16388 tg3_flag_set(tp, SUPPORT_MSIX);
16389 tp->irq_max = TG3_IRQ_MAX_VECS;
16390 }
16391 }
16392
16393 tp->txq_max = 1;
16394 tp->rxq_max = 1;
16395 if (tp->irq_max > 1) {
16396 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16397 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16398
16399 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16400 tg3_asic_rev(tp) == ASIC_REV_5720)
16401 tp->txq_max = tp->irq_max - 1;
16402 }
16403
16404 if (tg3_flag(tp, 5755_PLUS) ||
16405 tg3_asic_rev(tp) == ASIC_REV_5906)
16406 tg3_flag_set(tp, SHORT_DMA_BUG);
16407
16408 if (tg3_asic_rev(tp) == ASIC_REV_5719)
16409 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16410
16411 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16412 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16413 tg3_asic_rev(tp) == ASIC_REV_5720 ||
16414 tg3_asic_rev(tp) == ASIC_REV_5762)
16415 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16416
16417 if (tg3_flag(tp, 57765_PLUS) &&
16418 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16419 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16420
16421 if (!tg3_flag(tp, 5705_PLUS) ||
16422 tg3_flag(tp, 5780_CLASS) ||
16423 tg3_flag(tp, USE_JUMBO_BDFLAG))
16424 tg3_flag_set(tp, JUMBO_CAPABLE);
16425
16426 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16427 &pci_state_reg);
16428
16429 if (pci_is_pcie(tp->pdev)) {
16430 u16 lnkctl;
16431
16432 tg3_flag_set(tp, PCI_EXPRESS);
16433
16434 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16435 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16436 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16437 tg3_flag_clear(tp, HW_TSO_2);
16438 tg3_flag_clear(tp, TSO_CAPABLE);
16439 }
16440 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16441 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16442 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16443 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16444 tg3_flag_set(tp, CLKREQ_BUG);
16445 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16446 tg3_flag_set(tp, L1PLLPD_EN);
16447 }
16448 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16449 /* BCM5785 devices are effectively PCIe devices, and should
16450 * follow PCIe codepaths, but do not have a PCIe capabilities
16451 * section.
16452 */
16453 tg3_flag_set(tp, PCI_EXPRESS);
16454 } else if (!tg3_flag(tp, 5705_PLUS) ||
16455 tg3_flag(tp, 5780_CLASS)) {
16456 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16457 if (!tp->pcix_cap) {
16458 dev_err(&tp->pdev->dev,
16459 "Cannot find PCI-X capability, aborting\n");
16460 return -EIO;
16461 }
16462
16463 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16464 tg3_flag_set(tp, PCIX_MODE);
16465 }
16466
16467 /* If we have an AMD 762 or VIA K8T800 chipset, write
16468 * reordering to the mailbox registers done by the host
16469 * controller can cause major troubles. We read back from
16470 * every mailbox register write to force the writes to be
16471 * posted to the chip in order.
16472 */
16473 if (pci_dev_present(tg3_write_reorder_chipsets) &&
16474 !tg3_flag(tp, PCI_EXPRESS))
16475 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16476
16477 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16478 &tp->pci_cacheline_sz);
16479 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16480 &tp->pci_lat_timer);
16481 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16482 tp->pci_lat_timer < 64) {
16483 tp->pci_lat_timer = 64;
16484 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16485 tp->pci_lat_timer);
16486 }
16487
16488 /* Important! -- It is critical that the PCI-X hw workaround
16489 * situation is decided before the first MMIO register access.
16490 */
16491 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16492 /* 5700 BX chips need to have their TX producer index
16493 * mailboxes written twice to workaround a bug.
16494 */
16495 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16496
16497 /* If we are in PCI-X mode, enable register write workaround.
16498 *
16499 * The workaround is to use indirect register accesses
16500 * for all chip writes not to mailbox registers.
16501 */
16502 if (tg3_flag(tp, PCIX_MODE)) {
16503 u32 pm_reg;
16504
16505 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16506
16507 			/* The chip can have its power management PCI config
16508 			 * space registers clobbered due to this bug,
16509 			 * so explicitly force the chip into D0 here.
16510 */
16511 pci_read_config_dword(tp->pdev,
16512 tp->pdev->pm_cap + PCI_PM_CTRL,
16513 &pm_reg);
16514 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16515 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16516 pci_write_config_dword(tp->pdev,
16517 tp->pdev->pm_cap + PCI_PM_CTRL,
16518 pm_reg);
16519
16520 /* Also, force SERR#/PERR# in PCI command. */
16521 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16522 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16523 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16524 }
16525 }
16526
16527 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16528 tg3_flag_set(tp, PCI_HIGH_SPEED);
16529 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16530 tg3_flag_set(tp, PCI_32BIT);
16531
16532 /* Chip-specific fixup from Broadcom driver */
16533 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16534 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16535 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16536 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16537 }
16538
16539 /* Default fast path register access methods */
16540 tp->read32 = tg3_read32;
16541 tp->write32 = tg3_write32;
16542 tp->read32_mbox = tg3_read32;
16543 tp->write32_mbox = tg3_write32;
16544 tp->write32_tx_mbox = tg3_write32;
16545 tp->write32_rx_mbox = tg3_write32;
16546
16547 /* Various workaround register access methods */
16548 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16549 tp->write32 = tg3_write_indirect_reg32;
16550 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16551 (tg3_flag(tp, PCI_EXPRESS) &&
16552 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16553 /*
16554 * Back to back register writes can cause problems on these
16555 * chips, the workaround is to read back all reg writes
16556 * except those to mailbox regs.
16557 *
16558 * See tg3_write_indirect_reg32().
16559 */
16560 tp->write32 = tg3_write_flush_reg32;
16561 }
16562
16563 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16564 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16565 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16566 tp->write32_rx_mbox = tg3_write_flush_reg32;
16567 }
16568
16569 if (tg3_flag(tp, ICH_WORKAROUND)) {
16570 tp->read32 = tg3_read_indirect_reg32;
16571 tp->write32 = tg3_write_indirect_reg32;
16572 tp->read32_mbox = tg3_read_indirect_mbox;
16573 tp->write32_mbox = tg3_write_indirect_mbox;
16574 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16575 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16576
16577 iounmap(tp->regs);
16578 tp->regs = NULL;
16579
16580 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16581 pci_cmd &= ~PCI_COMMAND_MEMORY;
16582 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16583 }
16584 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16585 tp->read32_mbox = tg3_read32_mbox_5906;
16586 tp->write32_mbox = tg3_write32_mbox_5906;
16587 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16588 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16589 }
16590
16591 if (tp->write32 == tg3_write_indirect_reg32 ||
16592 (tg3_flag(tp, PCIX_MODE) &&
16593 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16594 tg3_asic_rev(tp) == ASIC_REV_5701)))
16595 tg3_flag_set(tp, SRAM_USE_CONFIG);
16596
16597 /* The memory arbiter has to be enabled in order for SRAM accesses
16598 * to succeed. Normally on powerup the tg3 chip firmware will make
16599 * sure it is enabled, but other entities such as system netboot
16600 * code might disable it.
16601 */
16602 val = tr32(MEMARB_MODE);
16603 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16604
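	/* Start with the PCI function number; on some chips it is
	 * corrected below from the PCI-X status or CPMU status register.
	 */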
16605 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16606 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16607 tg3_flag(tp, 5780_CLASS)) {
16608 if (tg3_flag(tp, PCIX_MODE)) {
16609 pci_read_config_dword(tp->pdev,
16610 tp->pcix_cap + PCI_X_STATUS,
16611 &val);
16612 tp->pci_fn = val & 0x7;
16613 }
16614 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16615 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16616 tg3_asic_rev(tp) == ASIC_REV_5720) {
16617 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16618 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16619 val = tr32(TG3_CPMU_STATUS);
16620
16621 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16622 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16623 else
16624 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16625 TG3_CPMU_STATUS_FSHFT_5719;
16626 }
16627
16628 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16629 tp->write32_tx_mbox = tg3_write_flush_reg32;
16630 tp->write32_rx_mbox = tg3_write_flush_reg32;
16631 }
16632
16633 /* Get eeprom hw config before calling tg3_set_power_state().
16634 * In particular, the TG3_FLAG_IS_NIC flag must be
16635 * determined before calling tg3_set_power_state() so that
16636 * we know whether or not to switch out of Vaux power.
16637 * When the flag is set, it means that GPIO1 is used for eeprom
16638 * write protect and also implies that it is a LOM where GPIOs
16639 * are not used to switch power.
16640 */
16641 tg3_get_eeprom_hw_cfg(tp);
16642
16643 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16644 tg3_flag_clear(tp, TSO_CAPABLE);
16645 tg3_flag_clear(tp, TSO_BUG);
16646 tp->fw_needed = NULL;
16647 }
16648
16649 if (tg3_flag(tp, ENABLE_APE)) {
16650 /* Allow reads and writes to the
16651 * APE register and memory space.
16652 */
16653 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16654 PCISTATE_ALLOW_APE_SHMEM_WR |
16655 PCISTATE_ALLOW_APE_PSPACE_WR;
16656 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16657 pci_state_reg);
16658
16659 tg3_ape_lock_init(tp);
16660 tp->ape_hb_interval =
16661 msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16662 }
16663
16664 /* Set up tp->grc_local_ctrl before calling
16665 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16666 * will bring 5700's external PHY out of reset.
16667 * It is also used as eeprom write protect on LOMs.
16668 */
16669 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16670 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16671 tg3_flag(tp, EEPROM_WRITE_PROT))
16672 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16673 GRC_LCLCTRL_GPIO_OUTPUT1);
16674 /* Unused GPIO3 must be driven as output on 5752 because there
16675 * are no pull-up resistors on unused GPIO pins.
16676 */
16677 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16678 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16679
16680 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16681 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16682 tg3_flag(tp, 57765_CLASS))
16683 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16684
16685 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16686 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16687 /* Turn off the debug UART. */
16688 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16689 if (tg3_flag(tp, IS_NIC))
16690 /* Keep VMain power. */
16691 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16692 GRC_LCLCTRL_GPIO_OUTPUT0;
16693 }
16694
16695 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16696 tp->grc_local_ctrl |=
16697 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16698
16699 /* Switch out of Vaux if it is a NIC */
16700 tg3_pwrsrc_switch_to_vmain(tp);
16701
16702 /* Derive initial jumbo mode from MTU assigned in
16703 * ether_setup() via the alloc_etherdev() call
16704 */
16705 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16706 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16707
16708 /* Determine WakeOnLan speed to use. */
16709 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16710 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16711 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16712 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16713 tg3_flag_clear(tp, WOL_SPEED_100MB);
16714 } else {
16715 tg3_flag_set(tp, WOL_SPEED_100MB);
16716 }
16717
16718 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16719 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16720
16721 /* A few boards don't want Ethernet@WireSpeed phy feature */
16722 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16723 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16724 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16725 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16726 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16727 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16728 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16729
16730 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16731 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16732 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16733 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16734 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16735
16736 if (tg3_flag(tp, 5705_PLUS) &&
16737 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16738 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16739 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16740 !tg3_flag(tp, 57765_PLUS)) {
16741 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16742 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16743 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16744 tg3_asic_rev(tp) == ASIC_REV_5761) {
16745 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16746 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16747 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16748 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16749 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16750 } else
16751 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16752 }
16753
16754 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16755 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16756 tp->phy_otp = tg3_read_otp_phycfg(tp);
16757 if (tp->phy_otp == 0)
16758 tp->phy_otp = TG3_OTP_DEFAULT;
16759 }
16760
16761 if (tg3_flag(tp, CPMU_PRESENT))
16762 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16763 else
16764 tp->mi_mode = MAC_MI_MODE_BASE;
16765
16766 tp->coalesce_mode = 0;
16767 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16768 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16769 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16770
16771 /* Set these bits to enable statistics workaround. */
16772 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16773 tg3_asic_rev(tp) == ASIC_REV_5762 ||
16774 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16775 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16776 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16777 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16778 }
16779
16780 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16781 tg3_asic_rev(tp) == ASIC_REV_57780)
16782 tg3_flag_set(tp, USE_PHYLIB);
16783
16784 err = tg3_mdio_init(tp);
16785 if (err)
16786 return err;
16787
16788 /* Initialize data/descriptor byte/word swapping. */
16789 val = tr32(GRC_MODE);
16790 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16791 tg3_asic_rev(tp) == ASIC_REV_5762)
16792 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16793 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16794 GRC_MODE_B2HRX_ENABLE |
16795 GRC_MODE_HTX2B_ENABLE |
16796 GRC_MODE_HOST_STACKUP);
16797 else
16798 val &= GRC_MODE_HOST_STACKUP;
16799
16800 tw32(GRC_MODE, val | tp->grc_mode);
16801
16802 tg3_switch_clocks(tp);
16803
16804 /* Clear this out for sanity. */
16805 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16806
16807 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16808 tw32(TG3PCI_REG_BASE_ADDR, 0);
16809
16810 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16811 &pci_state_reg);
16812 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16813 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16814 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16815 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16816 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16817 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16818 void __iomem *sram_base;
16819
16820 		/* Write some dummy words into the SRAM status block
16821 		 * area and see if they read back correctly. If the
16822 		 * read-back value is bad, force enable the PCIX workaround.
16823 		 */
16824 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16825
16826 writel(0x00000000, sram_base);
16827 writel(0x00000000, sram_base + 4);
16828 writel(0xffffffff, sram_base + 4);
16829 if (readl(sram_base) != 0x00000000)
16830 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16831 }
16832 }
16833
16834 udelay(50);
16835 tg3_nvram_init(tp);
16836
16837 	/* If the device has NVRAM, there is no need to load patch firmware */
16838 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16839 !tg3_flag(tp, NO_NVRAM))
16840 tp->fw_needed = NULL;
16841
16842 grc_misc_cfg = tr32(GRC_MISC_CFG);
16843 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16844
16845 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16846 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16847 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16848 tg3_flag_set(tp, IS_5788);
16849
16850 if (!tg3_flag(tp, IS_5788) &&
16851 tg3_asic_rev(tp) != ASIC_REV_5700)
16852 tg3_flag_set(tp, TAGGED_STATUS);
16853 if (tg3_flag(tp, TAGGED_STATUS)) {
16854 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16855 HOSTCC_MODE_CLRTICK_TXBD);
16856
16857 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16858 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16859 tp->misc_host_ctrl);
16860 }
16861
16862 /* Preserve the APE MAC_MODE bits */
16863 if (tg3_flag(tp, ENABLE_APE))
16864 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16865 else
16866 tp->mac_mode = 0;
16867
16868 if (tg3_10_100_only_device(tp, ent))
16869 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16870
16871 err = tg3_phy_probe(tp);
16872 if (err) {
16873 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16874 /* ... but do not return immediately ... */
16875 tg3_mdio_fini(tp);
16876 }
16877
16878 tg3_read_vpd(tp);
16879 tg3_read_fw_ver(tp);
16880
16881 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16882 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16883 } else {
16884 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16885 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16886 else
16887 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16888 }
16889
16890 /* 5700 {AX,BX} chips have a broken status block link
16891 * change bit implementation, so we must use the
16892 * status register in those cases.
16893 */
16894 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16895 tg3_flag_set(tp, USE_LINKCHG_REG);
16896 else
16897 tg3_flag_clear(tp, USE_LINKCHG_REG);
16898
16899 	/* The led_ctrl is set during tg3_phy_probe; here we might
16900 	 * have to force the link status polling mechanism based
16901 	 * upon subsystem IDs.
16902 	 */
16903 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16904 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16905 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16906 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16907 tg3_flag_set(tp, USE_LINKCHG_REG);
16908 }
16909
16910 /* For all SERDES we poll the MAC status register. */
16911 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16912 tg3_flag_set(tp, POLL_SERDES);
16913 else
16914 tg3_flag_clear(tp, POLL_SERDES);
16915
16916 if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16917 tg3_flag_set(tp, POLL_CPMU_LINK);
16918
16919 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16920 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16921 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16922 tg3_flag(tp, PCIX_MODE)) {
16923 tp->rx_offset = NET_SKB_PAD;
16924 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16925 tp->rx_copy_thresh = ~(u16)0;
16926 #endif
16927 }
16928
16929 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16930 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16931 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16932
16933 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16934
16935 	/* Increment the rx prod index on the rx std ring by at most
16936 	 * 8 for these chips to work around hw errata.
16937 	 */
16938 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16939 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16940 tg3_asic_rev(tp) == ASIC_REV_5755)
16941 tp->rx_std_max_post = 8;
16942
16943 if (tg3_flag(tp, ASPM_WORKAROUND))
16944 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16945 PCIE_PWR_MGMT_L1_THRESH_MSK;
16946
16947 return err;
16948 }
16949
16950 static int tg3_get_device_address(struct tg3 *tp, u8 *addr)
16951 {
16952 u32 hi, lo, mac_offset;
16953 int addr_ok = 0;
16954 int err;
16955
16956 if (!eth_platform_get_mac_address(&tp->pdev->dev, addr))
16957 return 0;
16958
16959 if (tg3_flag(tp, IS_SSB_CORE)) {
16960 err = ssb_gige_get_macaddr(tp->pdev, addr);
16961 if (!err && is_valid_ether_addr(addr))
16962 return 0;
16963 }
16964
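	/* Pick the NVRAM offset of the MAC address: dual-MAC 5704/5780-class
	 * parts and multi-function 5717+ parts keep the other function's
	 * address at a different offset, and 5906 uses its own layout.
	 */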
16965 mac_offset = 0x7c;
16966 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16967 tg3_flag(tp, 5780_CLASS)) {
16968 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16969 mac_offset = 0xcc;
16970 if (tg3_nvram_lock(tp))
16971 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16972 else
16973 tg3_nvram_unlock(tp);
16974 } else if (tg3_flag(tp, 5717_PLUS)) {
16975 if (tp->pci_fn & 1)
16976 mac_offset = 0xcc;
16977 if (tp->pci_fn > 1)
16978 mac_offset += 0x18c;
16979 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16980 mac_offset = 0x10;
16981
16982 /* First try to get it from MAC address mailbox. */
16983 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
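	/* The upper 16 bits hold a 0x484b signature (ASCII "HK", presumably
	 * written by bootcode) when the mailbox contains a valid address.
	 */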
16984 if ((hi >> 16) == 0x484b) {
16985 addr[0] = (hi >> 8) & 0xff;
16986 addr[1] = (hi >> 0) & 0xff;
16987
16988 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16989 addr[2] = (lo >> 24) & 0xff;
16990 addr[3] = (lo >> 16) & 0xff;
16991 addr[4] = (lo >> 8) & 0xff;
16992 addr[5] = (lo >> 0) & 0xff;
16993
16994 /* Some old bootcode may report a 0 MAC address in SRAM */
16995 addr_ok = is_valid_ether_addr(addr);
16996 }
16997 if (!addr_ok) {
16998 /* Next, try NVRAM. */
16999 if (!tg3_flag(tp, NO_NVRAM) &&
17000 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
17001 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
17002 memcpy(&addr[0], ((char *)&hi) + 2, 2);
17003 memcpy(&addr[2], (char *)&lo, sizeof(lo));
17004 }
17005 /* Finally just fetch it out of the MAC control regs. */
17006 else {
17007 hi = tr32(MAC_ADDR_0_HIGH);
17008 lo = tr32(MAC_ADDR_0_LOW);
17009
17010 addr[5] = lo & 0xff;
17011 addr[4] = (lo >> 8) & 0xff;
17012 addr[3] = (lo >> 16) & 0xff;
17013 addr[2] = (lo >> 24) & 0xff;
17014 addr[1] = hi & 0xff;
17015 addr[0] = (hi >> 8) & 0xff;
17016 }
17017 }
17018
17019 if (!is_valid_ether_addr(addr))
17020 return -EINVAL;
17021 return 0;
17022 }
17023
17024 #define BOUNDARY_SINGLE_CACHELINE 1
17025 #define BOUNDARY_MULTI_CACHELINE 2
17026
17027 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17028 {
17029 int cacheline_size;
17030 u8 byte;
17031 int goal;
17032
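	/* PCI_CACHE_LINE_SIZE is in units of 32-bit words; zero likely means
	 * it was never programmed, so assume a conservative 1024-byte line.
	 */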
17033 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17034 if (byte == 0)
17035 cacheline_size = 1024;
17036 else
17037 cacheline_size = (int) byte * 4;
17038
17039 /* On 5703 and later chips, the boundary bits have no
17040 * effect.
17041 */
17042 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17043 tg3_asic_rev(tp) != ASIC_REV_5701 &&
17044 !tg3_flag(tp, PCI_EXPRESS))
17045 goto out;
17046
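	/* Choose a boundary goal based on how the host architecture's PCI
	 * controllers behave when bursting across cache lines (build-time).
	 */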
17047 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17048 goal = BOUNDARY_MULTI_CACHELINE;
17049 #else
17050 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17051 goal = BOUNDARY_SINGLE_CACHELINE;
17052 #else
17053 goal = 0;
17054 #endif
17055 #endif
17056
17057 if (tg3_flag(tp, 57765_PLUS)) {
17058 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17059 goto out;
17060 }
17061
17062 if (!goal)
17063 goto out;
17064
17065 /* PCI controllers on most RISC systems tend to disconnect
17066 * when a device tries to burst across a cache-line boundary.
17067 * Therefore, letting tg3 do so just wastes PCI bandwidth.
17068 *
17069 * Unfortunately, for PCI-E there are only limited
17070 * write-side controls for this, and thus for reads
17071 * we will still get the disconnects. We'll also waste
17072 * these PCI cycles for both read and write for chips
17073 * other than 5700 and 5701 which do not implement the
17074 * boundary bits.
17075 */
17076 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17077 switch (cacheline_size) {
17078 case 16:
17079 case 32:
17080 case 64:
17081 case 128:
17082 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17083 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17084 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17085 } else {
17086 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17087 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17088 }
17089 break;
17090
17091 case 256:
17092 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17093 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17094 break;
17095
17096 default:
17097 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17098 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17099 break;
17100 }
17101 } else if (tg3_flag(tp, PCI_EXPRESS)) {
17102 switch (cacheline_size) {
17103 case 16:
17104 case 32:
17105 case 64:
17106 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17107 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17108 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17109 break;
17110 }
17111 fallthrough;
17112 case 128:
17113 default:
17114 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17115 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17116 break;
17117 }
17118 } else {
17119 switch (cacheline_size) {
17120 case 16:
17121 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17122 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17123 DMA_RWCTRL_WRITE_BNDRY_16);
17124 break;
17125 }
17126 fallthrough;
17127 case 32:
17128 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17129 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17130 DMA_RWCTRL_WRITE_BNDRY_32);
17131 break;
17132 }
17133 fallthrough;
17134 case 64:
17135 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17136 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17137 DMA_RWCTRL_WRITE_BNDRY_64);
17138 break;
17139 }
17140 fallthrough;
17141 case 128:
17142 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17143 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17144 DMA_RWCTRL_WRITE_BNDRY_128);
17145 break;
17146 }
17147 fallthrough;
17148 case 256:
17149 val |= (DMA_RWCTRL_READ_BNDRY_256 |
17150 DMA_RWCTRL_WRITE_BNDRY_256);
17151 break;
17152 case 512:
17153 val |= (DMA_RWCTRL_READ_BNDRY_512 |
17154 DMA_RWCTRL_WRITE_BNDRY_512);
17155 break;
17156 case 1024:
17157 default:
17158 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17159 DMA_RWCTRL_WRITE_BNDRY_1024);
17160 break;
17161 }
17162 }
17163
17164 out:
17165 return val;
17166 }
17167
17168 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17169 int size, bool to_device)
17170 {
17171 struct tg3_internal_buffer_desc test_desc;
17172 u32 sram_dma_descs;
17173 int i, ret;
17174
17175 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17176
17177 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17178 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17179 tw32(RDMAC_STATUS, 0);
17180 tw32(WDMAC_STATUS, 0);
17181
17182 tw32(BUFMGR_MODE, 0);
17183 tw32(FTQ_RESET, 0);
17184
17185 test_desc.addr_hi = ((u64) buf_dma) >> 32;
17186 test_desc.addr_lo = buf_dma & 0xffffffff;
17187 test_desc.nic_mbuf = 0x00002100;
17188 test_desc.len = size;
17189
17190 /*
17191 	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
17192 	 * the *second* time the tg3 driver was loaded after an
17193 	 * initial scan.
17194 *
17195 * Broadcom tells me:
17196 * ...the DMA engine is connected to the GRC block and a DMA
17197 * reset may affect the GRC block in some unpredictable way...
17198 * The behavior of resets to individual blocks has not been tested.
17199 *
17200 * Broadcom noted the GRC reset will also reset all sub-components.
17201 */
17202 if (to_device) {
17203 test_desc.cqid_sqid = (13 << 8) | 2;
17204
17205 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17206 udelay(40);
17207 } else {
17208 test_desc.cqid_sqid = (16 << 8) | 7;
17209
17210 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17211 udelay(40);
17212 }
17213 test_desc.flags = 0x00000005;
17214
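	/* Copy the test descriptor into NIC SRAM a word at a time through
	 * the PCI memory window, then point the window back at offset 0.
	 */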
17215 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17216 u32 val;
17217
17218 val = *(((u32 *)&test_desc) + i);
17219 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17220 sram_dma_descs + (i * sizeof(u32)));
17221 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17222 }
17223 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17224
17225 if (to_device)
17226 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17227 else
17228 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17229
17230 ret = -ENODEV;
17231 for (i = 0; i < 40; i++) {
17232 u32 val;
17233
17234 if (to_device)
17235 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17236 else
17237 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17238 if ((val & 0xffff) == sram_dma_descs) {
17239 ret = 0;
17240 break;
17241 }
17242
17243 udelay(100);
17244 }
17245
17246 return ret;
17247 }
17248
17249 #define TEST_BUFFER_SIZE 0x2000
17250
17251 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17252 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17253 { },
17254 };
17255
17256 static int tg3_test_dma(struct tg3 *tp)
17257 {
17258 dma_addr_t buf_dma;
17259 u32 *buf, saved_dma_rwctrl;
17260 int ret = 0;
17261
17262 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17263 &buf_dma, GFP_KERNEL);
17264 if (!buf) {
17265 ret = -ENOMEM;
17266 goto out_nofree;
17267 }
17268
17269 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17270 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17271
17272 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17273
17274 if (tg3_flag(tp, 57765_PLUS))
17275 goto out;
17276
17277 if (tg3_flag(tp, PCI_EXPRESS)) {
17278 /* DMA read watermark not used on PCIE */
17279 tp->dma_rwctrl |= 0x00180000;
17280 } else if (!tg3_flag(tp, PCIX_MODE)) {
17281 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17282 tg3_asic_rev(tp) == ASIC_REV_5750)
17283 tp->dma_rwctrl |= 0x003f0000;
17284 else
17285 tp->dma_rwctrl |= 0x003f000f;
17286 } else {
17287 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17288 tg3_asic_rev(tp) == ASIC_REV_5704) {
17289 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17290 u32 read_water = 0x7;
17291
17292 /* If the 5704 is behind the EPB bridge, we can
17293 * do the less restrictive ONE_DMA workaround for
17294 * better performance.
17295 */
17296 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17297 tg3_asic_rev(tp) == ASIC_REV_5704)
17298 tp->dma_rwctrl |= 0x8000;
17299 else if (ccval == 0x6 || ccval == 0x7)
17300 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17301
17302 if (tg3_asic_rev(tp) == ASIC_REV_5703)
17303 read_water = 4;
17304 /* Set bit 23 to enable PCIX hw bug fix */
17305 tp->dma_rwctrl |=
17306 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17307 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17308 (1 << 23);
17309 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17310 /* 5780 always in PCIX mode */
17311 tp->dma_rwctrl |= 0x00144000;
17312 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17313 /* 5714 always in PCIX mode */
17314 tp->dma_rwctrl |= 0x00148000;
17315 } else {
17316 tp->dma_rwctrl |= 0x001b000f;
17317 }
17318 }
17319 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17320 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17321
17322 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17323 tg3_asic_rev(tp) == ASIC_REV_5704)
17324 tp->dma_rwctrl &= 0xfffffff0;
17325
17326 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17327 tg3_asic_rev(tp) == ASIC_REV_5701) {
17328 /* Remove this if it causes problems for some boards. */
17329 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17330
17331 /* On 5700/5701 chips, we need to set this bit.
17332 * Otherwise the chip will issue cacheline transactions
17333 * to streamable DMA memory with not all the byte
17334 * enables turned on. This is an error on several
17335 * RISC PCI controllers, in particular sparc64.
17336 *
17337 * On 5703/5704 chips, this bit has been reassigned
17338 * a different meaning. In particular, it is used
17339 * on those chips to enable a PCI-X workaround.
17340 */
17341 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17342 }
17343
17344 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17345
17346
17347 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17348 tg3_asic_rev(tp) != ASIC_REV_5701)
17349 goto out;
17350
17351 	/* It is best to perform the DMA test with the maximum write burst
17352 	 * size to expose the 5700/5701 write DMA bug.
17353 */
17354 saved_dma_rwctrl = tp->dma_rwctrl;
17355 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17356 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17357
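	/* Pattern-fill the buffer, DMA it to the chip, DMA it back, and
	 * verify. On corruption, retry once with a 16-byte write boundary
	 * before declaring the DMA engine broken.
	 */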
17358 while (1) {
17359 u32 *p = buf, i;
17360
17361 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17362 p[i] = i;
17363
17364 /* Send the buffer to the chip. */
17365 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17366 if (ret) {
17367 dev_err(&tp->pdev->dev,
17368 "%s: Buffer write failed. err = %d\n",
17369 __func__, ret);
17370 break;
17371 }
17372
17373 /* Now read it back. */
17374 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17375 if (ret) {
17376 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17377 "err = %d\n", __func__, ret);
17378 break;
17379 }
17380
17381 /* Verify it. */
17382 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17383 if (p[i] == i)
17384 continue;
17385
17386 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17387 DMA_RWCTRL_WRITE_BNDRY_16) {
17388 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17389 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17390 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17391 break;
17392 } else {
17393 dev_err(&tp->pdev->dev,
17394 "%s: Buffer corrupted on read back! "
17395 "(%d != %d)\n", __func__, p[i], i);
17396 ret = -ENODEV;
17397 goto out;
17398 }
17399 }
17400
17401 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17402 /* Success. */
17403 ret = 0;
17404 break;
17405 }
17406 }
17407 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17408 DMA_RWCTRL_WRITE_BNDRY_16) {
17409 /* DMA test passed without adjusting DMA boundary,
17410 * now look for chipsets that are known to expose the
17411 * DMA bug without failing the test.
17412 */
17413 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17414 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17415 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17416 } else {
17417 /* Safe to use the calculated DMA boundary. */
17418 tp->dma_rwctrl = saved_dma_rwctrl;
17419 }
17420
17421 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17422 }
17423
17424 out:
17425 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17426 out_nofree:
17427 return ret;
17428 }
17429
17430 static void tg3_init_bufmgr_config(struct tg3 *tp)
17431 {
17432 if (tg3_flag(tp, 57765_PLUS)) {
17433 tp->bufmgr_config.mbuf_read_dma_low_water =
17434 DEFAULT_MB_RDMA_LOW_WATER_5705;
17435 tp->bufmgr_config.mbuf_mac_rx_low_water =
17436 DEFAULT_MB_MACRX_LOW_WATER_57765;
17437 tp->bufmgr_config.mbuf_high_water =
17438 DEFAULT_MB_HIGH_WATER_57765;
17439
17440 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17441 DEFAULT_MB_RDMA_LOW_WATER_5705;
17442 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17443 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17444 tp->bufmgr_config.mbuf_high_water_jumbo =
17445 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17446 } else if (tg3_flag(tp, 5705_PLUS)) {
17447 tp->bufmgr_config.mbuf_read_dma_low_water =
17448 DEFAULT_MB_RDMA_LOW_WATER_5705;
17449 tp->bufmgr_config.mbuf_mac_rx_low_water =
17450 DEFAULT_MB_MACRX_LOW_WATER_5705;
17451 tp->bufmgr_config.mbuf_high_water =
17452 DEFAULT_MB_HIGH_WATER_5705;
17453 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17454 tp->bufmgr_config.mbuf_mac_rx_low_water =
17455 DEFAULT_MB_MACRX_LOW_WATER_5906;
17456 tp->bufmgr_config.mbuf_high_water =
17457 DEFAULT_MB_HIGH_WATER_5906;
17458 }
17459
17460 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17461 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17462 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17463 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17464 tp->bufmgr_config.mbuf_high_water_jumbo =
17465 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17466 } else {
17467 tp->bufmgr_config.mbuf_read_dma_low_water =
17468 DEFAULT_MB_RDMA_LOW_WATER;
17469 tp->bufmgr_config.mbuf_mac_rx_low_water =
17470 DEFAULT_MB_MACRX_LOW_WATER;
17471 tp->bufmgr_config.mbuf_high_water =
17472 DEFAULT_MB_HIGH_WATER;
17473
17474 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17475 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17476 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17477 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17478 tp->bufmgr_config.mbuf_high_water_jumbo =
17479 DEFAULT_MB_HIGH_WATER_JUMBO;
17480 }
17481
17482 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17483 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17484 }
17485
17486 static char *tg3_phy_string(struct tg3 *tp)
17487 {
17488 switch (tp->phy_id & TG3_PHY_ID_MASK) {
17489 case TG3_PHY_ID_BCM5400: return "5400";
17490 case TG3_PHY_ID_BCM5401: return "5401";
17491 case TG3_PHY_ID_BCM5411: return "5411";
17492 case TG3_PHY_ID_BCM5701: return "5701";
17493 case TG3_PHY_ID_BCM5703: return "5703";
17494 case TG3_PHY_ID_BCM5704: return "5704";
17495 case TG3_PHY_ID_BCM5705: return "5705";
17496 case TG3_PHY_ID_BCM5750: return "5750";
17497 case TG3_PHY_ID_BCM5752: return "5752";
17498 case TG3_PHY_ID_BCM5714: return "5714";
17499 case TG3_PHY_ID_BCM5780: return "5780";
17500 case TG3_PHY_ID_BCM5755: return "5755";
17501 case TG3_PHY_ID_BCM5787: return "5787";
17502 case TG3_PHY_ID_BCM5784: return "5784";
17503 case TG3_PHY_ID_BCM5756: return "5722/5756";
17504 case TG3_PHY_ID_BCM5906: return "5906";
17505 case TG3_PHY_ID_BCM5761: return "5761";
17506 case TG3_PHY_ID_BCM5718C: return "5718C";
17507 case TG3_PHY_ID_BCM5718S: return "5718S";
17508 case TG3_PHY_ID_BCM57765: return "57765";
17509 case TG3_PHY_ID_BCM5719C: return "5719C";
17510 case TG3_PHY_ID_BCM5720C: return "5720C";
17511 case TG3_PHY_ID_BCM5762: return "5762C";
17512 case TG3_PHY_ID_BCM8002: return "8002/serdes";
17513 case 0: return "serdes";
17514 default: return "unknown";
17515 }
17516 }
17517
17518 static char *tg3_bus_string(struct tg3 *tp, char *str)
17519 {
17520 if (tg3_flag(tp, PCI_EXPRESS)) {
17521 strcpy(str, "PCI Express");
17522 return str;
17523 } else if (tg3_flag(tp, PCIX_MODE)) {
17524 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17525
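		/* Decode the PCI-X bus speed from the low clock control bits;
		 * the 5704 CIOBE board always runs at 133MHz.
		 */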
17526 strcpy(str, "PCIX:");
17527
17528 if ((clock_ctrl == 7) ||
17529 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17530 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17531 strcat(str, "133MHz");
17532 else if (clock_ctrl == 0)
17533 strcat(str, "33MHz");
17534 else if (clock_ctrl == 2)
17535 strcat(str, "50MHz");
17536 else if (clock_ctrl == 4)
17537 strcat(str, "66MHz");
17538 else if (clock_ctrl == 6)
17539 strcat(str, "100MHz");
17540 } else {
17541 strcpy(str, "PCI:");
17542 if (tg3_flag(tp, PCI_HIGH_SPEED))
17543 strcat(str, "66MHz");
17544 else
17545 strcat(str, "33MHz");
17546 }
17547 if (tg3_flag(tp, PCI_32BIT))
17548 strcat(str, ":32-bit");
17549 else
17550 strcat(str, ":64-bit");
17551 return str;
17552 }
17553
17554 static void tg3_init_coal(struct tg3 *tp)
17555 {
17556 struct ethtool_coalesce *ec = &tp->coal;
17557
17558 memset(ec, 0, sizeof(*ec));
17559 ec->cmd = ETHTOOL_GCOALESCE;
17560 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17561 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17562 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17563 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17564 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17565 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17566 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17567 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17568 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17569
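	/* Chips using CLRTICK mode (set up in tg3_get_invariants) need the
	 * CLRTCKS variants of the coalescing tick defaults.
	 */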
17570 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17571 HOSTCC_MODE_CLRTICK_TXBD)) {
17572 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17573 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17574 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17575 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17576 }
17577
17578 if (tg3_flag(tp, 5705_PLUS)) {
17579 ec->rx_coalesce_usecs_irq = 0;
17580 ec->tx_coalesce_usecs_irq = 0;
17581 ec->stats_block_coalesce_usecs = 0;
17582 }
17583 }
17584
17585 static int tg3_init_one(struct pci_dev *pdev,
17586 const struct pci_device_id *ent)
17587 {
17588 struct net_device *dev;
17589 struct tg3 *tp;
17590 int i, err;
17591 u32 sndmbx, rcvmbx, intmbx;
17592 char str[40];
17593 u64 dma_mask, persist_dma_mask;
17594 netdev_features_t features = 0;
17595 u8 addr[ETH_ALEN] __aligned(2);
17596
17597 err = pci_enable_device(pdev);
17598 if (err) {
17599 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17600 return err;
17601 }
17602
17603 err = pci_request_regions(pdev, DRV_MODULE_NAME);
17604 if (err) {
17605 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17606 goto err_out_disable_pdev;
17607 }
17608
17609 pci_set_master(pdev);
17610
17611 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17612 if (!dev) {
17613 err = -ENOMEM;
17614 goto err_out_free_res;
17615 }
17616
17617 SET_NETDEV_DEV(dev, &pdev->dev);
17618
17619 tp = netdev_priv(dev);
17620 tp->pdev = pdev;
17621 tp->dev = dev;
17622 tp->rx_mode = TG3_DEF_RX_MODE;
17623 tp->tx_mode = TG3_DEF_TX_MODE;
17624 tp->irq_sync = 1;
17625 tp->pcierr_recovery = false;
17626
17627 if (tg3_debug > 0)
17628 tp->msg_enable = tg3_debug;
17629 else
17630 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17631
17632 if (pdev_is_ssb_gige_core(pdev)) {
17633 tg3_flag_set(tp, IS_SSB_CORE);
17634 if (ssb_gige_must_flush_posted_writes(pdev))
17635 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17636 if (ssb_gige_one_dma_at_once(pdev))
17637 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17638 if (ssb_gige_have_roboswitch(pdev)) {
17639 tg3_flag_set(tp, USE_PHYLIB);
17640 tg3_flag_set(tp, ROBOSWITCH);
17641 }
17642 if (ssb_gige_is_rgmii(pdev))
17643 tg3_flag_set(tp, RGMII_MODE);
17644 }
17645
17646 /* The word/byte swap controls here control register access byte
17647 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17648 * setting below.
17649 */
17650 tp->misc_host_ctrl =
17651 MISC_HOST_CTRL_MASK_PCI_INT |
17652 MISC_HOST_CTRL_WORD_SWAP |
17653 MISC_HOST_CTRL_INDIR_ACCESS |
17654 MISC_HOST_CTRL_PCISTATE_RW;
17655
17656 /* The NONFRM (non-frame) byte/word swap controls take effect
17657 * on descriptor entries, anything which isn't packet data.
17658 *
17659 * The StrongARM chips on the board (one for tx, one for rx)
17660 * are running in big-endian mode.
17661 */
17662 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17663 GRC_MODE_WSWAP_NONFRM_DATA);
17664 #ifdef __BIG_ENDIAN
17665 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17666 #endif
17667 spin_lock_init(&tp->lock);
17668 spin_lock_init(&tp->indirect_lock);
17669 INIT_WORK(&tp->reset_task, tg3_reset_task);
17670
17671 tp->regs = pci_ioremap_bar(pdev, BAR_0);
17672 if (!tp->regs) {
17673 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17674 err = -ENOMEM;
17675 goto err_out_free_dev;
17676 }
17677
17678 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17679 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17680 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17681 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17682 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17683 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17684 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17685 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17686 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17687 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17688 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17689 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17690 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17691 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17692 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17693 tg3_flag_set(tp, ENABLE_APE);
17694 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17695 if (!tp->aperegs) {
17696 dev_err(&pdev->dev,
17697 "Cannot map APE registers, aborting\n");
17698 err = -ENOMEM;
17699 goto err_out_iounmap;
17700 }
17701 }
17702
17703 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17704 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17705
17706 dev->ethtool_ops = &tg3_ethtool_ops;
17707 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17708 dev->netdev_ops = &tg3_netdev_ops;
17709 dev->irq = pdev->irq;
17710
17711 err = tg3_get_invariants(tp, ent);
17712 if (err) {
17713 dev_err(&pdev->dev,
17714 "Problem fetching invariants of chip, aborting\n");
17715 goto err_out_apeunmap;
17716 }
17717
17718 /* The EPB bridge inside 5714, 5715, and 5780 and any
17719 * device behind the EPB cannot support DMA addresses > 40-bit.
17720 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17721 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17722 * do DMA address check in tg3_start_xmit().
17723 */
17724 if (tg3_flag(tp, IS_5788))
17725 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17726 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17727 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17728 #ifdef CONFIG_HIGHMEM
17729 dma_mask = DMA_BIT_MASK(64);
17730 #endif
17731 } else
17732 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17733
17734 if (tg3_asic_rev(tp) == ASIC_REV_57766)
17735 persist_dma_mask = DMA_BIT_MASK(31);
17736
17737 /* Configure DMA attributes. */
17738 if (dma_mask > DMA_BIT_MASK(32)) {
17739 err = dma_set_mask(&pdev->dev, dma_mask);
17740 if (!err) {
17741 features |= NETIF_F_HIGHDMA;
17742 err = dma_set_coherent_mask(&pdev->dev,
17743 persist_dma_mask);
17744 if (err < 0) {
17745 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17746 "DMA for consistent allocations\n");
17747 goto err_out_apeunmap;
17748 }
17749 }
17750 }
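	/* Fall back to 32-bit DMA if a wider mask was refused. */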
17751 if (err || dma_mask == DMA_BIT_MASK(32)) {
17752 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
17753 if (err) {
17754 dev_err(&pdev->dev,
17755 "No usable DMA configuration, aborting\n");
17756 goto err_out_apeunmap;
17757 }
17758 }
17759
17760 tg3_init_bufmgr_config(tp);
17761
17762 /* 5700 B0 chips do not support checksumming correctly due
17763 * to hardware bugs.
17764 */
17765 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17766 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17767
17768 if (tg3_flag(tp, 5755_PLUS))
17769 features |= NETIF_F_IPV6_CSUM;
17770 }
17771
17772 /* TSO is on by default on chips that support hardware TSO.
17773 * Firmware TSO on older chips gives lower performance, so it
17774 * is off by default, but can be enabled using ethtool.
17775 */
17776 if ((tg3_flag(tp, HW_TSO_1) ||
17777 tg3_flag(tp, HW_TSO_2) ||
17778 tg3_flag(tp, HW_TSO_3)) &&
17779 (features & NETIF_F_IP_CSUM))
17780 features |= NETIF_F_TSO;
17781 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17782 if (features & NETIF_F_IPV6_CSUM)
17783 features |= NETIF_F_TSO6;
17784 if (tg3_flag(tp, HW_TSO_3) ||
17785 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17786 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17787 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17788 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17789 tg3_asic_rev(tp) == ASIC_REV_57780)
17790 features |= NETIF_F_TSO_ECN;
17791 }
17792
17793 dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17794 NETIF_F_HW_VLAN_CTAG_RX;
17795 dev->vlan_features |= features;
17796
17797 /*
17798 * Add loopback capability only for a subset of devices that support
17799 	 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17800 	 * loopback for the remaining devices.
17801 */
17802 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17803 !tg3_flag(tp, CPMU_PRESENT))
17804 /* Add the loopback capability */
17805 features |= NETIF_F_LOOPBACK;
17806
17807 dev->hw_features |= features;
17808 dev->priv_flags |= IFF_UNICAST_FLT;
17809
17810 /* MTU range: 60 - 9000 or 1500, depending on hardware */
17811 dev->min_mtu = TG3_MIN_MTU;
17812 dev->max_mtu = TG3_MAX_MTU(tp);
17813
17814 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17815 !tg3_flag(tp, TSO_CAPABLE) &&
17816 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17817 tg3_flag_set(tp, MAX_RXPEND_64);
17818 tp->rx_pending = 63;
17819 }
17820
17821 err = tg3_get_device_address(tp, addr);
17822 if (err) {
17823 dev_err(&pdev->dev,
17824 "Could not obtain valid ethernet address, aborting\n");
17825 goto err_out_apeunmap;
17826 }
17827 eth_hw_addr_set(dev, addr);
17828
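	/* Set up per-vector interrupt, rx consumer, and tx producer mailbox
	 * addresses; each vector's mailboxes sit at small fixed offsets from
	 * the previous vector's.
	 */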
17829 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17830 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17831 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17832 for (i = 0; i < tp->irq_max; i++) {
17833 struct tg3_napi *tnapi = &tp->napi[i];
17834
17835 tnapi->tp = tp;
17836 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17837
17838 tnapi->int_mbox = intmbx;
17839 intmbx += 0x8;
17840
17841 tnapi->consmbox = rcvmbx;
17842 tnapi->prodmbox = sndmbx;
17843
17844 if (i)
17845 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17846 else
17847 tnapi->coal_now = HOSTCC_MODE_NOW;
17848
17849 if (!tg3_flag(tp, SUPPORT_MSIX))
17850 break;
17851
17852 /*
17853 * If we support MSIX, we'll be using RSS. If we're using
17854 * RSS, the first vector only handles link interrupts and the
17855 * remaining vectors handle rx and tx interrupts. Reuse the
17856 		 * mailbox values for the next iteration. The values we set up
17857 		 * above are still useful for the single-vector mode.
17858 */
17859 if (!i)
17860 continue;
17861
17862 rcvmbx += 0x8;
17863
17864 if (sndmbx & 0x4)
17865 sndmbx -= 0x4;
17866 else
17867 sndmbx += 0xc;
17868 }
17869
17870 /*
17871 	 * Reset the chip in case a UNDI or EFI driver did not shut it down
17872 	 * cleanly. The DMA self test will enable the WDMAC and we'll see
17873 	 * (spurious) pending DMA on the PCI bus at that point.
17874 */
17875 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17876 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17877 tg3_full_lock(tp, 0);
17878 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17879 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17880 tg3_full_unlock(tp);
17881 }
17882
17883 err = tg3_test_dma(tp);
17884 if (err) {
17885 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17886 goto err_out_apeunmap;
17887 }
17888
17889 tg3_init_coal(tp);
17890
17891 pci_set_drvdata(pdev, dev);
17892
17893 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17894 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17895 tg3_asic_rev(tp) == ASIC_REV_5762)
17896 tg3_flag_set(tp, PTP_CAPABLE);
17897
17898 tg3_timer_init(tp);
17899
17900 tg3_carrier_off(tp);
17901
17902 err = register_netdev(dev);
17903 if (err) {
17904 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17905 goto err_out_apeunmap;
17906 }
17907
17908 if (tg3_flag(tp, PTP_CAPABLE)) {
17909 tg3_ptp_init(tp);
17910 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17911 &tp->pdev->dev);
17912 if (IS_ERR(tp->ptp_clock))
17913 tp->ptp_clock = NULL;
17914 }
17915
17916 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17917 tp->board_part_number,
17918 tg3_chip_rev_id(tp),
17919 tg3_bus_string(tp, str),
17920 dev->dev_addr);
17921
17922 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17923 char *ethtype;
17924
17925 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17926 ethtype = "10/100Base-TX";
17927 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17928 ethtype = "1000Base-SX";
17929 else
17930 ethtype = "10/100/1000Base-T";
17931
17932 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17933 "(WireSpeed[%d], EEE[%d])\n",
17934 tg3_phy_string(tp), ethtype,
17935 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17936 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17937 }
17938
17939 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17940 (dev->features & NETIF_F_RXCSUM) != 0,
17941 tg3_flag(tp, USE_LINKCHG_REG) != 0,
17942 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17943 tg3_flag(tp, ENABLE_ASF) != 0,
17944 tg3_flag(tp, TSO_CAPABLE) != 0);
17945 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17946 tp->dma_rwctrl,
17947 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17948 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17949
17950 pci_save_state(pdev);
17951
17952 return 0;
17953
17954 err_out_apeunmap:
17955 if (tp->aperegs) {
17956 iounmap(tp->aperegs);
17957 tp->aperegs = NULL;
17958 }
17959
17960 err_out_iounmap:
17961 if (tp->regs) {
17962 iounmap(tp->regs);
17963 tp->regs = NULL;
17964 }
17965
17966 err_out_free_dev:
17967 free_netdev(dev);
17968
17969 err_out_free_res:
17970 pci_release_regions(pdev);
17971
17972 err_out_disable_pdev:
17973 if (pci_is_enabled(pdev))
17974 pci_disable_device(pdev);
17975 return err;
17976 }
17977
17978 static void tg3_remove_one(struct pci_dev *pdev)
17979 {
17980 struct net_device *dev = pci_get_drvdata(pdev);
17981
17982 if (dev) {
17983 struct tg3 *tp = netdev_priv(dev);
17984
17985 tg3_ptp_fini(tp);
17986
17987 release_firmware(tp->fw);
17988
17989 tg3_reset_task_cancel(tp);
17990
17991 if (tg3_flag(tp, USE_PHYLIB)) {
17992 tg3_phy_fini(tp);
17993 tg3_mdio_fini(tp);
17994 }
17995
17996 unregister_netdev(dev);
17997 if (tp->aperegs) {
17998 iounmap(tp->aperegs);
17999 tp->aperegs = NULL;
18000 }
18001 if (tp->regs) {
18002 iounmap(tp->regs);
18003 tp->regs = NULL;
18004 }
18005 free_netdev(dev);
18006 pci_release_regions(pdev);
18007 pci_disable_device(pdev);
18008 }
18009 }
18010
18011 #ifdef CONFIG_PM_SLEEP
18012 static int tg3_suspend(struct device *device)
18013 {
18014 struct net_device *dev = dev_get_drvdata(device);
18015 struct tg3 *tp = netdev_priv(dev);
18016 int err = 0;
18017
18018 rtnl_lock();
18019
18020 if (!netif_running(dev))
18021 goto unlock;
18022
18023 tg3_reset_task_cancel(tp);
18024 tg3_phy_stop(tp);
18025 tg3_netif_stop(tp);
18026
18027 tg3_timer_stop(tp);
18028
18029 tg3_full_lock(tp, 1);
18030 tg3_disable_ints(tp);
18031 tg3_full_unlock(tp);
18032
18033 netif_device_detach(dev);
18034
18035 tg3_full_lock(tp, 0);
18036 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
18037 tg3_flag_clear(tp, INIT_COMPLETE);
18038 tg3_full_unlock(tp);
18039
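	/* If power-down preparation fails, restart the hardware and
	 * reattach the device so it is left usable rather than half-down.
	 */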
18040 err = tg3_power_down_prepare(tp);
18041 if (err) {
18042 int err2;
18043
18044 tg3_full_lock(tp, 0);
18045
18046 tg3_flag_set(tp, INIT_COMPLETE);
18047 err2 = tg3_restart_hw(tp, true);
18048 if (err2)
18049 goto out;
18050
18051 tg3_timer_start(tp);
18052
18053 netif_device_attach(dev);
18054 tg3_netif_start(tp);
18055
18056 out:
18057 tg3_full_unlock(tp);
18058
18059 if (!err2)
18060 tg3_phy_start(tp);
18061 }
18062
18063 unlock:
18064 rtnl_unlock();
18065 return err;
18066 }
18067
18068 static int tg3_resume(struct device *device)
18069 {
18070 struct net_device *dev = dev_get_drvdata(device);
18071 struct tg3 *tp = netdev_priv(dev);
18072 int err = 0;
18073
18074 rtnl_lock();
18075
18076 if (!netif_running(dev))
18077 goto unlock;
18078
18079 netif_device_attach(dev);
18080
18081 tg3_full_lock(tp, 0);
18082
18083 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18084
18085 tg3_flag_set(tp, INIT_COMPLETE);
18086 err = tg3_restart_hw(tp,
18087 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18088 if (err)
18089 goto out;
18090
18091 tg3_timer_start(tp);
18092
18093 tg3_netif_start(tp);
18094
18095 out:
18096 tg3_full_unlock(tp);
18097
18098 if (!err)
18099 tg3_phy_start(tp);
18100
18101 unlock:
18102 rtnl_unlock();
18103 return err;
18104 }
18105 #endif /* CONFIG_PM_SLEEP */
18106
18107 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18108
18109 static void tg3_shutdown(struct pci_dev *pdev)
18110 {
18111 struct net_device *dev = pci_get_drvdata(pdev);
18112 struct tg3 *tp = netdev_priv(dev);
18113
18114 tg3_reset_task_cancel(tp);
18115
18116 rtnl_lock();
18117
18118 netif_device_detach(dev);
18119
18120 if (netif_running(dev))
18121 dev_close(dev);
18122
18123 if (system_state == SYSTEM_POWER_OFF)
18124 tg3_power_down(tp);
18125
18126 rtnl_unlock();
18127
18128 pci_disable_device(pdev);
18129 }
18130
18131 /**
18132 * tg3_io_error_detected - called when PCI error is detected
18133 * @pdev: Pointer to PCI device
18134 * @state: The current pci connection state
18135 *
18136 * This function is called after a PCI bus error affecting
18137 * this device has been detected.
18138 */
18139 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18140 pci_channel_state_t state)
18141 {
18142 struct net_device *netdev = pci_get_drvdata(pdev);
18143 struct tg3 *tp = netdev_priv(netdev);
18144 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18145
18146 netdev_info(netdev, "PCI I/O error detected\n");
18147
18148 /* Want to make sure that the reset task doesn't run */
18149 tg3_reset_task_cancel(tp);
18150
18151 rtnl_lock();
18152
18153 	/* Could be a second call, or maybe we don't have a netdev yet */
18154 if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
18155 goto done;
18156
18157 	/* No need to recover from a permanent error; only flag recovery for a frozen channel */
18158 if (state == pci_channel_io_frozen)
18159 tp->pcierr_recovery = true;
18160
18161 tg3_phy_stop(tp);
18162
18163 tg3_netif_stop(tp);
18164
18165 tg3_timer_stop(tp);
18166
18167 netif_device_detach(netdev);
18168
18169 /* Clean up software state, even if MMIO is blocked */
18170 tg3_full_lock(tp, 0);
18171 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18172 tg3_full_unlock(tp);
18173
18174 done:
18175 if (state == pci_channel_io_perm_failure) {
18176 if (netdev) {
18177 tg3_napi_enable(tp);
18178 dev_close(netdev);
18179 }
18180 err = PCI_ERS_RESULT_DISCONNECT;
18181 } else {
18182 pci_disable_device(pdev);
18183 }
18184
18185 rtnl_unlock();
18186
18187 return err;
18188 }
18189
18190 /**
18191 * tg3_io_slot_reset - called after the pci bus has been reset.
18192 * @pdev: Pointer to PCI device
18193 *
18194 * Restart the card from scratch, as if from a cold-boot.
18195 	 * At this point, the card has experienced a hard reset,
18196 * followed by fixups by BIOS, and has its config space
18197 * set up identically to what it was at cold boot.
18198 */
18199 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18200 {
18201 struct net_device *netdev = pci_get_drvdata(pdev);
18202 struct tg3 *tp = netdev_priv(netdev);
18203 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18204 int err;
18205
18206 rtnl_lock();
18207
18208 if (pci_enable_device(pdev)) {
18209 dev_err(&pdev->dev,
18210 "Cannot re-enable PCI device after reset.\n");
18211 goto done;
18212 }
18213
18214 pci_set_master(pdev);
18215 pci_restore_state(pdev);
18216 pci_save_state(pdev);
18217
18218 if (!netdev || !netif_running(netdev)) {
18219 rc = PCI_ERS_RESULT_RECOVERED;
18220 goto done;
18221 }
18222
18223 err = tg3_power_up(tp);
18224 if (err)
18225 goto done;
18226
18227 rc = PCI_ERS_RESULT_RECOVERED;
18228
18229 done:
18230 if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18231 tg3_napi_enable(tp);
18232 dev_close(netdev);
18233 }
18234 rtnl_unlock();
18235
18236 return rc;
18237 }
18238
18239 /**
18240 * tg3_io_resume - called when traffic can start flowing again.
18241 * @pdev: Pointer to PCI device
18242 *
18243 * This callback is called when the error recovery driver tells
18244 	 * us that it's OK to resume normal operation.
18245 */
18246 static void tg3_io_resume(struct pci_dev *pdev)
18247 {
18248 struct net_device *netdev = pci_get_drvdata(pdev);
18249 struct tg3 *tp = netdev_priv(netdev);
18250 int err;
18251
18252 rtnl_lock();
18253
18254 if (!netdev || !netif_running(netdev))
18255 goto done;
18256
18257 tg3_full_lock(tp, 0);
18258 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18259 tg3_flag_set(tp, INIT_COMPLETE);
18260 err = tg3_restart_hw(tp, true);
18261 if (err) {
18262 tg3_full_unlock(tp);
18263 netdev_err(netdev, "Cannot restart hardware after reset.\n");
18264 goto done;
18265 }
18266
18267 netif_device_attach(netdev);
18268
18269 tg3_timer_start(tp);
18270
18271 tg3_netif_start(tp);
18272
18273 tg3_full_unlock(tp);
18274
18275 tg3_phy_start(tp);
18276
18277 done:
18278 tp->pcierr_recovery = false;
18279 rtnl_unlock();
18280 }
18281
18282 static const struct pci_error_handlers tg3_err_handler = {
18283 .error_detected = tg3_io_error_detected,
18284 .slot_reset = tg3_io_slot_reset,
18285 .resume = tg3_io_resume
18286 };
18287
18288 static struct pci_driver tg3_driver = {
18289 .name = DRV_MODULE_NAME,
18290 .id_table = tg3_pci_tbl,
18291 .probe = tg3_init_one,
18292 .remove = tg3_remove_one,
18293 .err_handler = &tg3_err_handler,
18294 .driver.pm = &tg3_pm_ops,
18295 .shutdown = tg3_shutdown,
18296 };
18297
18298 module_pci_driver(tg3_driver);
18299