/* tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2016 Broadcom Corporation.
 * Copyright (C) 2016-2017 Broadcom Limited.
 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 * refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2016 Broadcom Corporation.
 *	Copyright (C) 2016-2017 Broadcom Ltd.
 *	Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 *	refers to Broadcom Inc. and/or its subsidiaries.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/crc32poly.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)

#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		137
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
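/* Illustrative note: with the values above, DRV_MODULE_VERSION expands
 * to the string "3.137", and the tg3_flag() wrappers token-paste their
 * argument onto TG3_FLAG_, e.g. tg3_flag(tp, TAGGED_STATUS) tests
 * TG3_FLAG_TAGGED_STATUS in tp->tg3_flags.
 */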
#define DRV_MODULE_RELDATE	"May 11, 2014"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			ETH_ZLEN
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
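/* Illustrative numbers, assuming the defaults below: a 60 byte ARP
 * frame falls under TG3_RX_COPY_THRESHOLD and is copied into a fresh
 * skb so its rx buffer can be recycled in place, while a 1500 byte
 * frame is handed up directly.  NEXT_TX() above is the '& (foo - 1)'
 * trick from the earlier comment: NEXT_TX(511) wraps to 0 without a
 * hardware divide.
 */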
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
"tx_xoff_sent" }, 399 { "tx_flow_control" }, 400 { "tx_mac_errors" }, 401 { "tx_single_collisions" }, 402 { "tx_mult_collisions" }, 403 { "tx_deferred" }, 404 { "tx_excessive_collisions" }, 405 { "tx_late_collisions" }, 406 { "tx_collide_2times" }, 407 { "tx_collide_3times" }, 408 { "tx_collide_4times" }, 409 { "tx_collide_5times" }, 410 { "tx_collide_6times" }, 411 { "tx_collide_7times" }, 412 { "tx_collide_8times" }, 413 { "tx_collide_9times" }, 414 { "tx_collide_10times" }, 415 { "tx_collide_11times" }, 416 { "tx_collide_12times" }, 417 { "tx_collide_13times" }, 418 { "tx_collide_14times" }, 419 { "tx_collide_15times" }, 420 { "tx_ucast_packets" }, 421 { "tx_mcast_packets" }, 422 { "tx_bcast_packets" }, 423 { "tx_carrier_sense_errors" }, 424 { "tx_discards" }, 425 { "tx_errors" }, 426 427 { "dma_writeq_full" }, 428 { "dma_write_prioq_full" }, 429 { "rxbds_empty" }, 430 { "rx_discards" }, 431 { "rx_errors" }, 432 { "rx_threshold_hit" }, 433 434 { "dma_readq_full" }, 435 { "dma_read_prioq_full" }, 436 { "tx_comp_queue_full" }, 437 438 { "ring_set_send_prod_index" }, 439 { "ring_status_update" }, 440 { "nic_irqs" }, 441 { "nic_avoided_irqs" }, 442 { "nic_tx_threshold_hit" }, 443 444 { "mbuf_lwm_thresh_hit" }, 445 }; 446 447 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys) 448 #define TG3_NVRAM_TEST 0 449 #define TG3_LINK_TEST 1 450 #define TG3_REGISTER_TEST 2 451 #define TG3_MEMORY_TEST 3 452 #define TG3_MAC_LOOPB_TEST 4 453 #define TG3_PHY_LOOPB_TEST 5 454 #define TG3_EXT_LOOPB_TEST 6 455 #define TG3_INTERRUPT_TEST 7 456 457 458 static const struct { 459 const char string[ETH_GSTRING_LEN]; 460 } ethtool_test_keys[] = { 461 [TG3_NVRAM_TEST] = { "nvram test (online) " }, 462 [TG3_LINK_TEST] = { "link test (online) " }, 463 [TG3_REGISTER_TEST] = { "register test (offline)" }, 464 [TG3_MEMORY_TEST] = { "memory test (offline)" }, 465 [TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" }, 466 [TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" }, 467 [TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" }, 468 [TG3_INTERRUPT_TEST] = { "interrupt test (offline)" }, 469 }; 470 471 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys) 472 473 474 static void tg3_write32(struct tg3 *tp, u32 off, u32 val) 475 { 476 writel(val, tp->regs + off); 477 } 478 479 static u32 tg3_read32(struct tg3 *tp, u32 off) 480 { 481 return readl(tp->regs + off); 482 } 483 484 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val) 485 { 486 writel(val, tp->aperegs + off); 487 } 488 489 static u32 tg3_ape_read32(struct tg3 *tp, u32 off) 490 { 491 return readl(tp->aperegs + off); 492 } 493 494 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val) 495 { 496 unsigned long flags; 497 498 spin_lock_irqsave(&tp->indirect_lock, flags); 499 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); 500 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val); 501 spin_unlock_irqrestore(&tp->indirect_lock, flags); 502 } 503 504 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val) 505 { 506 writel(val, tp->regs + off); 507 readl(tp->regs + off); 508 } 509 510 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off) 511 { 512 unsigned long flags; 513 u32 val; 514 515 spin_lock_irqsave(&tp->indirect_lock, flags); 516 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); 517 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val); 518 spin_unlock_irqrestore(&tp->indirect_lock, flags); 519 return val; 520 } 521 522 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
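/* Usage note, for illustration: callers that need a settling time go
 * through the tw32_wait_f() wrapper defined below; tg3_switch_clocks()
 * later in this file uses tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40)
 * so the 40 usec delay is honored whether or not the write is posted.
 */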
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
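	/* Clarifying note: the PHY locks below always use the common
	 * DRIVER grant bit, while the remaining locks are granted per
	 * PCI function (bit = 1 << pci_fn); that is why the loop
	 * special-cases the PHY lock numbers.
	 */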
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}

}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}
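/* Behavior note, for illustration: on success the loop above returns
 * with TG3_APE_LOCK_MEM still held for the caller (see the later
 * tg3_ape_unlock() in tg3_ape_send_event()); between unsuccessful
 * polls it drops the lock so firmware can make progress, spending the
 * timeout budget in 10 usec slices.
 */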
#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
#endif
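/* Flow recap for tg3_ape_scratchpad_read() above, for illustration:
 * the driver posts a SCRTCHPD_READ event describing {base_off, length},
 * fires APE_EVENT_1, waits for the pending bit to clear, then pulls
 * the result out of the shared message buffer four bytes at a time.
 */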
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 20 milliseconds for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 20000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
				APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_send_ape_heartbeat(struct tg3 *tp,
				   unsigned long interval)
{
	/* Check if the heartbeat interval has elapsed */
	if (!tg3_flag(tp, ENABLE_APE) ||
	    time_before(jiffies, tp->ape_hb_jiffies + interval))
		return;

	tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
	tp->ape_hb_jiffies = jiffies;
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
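/* Mailbox semantics sketch, for illustration (not normative): writing
 * (last_tag << 24) to an interrupt mailbox above leaves the low bit
 * clear, acknowledging status-tagged work and unmasking the vector,
 * whereas tg3_disable_ints() writes 1 to mask it; the doubled write
 * for 1SHOT_MSI parts appears to re-arm one-shot MSI delivery.
 */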
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}
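/* MDIO frame illustration: a clause-22 read of, say, MII_BMSR on the
 * current PHY is issued by packing the PHY and register addresses into
 * MAC_MI_COM together with MI_COM_CMD_READ | MI_COM_START, then
 * polling until MI_COM_BUSY clears; the result sits in the MI_COM data
 * field, exactly as __tg3_readphy() does above.
 */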
static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}
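/* Access-pattern note, for illustration: the cl45 helpers above use
 * the clause-22 MMD indirection (MII_TG3_MMD_CTRL selects the device,
 * MII_TG3_MMD_ADDRESS carries the address, then a DATA_NOINC cycle
 * moves the data), while MII_TG3_AUX_CTRL is a shadowed register whose
 * low bits select the page; the MISC page additionally needs
 * MII_TG3_AUXCTL_MISC_WREN set for a write to take effect.
 */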
static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fall through */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}
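/* Worked example for the wait-shortening math above: if roughly
 * 1000 usec of the 2500 usec window remain, delay_cnt becomes
 * (1000 >> 3) + 1 = 126 polls of 8 usec each (~1008 usec), so the
 * loop never waits much longer than the time actually left.
 */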
/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}
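/* Layout note, for illustration: the four words gathered above pack
 * (BMCR << 16 | BMSR), (ADVERTISE << 16 | LPA),
 * (CTRL1000 << 16 | STAT1000) and (PHYADDR << 16), which the
 * LINK_UPDATE command presumably consumes on the firmware side.
 */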
/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			if (pci_channel_offline(tp->pdev))
				return -ENODEV;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		if (pci_channel_offline(tp->pdev)) {
			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
				tg3_flag_set(tp, NO_FWARE_REPORTED);
				netdev_info(tp->dev, "No firmware running\n");
			}

			break;
		}

		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}

	tp->link_up = netif_carrier_ok(tp->dev);
}
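/* 802.3x pause resolution recap for the helpers below, assuming
 * autoneg has completed:
 *   both ends advertise PAUSE              -> FLOW_CTRL_TX | FLOW_CTRL_RX
 *   both advertise ASYM, local also PAUSE  -> FLOW_CTRL_RX
 *   both advertise ASYM, remote also PAUSE -> FLOW_CTRL_TX
 *   anything else                          -> no flow control
 */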
"enabled" : "disabled"); 1904 1905 tg3_ump_link_report(tp); 1906 } 1907 1908 tp->link_up = netif_carrier_ok(tp->dev); 1909 } 1910 1911 static u32 tg3_decode_flowctrl_1000T(u32 adv) 1912 { 1913 u32 flowctrl = 0; 1914 1915 if (adv & ADVERTISE_PAUSE_CAP) { 1916 flowctrl |= FLOW_CTRL_RX; 1917 if (!(adv & ADVERTISE_PAUSE_ASYM)) 1918 flowctrl |= FLOW_CTRL_TX; 1919 } else if (adv & ADVERTISE_PAUSE_ASYM) 1920 flowctrl |= FLOW_CTRL_TX; 1921 1922 return flowctrl; 1923 } 1924 1925 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl) 1926 { 1927 u16 miireg; 1928 1929 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX)) 1930 miireg = ADVERTISE_1000XPAUSE; 1931 else if (flow_ctrl & FLOW_CTRL_TX) 1932 miireg = ADVERTISE_1000XPSE_ASYM; 1933 else if (flow_ctrl & FLOW_CTRL_RX) 1934 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM; 1935 else 1936 miireg = 0; 1937 1938 return miireg; 1939 } 1940 1941 static u32 tg3_decode_flowctrl_1000X(u32 adv) 1942 { 1943 u32 flowctrl = 0; 1944 1945 if (adv & ADVERTISE_1000XPAUSE) { 1946 flowctrl |= FLOW_CTRL_RX; 1947 if (!(adv & ADVERTISE_1000XPSE_ASYM)) 1948 flowctrl |= FLOW_CTRL_TX; 1949 } else if (adv & ADVERTISE_1000XPSE_ASYM) 1950 flowctrl |= FLOW_CTRL_TX; 1951 1952 return flowctrl; 1953 } 1954 1955 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv) 1956 { 1957 u8 cap = 0; 1958 1959 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) { 1960 cap = FLOW_CTRL_TX | FLOW_CTRL_RX; 1961 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) { 1962 if (lcladv & ADVERTISE_1000XPAUSE) 1963 cap = FLOW_CTRL_RX; 1964 if (rmtadv & ADVERTISE_1000XPAUSE) 1965 cap = FLOW_CTRL_TX; 1966 } 1967 1968 return cap; 1969 } 1970 1971 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv) 1972 { 1973 u8 autoneg; 1974 u8 flowctrl = 0; 1975 u32 old_rx_mode = tp->rx_mode; 1976 u32 old_tx_mode = tp->tx_mode; 1977 1978 if (tg3_flag(tp, USE_PHYLIB)) 1979 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg; 1980 else 1981 autoneg = tp->link_config.autoneg; 1982 1983 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) { 1984 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) 1985 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv); 1986 else 1987 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv); 1988 } else 1989 flowctrl = tp->link_config.flowctrl; 1990 1991 tp->link_config.active_flowctrl = flowctrl; 1992 1993 if (flowctrl & FLOW_CTRL_RX) 1994 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE; 1995 else 1996 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE; 1997 1998 if (old_rx_mode != tp->rx_mode) 1999 tw32_f(MAC_RX_MODE, tp->rx_mode); 2000 2001 if (flowctrl & FLOW_CTRL_TX) 2002 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE; 2003 else 2004 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE; 2005 2006 if (old_tx_mode != tp->tx_mode) 2007 tw32_f(MAC_TX_MODE, tp->tx_mode); 2008 } 2009 2010 static void tg3_adjust_link(struct net_device *dev) 2011 { 2012 u8 oldflowctrl, linkmesg = 0; 2013 u32 mac_mode, lcl_adv, rmt_adv; 2014 struct tg3 *tp = netdev_priv(dev); 2015 struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 2016 2017 spin_lock_bh(&tp->lock); 2018 2019 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK | 2020 MAC_MODE_HALF_DUPLEX); 2021 2022 oldflowctrl = tp->link_config.active_flowctrl; 2023 2024 if (phydev->link) { 2025 lcl_adv = 0; 2026 rmt_adv = 0; 2027 2028 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10) 2029 mac_mode |= MAC_MODE_PORT_MODE_MII; 2030 else if (phydev->speed == SPEED_1000 || 2031 tg3_asic_rev(tp) != ASIC_REV_5785) 2032 
mac_mode |= MAC_MODE_PORT_MODE_GMII; 2033 else 2034 mac_mode |= MAC_MODE_PORT_MODE_MII; 2035 2036 if (phydev->duplex == DUPLEX_HALF) 2037 mac_mode |= MAC_MODE_HALF_DUPLEX; 2038 else { 2039 lcl_adv = mii_advertise_flowctrl( 2040 tp->link_config.flowctrl); 2041 2042 if (phydev->pause) 2043 rmt_adv = LPA_PAUSE_CAP; 2044 if (phydev->asym_pause) 2045 rmt_adv |= LPA_PAUSE_ASYM; 2046 } 2047 2048 tg3_setup_flow_control(tp, lcl_adv, rmt_adv); 2049 } else 2050 mac_mode |= MAC_MODE_PORT_MODE_GMII; 2051 2052 if (mac_mode != tp->mac_mode) { 2053 tp->mac_mode = mac_mode; 2054 tw32_f(MAC_MODE, tp->mac_mode); 2055 udelay(40); 2056 } 2057 2058 if (tg3_asic_rev(tp) == ASIC_REV_5785) { 2059 if (phydev->speed == SPEED_10) 2060 tw32(MAC_MI_STAT, 2061 MAC_MI_STAT_10MBPS_MODE | 2062 MAC_MI_STAT_LNKSTAT_ATTN_ENAB); 2063 else 2064 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); 2065 } 2066 2067 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF) 2068 tw32(MAC_TX_LENGTHS, 2069 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | 2070 (6 << TX_LENGTHS_IPG_SHIFT) | 2071 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT))); 2072 else 2073 tw32(MAC_TX_LENGTHS, 2074 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | 2075 (6 << TX_LENGTHS_IPG_SHIFT) | 2076 (32 << TX_LENGTHS_SLOT_TIME_SHIFT))); 2077 2078 if (phydev->link != tp->old_link || 2079 phydev->speed != tp->link_config.active_speed || 2080 phydev->duplex != tp->link_config.active_duplex || 2081 oldflowctrl != tp->link_config.active_flowctrl) 2082 linkmesg = 1; 2083 2084 tp->old_link = phydev->link; 2085 tp->link_config.active_speed = phydev->speed; 2086 tp->link_config.active_duplex = phydev->duplex; 2087 2088 spin_unlock_bh(&tp->lock); 2089 2090 if (linkmesg) 2091 tg3_link_report(tp); 2092 } 2093 2094 static int tg3_phy_init(struct tg3 *tp) 2095 { 2096 struct phy_device *phydev; 2097 2098 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) 2099 return 0; 2100 2101 /* Bring the PHY back to a known state. */ 2102 tg3_bmcr_reset(tp); 2103 2104 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 2105 2106 /* Attach the MAC to the PHY. */ 2107 phydev = phy_connect(tp->dev, phydev_name(phydev), 2108 tg3_adjust_link, phydev->interface); 2109 if (IS_ERR(phydev)) { 2110 dev_err(&tp->pdev->dev, "Could not attach to PHY\n"); 2111 return PTR_ERR(phydev); 2112 } 2113 2114 /* Mask with MAC supported features. 
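 * GMII and RGMII PHYs are allowed up to 1000 Mbps, or only 100 Mbps
 * when TG3_PHYFLG_10_100_ONLY is set; plain MII PHYs are capped at
 * 100 Mbps. Asymmetric pause is advertised in all supported modes,
 * and any other interface mode fails the connect with -EINVAL.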
*/ 2115 switch (phydev->interface) { 2116 case PHY_INTERFACE_MODE_GMII: 2117 case PHY_INTERFACE_MODE_RGMII: 2118 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 2119 phy_set_max_speed(phydev, SPEED_1000); 2120 phy_support_asym_pause(phydev); 2121 break; 2122 } 2123 /* fall through */ 2124 case PHY_INTERFACE_MODE_MII: 2125 phy_set_max_speed(phydev, SPEED_100); 2126 phy_support_asym_pause(phydev); 2127 break; 2128 default: 2129 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); 2130 return -EINVAL; 2131 } 2132 2133 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED; 2134 2135 phy_attached_info(phydev); 2136 2137 return 0; 2138 } 2139 2140 static void tg3_phy_start(struct tg3 *tp) 2141 { 2142 struct phy_device *phydev; 2143 2144 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 2145 return; 2146 2147 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 2148 2149 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { 2150 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; 2151 phydev->speed = tp->link_config.speed; 2152 phydev->duplex = tp->link_config.duplex; 2153 phydev->autoneg = tp->link_config.autoneg; 2154 ethtool_convert_legacy_u32_to_link_mode( 2155 phydev->advertising, tp->link_config.advertising); 2156 } 2157 2158 phy_start(phydev); 2159 2160 phy_start_aneg(phydev); 2161 } 2162 2163 static void tg3_phy_stop(struct tg3 *tp) 2164 { 2165 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 2166 return; 2167 2168 phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); 2169 } 2170 2171 static void tg3_phy_fini(struct tg3 *tp) 2172 { 2173 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { 2174 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); 2175 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED; 2176 } 2177 } 2178 2179 static int tg3_phy_set_extloopbk(struct tg3 *tp) 2180 { 2181 int err; 2182 u32 val; 2183 2184 if (tp->phy_flags & TG3_PHYFLG_IS_FET) 2185 return 0; 2186 2187 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { 2188 /* Cannot do read-modify-write on 5401 */ 2189 err = tg3_phy_auxctl_write(tp, 2190 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 2191 MII_TG3_AUXCTL_ACTL_EXTLOOPBK | 2192 0x4c20); 2193 goto done; 2194 } 2195 2196 err = tg3_phy_auxctl_read(tp, 2197 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val); 2198 if (err) 2199 return err; 2200 2201 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK; 2202 err = tg3_phy_auxctl_write(tp, 2203 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val); 2204 2205 done: 2206 return err; 2207 } 2208 2209 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable) 2210 { 2211 u32 phytest; 2212 2213 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) { 2214 u32 phy; 2215 2216 tg3_writephy(tp, MII_TG3_FET_TEST, 2217 phytest | MII_TG3_FET_SHADOW_EN); 2218 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) { 2219 if (enable) 2220 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD; 2221 else 2222 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD; 2223 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy); 2224 } 2225 tg3_writephy(tp, MII_TG3_FET_TEST, phytest); 2226 } 2227 } 2228 2229 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable) 2230 { 2231 u32 reg; 2232 2233 if (!tg3_flag(tp, 5705_PLUS) || 2234 (tg3_flag(tp, 5717_PLUS) && 2235 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))) 2236 return; 2237 2238 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 2239 tg3_phy_fet_toggle_apd(tp, enable); 2240 return; 2241 } 2242 2243 reg = MII_TG3_MISC_SHDW_SCR5_LPED | 2244 MII_TG3_MISC_SHDW_SCR5_DLPTLM | 2245 MII_TG3_MISC_SHDW_SCR5_SDTL | 2246 MII_TG3_MISC_SHDW_SCR5_C125OE; 2247 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable) 2248 reg |= 
MII_TG3_MISC_SHDW_SCR5_DLLAPD; 2249 2250 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg); 2251 2252 2253 reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS; 2254 if (enable) 2255 reg |= MII_TG3_MISC_SHDW_APD_ENABLE; 2256 2257 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg); 2258 } 2259 2260 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable) 2261 { 2262 u32 phy; 2263 2264 if (!tg3_flag(tp, 5705_PLUS) || 2265 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 2266 return; 2267 2268 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 2269 u32 ephy; 2270 2271 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) { 2272 u32 reg = MII_TG3_FET_SHDW_MISCCTRL; 2273 2274 tg3_writephy(tp, MII_TG3_FET_TEST, 2275 ephy | MII_TG3_FET_SHADOW_EN); 2276 if (!tg3_readphy(tp, reg, &phy)) { 2277 if (enable) 2278 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX; 2279 else 2280 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX; 2281 tg3_writephy(tp, reg, phy); 2282 } 2283 tg3_writephy(tp, MII_TG3_FET_TEST, ephy); 2284 } 2285 } else { 2286 int ret; 2287 2288 ret = tg3_phy_auxctl_read(tp, 2289 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy); 2290 if (!ret) { 2291 if (enable) 2292 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX; 2293 else 2294 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX; 2295 tg3_phy_auxctl_write(tp, 2296 MII_TG3_AUXCTL_SHDWSEL_MISC, phy); 2297 } 2298 } 2299 } 2300 2301 static void tg3_phy_set_wirespeed(struct tg3 *tp) 2302 { 2303 int ret; 2304 u32 val; 2305 2306 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) 2307 return; 2308 2309 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val); 2310 if (!ret) 2311 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, 2312 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN); 2313 } 2314 2315 static void tg3_phy_apply_otp(struct tg3 *tp) 2316 { 2317 u32 otp, phy; 2318 2319 if (!tp->phy_otp) 2320 return; 2321 2322 otp = tp->phy_otp; 2323 2324 if (tg3_phy_toggle_auxctl_smdsp(tp, true)) 2325 return; 2326 2327 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT); 2328 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT; 2329 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy); 2330 2331 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) | 2332 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT); 2333 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy); 2334 2335 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT); 2336 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ; 2337 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy); 2338 2339 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT); 2340 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy); 2341 2342 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT); 2343 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy); 2344 2345 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) | 2346 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT); 2347 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy); 2348 2349 tg3_phy_toggle_auxctl_smdsp(tp, false); 2350 } 2351 2352 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee) 2353 { 2354 u32 val; 2355 struct ethtool_eee *dest = &tp->eee; 2356 2357 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) 2358 return; 2359 2360 if (eee) 2361 dest = eee; 2362 2363 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val)) 2364 return; 2365 2366 /* Pull eee_active */ 2367 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T || 2368 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) { 2369 dest->eee_active = 1; 2370 } else 2371 dest->eee_active = 0; 2372 2373 /* Pull lp advertised settings */ 2374 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val)) 
2375 return; 2376 dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val); 2377 2378 /* Pull advertised and eee_enabled settings */ 2379 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val)) 2380 return; 2381 dest->eee_enabled = !!val; 2382 dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val); 2383 2384 /* Pull tx_lpi_enabled */ 2385 val = tr32(TG3_CPMU_EEE_MODE); 2386 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX); 2387 2388 /* Pull lpi timer value */ 2389 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff; 2390 } 2391 2392 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up) 2393 { 2394 u32 val; 2395 2396 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) 2397 return; 2398 2399 tp->setlpicnt = 0; 2400 2401 if (tp->link_config.autoneg == AUTONEG_ENABLE && 2402 current_link_up && 2403 tp->link_config.active_duplex == DUPLEX_FULL && 2404 (tp->link_config.active_speed == SPEED_100 || 2405 tp->link_config.active_speed == SPEED_1000)) { 2406 u32 eeectl; 2407 2408 if (tp->link_config.active_speed == SPEED_1000) 2409 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US; 2410 else 2411 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US; 2412 2413 tw32(TG3_CPMU_EEE_CTRL, eeectl); 2414 2415 tg3_eee_pull_config(tp, NULL); 2416 if (tp->eee.eee_active) 2417 tp->setlpicnt = 2; 2418 } 2419 2420 if (!tp->setlpicnt) { 2421 if (current_link_up && 2422 !tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2423 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000); 2424 tg3_phy_toggle_auxctl_smdsp(tp, false); 2425 } 2426 2427 val = tr32(TG3_CPMU_EEE_MODE); 2428 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE); 2429 } 2430 } 2431 2432 static void tg3_phy_eee_enable(struct tg3 *tp) 2433 { 2434 u32 val; 2435 2436 if (tp->link_config.active_speed == SPEED_1000 && 2437 (tg3_asic_rev(tp) == ASIC_REV_5717 || 2438 tg3_asic_rev(tp) == ASIC_REV_5719 || 2439 tg3_flag(tp, 57765_CLASS)) && 2440 !tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2441 val = MII_TG3_DSP_TAP26_ALNOKO | 2442 MII_TG3_DSP_TAP26_RMRXSTO; 2443 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); 2444 tg3_phy_toggle_auxctl_smdsp(tp, false); 2445 } 2446 2447 val = tr32(TG3_CPMU_EEE_MODE); 2448 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE); 2449 } 2450 2451 static int tg3_wait_macro_done(struct tg3 *tp) 2452 { 2453 int limit = 100; 2454 2455 while (limit--) { 2456 u32 tmp32; 2457 2458 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) { 2459 if ((tmp32 & 0x1000) == 0) 2460 break; 2461 } 2462 } 2463 if (limit < 0) 2464 return -EBUSY; 2465 2466 return 0; 2467 } 2468 2469 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp) 2470 { 2471 static const u32 test_pat[4][6] = { 2472 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 }, 2473 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 }, 2474 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 }, 2475 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 } 2476 }; 2477 int chan; 2478 2479 for (chan = 0; chan < 4; chan++) { 2480 int i; 2481 2482 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 2483 (chan * 0x2000) | 0x0200); 2484 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002); 2485 2486 for (i = 0; i < 6; i++) 2487 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 2488 test_pat[chan][i]); 2489 2490 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202); 2491 if (tg3_wait_macro_done(tp)) { 2492 *resetp = 1; 2493 return -EBUSY; 2494 } 2495 2496 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 2497 (chan * 0x2000) | 0x0200); 2498 tg3_writephy(tp, 
MII_TG3_DSP_CONTROL, 0x0082); 2499 if (tg3_wait_macro_done(tp)) { 2500 *resetp = 1; 2501 return -EBUSY; 2502 } 2503 2504 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802); 2505 if (tg3_wait_macro_done(tp)) { 2506 *resetp = 1; 2507 return -EBUSY; 2508 } 2509 2510 for (i = 0; i < 6; i += 2) { 2511 u32 low, high; 2512 2513 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) || 2514 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) || 2515 tg3_wait_macro_done(tp)) { 2516 *resetp = 1; 2517 return -EBUSY; 2518 } 2519 low &= 0x7fff; 2520 high &= 0x000f; 2521 if (low != test_pat[chan][i] || 2522 high != test_pat[chan][i+1]) { 2523 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b); 2524 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001); 2525 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005); 2526 2527 return -EBUSY; 2528 } 2529 } 2530 } 2531 2532 return 0; 2533 } 2534 2535 static int tg3_phy_reset_chanpat(struct tg3 *tp) 2536 { 2537 int chan; 2538 2539 for (chan = 0; chan < 4; chan++) { 2540 int i; 2541 2542 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 2543 (chan * 0x2000) | 0x0200); 2544 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002); 2545 for (i = 0; i < 6; i++) 2546 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000); 2547 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202); 2548 if (tg3_wait_macro_done(tp)) 2549 return -EBUSY; 2550 } 2551 2552 return 0; 2553 } 2554 2555 static int tg3_phy_reset_5703_4_5(struct tg3 *tp) 2556 { 2557 u32 reg32, phy9_orig; 2558 int retries, do_phy_reset, err; 2559 2560 retries = 10; 2561 do_phy_reset = 1; 2562 do { 2563 if (do_phy_reset) { 2564 err = tg3_bmcr_reset(tp); 2565 if (err) 2566 return err; 2567 do_phy_reset = 0; 2568 } 2569 2570 /* Disable transmitter and interrupt. */ 2571 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) 2572 continue; 2573 2574 reg32 |= 0x3000; 2575 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32); 2576 2577 /* Set full-duplex, 1000 mbps. */ 2578 tg3_writephy(tp, MII_BMCR, 2579 BMCR_FULLDPLX | BMCR_SPEED1000); 2580 2581 /* Set to master mode. */ 2582 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig)) 2583 continue; 2584 2585 tg3_writephy(tp, MII_CTRL1000, 2586 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER); 2587 2588 err = tg3_phy_toggle_auxctl_smdsp(tp, true); 2589 if (err) 2590 return err; 2591 2592 /* Block the PHY control access. */ 2593 tg3_phydsp_write(tp, 0x8005, 0x0800); 2594 2595 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset); 2596 if (!err) 2597 break; 2598 } while (--retries); 2599 2600 err = tg3_phy_reset_chanpat(tp); 2601 if (err) 2602 return err; 2603 2604 tg3_phydsp_write(tp, 0x8005, 0x0000); 2605 2606 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200); 2607 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000); 2608 2609 tg3_phy_toggle_auxctl_smdsp(tp, false); 2610 2611 tg3_writephy(tp, MII_CTRL1000, phy9_orig); 2612 2613 err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32); 2614 if (err) 2615 return err; 2616 2617 reg32 &= ~0x3000; 2618 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32); 2619 2620 return 0; 2621 } 2622 2623 static void tg3_carrier_off(struct tg3 *tp) 2624 { 2625 netif_carrier_off(tp->dev); 2626 tp->link_up = false; 2627 } 2628 2629 static void tg3_warn_mgmt_link_flap(struct tg3 *tp) 2630 { 2631 if (tg3_flag(tp, ENABLE_ASF)) 2632 netdev_warn(tp->dev, 2633 "Management side-band traffic will be interrupted during phy settings change\n"); 2634 } 2635 2636 /* Reset the tigon3 PHY completely and reapply the chip-specific 2637 * workarounds that the reset clears.
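 * On 5703/5704/5705 the reset is routed through
 * tg3_phy_reset_5703_4_5() and its channel test pattern; other chips
 * take a plain BMCR reset, after which the OTP, APD, and DSP
 * workarounds are reapplied and wirespeed/auto-MDIX are re-enabled.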
2638 */ 2639 static int tg3_phy_reset(struct tg3 *tp) 2640 { 2641 u32 val, cpmuctrl; 2642 int err; 2643 2644 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 2645 val = tr32(GRC_MISC_CFG); 2646 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ); 2647 udelay(40); 2648 } 2649 err = tg3_readphy(tp, MII_BMSR, &val); 2650 err |= tg3_readphy(tp, MII_BMSR, &val); 2651 if (err != 0) 2652 return -EBUSY; 2653 2654 if (netif_running(tp->dev) && tp->link_up) { 2655 netif_carrier_off(tp->dev); 2656 tg3_link_report(tp); 2657 } 2658 2659 if (tg3_asic_rev(tp) == ASIC_REV_5703 || 2660 tg3_asic_rev(tp) == ASIC_REV_5704 || 2661 tg3_asic_rev(tp) == ASIC_REV_5705) { 2662 err = tg3_phy_reset_5703_4_5(tp); 2663 if (err) 2664 return err; 2665 goto out; 2666 } 2667 2668 cpmuctrl = 0; 2669 if (tg3_asic_rev(tp) == ASIC_REV_5784 && 2670 tg3_chip_rev(tp) != CHIPREV_5784_AX) { 2671 cpmuctrl = tr32(TG3_CPMU_CTRL); 2672 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) 2673 tw32(TG3_CPMU_CTRL, 2674 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY); 2675 } 2676 2677 err = tg3_bmcr_reset(tp); 2678 if (err) 2679 return err; 2680 2681 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) { 2682 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz; 2683 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val); 2684 2685 tw32(TG3_CPMU_CTRL, cpmuctrl); 2686 } 2687 2688 if (tg3_chip_rev(tp) == CHIPREV_5784_AX || 2689 tg3_chip_rev(tp) == CHIPREV_5761_AX) { 2690 val = tr32(TG3_CPMU_LSPD_1000MB_CLK); 2691 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) == 2692 CPMU_LSPD_1000MB_MACCLK_12_5) { 2693 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK; 2694 udelay(40); 2695 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val); 2696 } 2697 } 2698 2699 if (tg3_flag(tp, 5717_PLUS) && 2700 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) 2701 return 0; 2702 2703 tg3_phy_apply_otp(tp); 2704 2705 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) 2706 tg3_phy_toggle_apd(tp, true); 2707 else 2708 tg3_phy_toggle_apd(tp, false); 2709 2710 out: 2711 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) && 2712 !tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2713 tg3_phydsp_write(tp, 0x201f, 0x2aaa); 2714 tg3_phydsp_write(tp, 0x000a, 0x0323); 2715 tg3_phy_toggle_auxctl_smdsp(tp, false); 2716 } 2717 2718 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) { 2719 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); 2720 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); 2721 } 2722 2723 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) { 2724 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2725 tg3_phydsp_write(tp, 0x000a, 0x310b); 2726 tg3_phydsp_write(tp, 0x201f, 0x9506); 2727 tg3_phydsp_write(tp, 0x401f, 0x14e2); 2728 tg3_phy_toggle_auxctl_smdsp(tp, false); 2729 } 2730 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) { 2731 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2732 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); 2733 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) { 2734 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b); 2735 tg3_writephy(tp, MII_TG3_TEST1, 2736 MII_TG3_TEST1_TRIM_EN | 0x4); 2737 } else 2738 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b); 2739 2740 tg3_phy_toggle_auxctl_smdsp(tp, false); 2741 } 2742 } 2743 2744 /* Set Extended packet length bit (bit 14) on all chips that */ 2745 /* support jumbo frames */ 2746 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { 2747 /* Cannot do read-modify-write on 5401 */ 2748 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20); 2749 } else if (tg3_flag(tp, JUMBO_CAPABLE)) { 2750 /* Set bit 14 with read-modify-write to preserve other bits */ 2751 err = tg3_phy_auxctl_read(tp, 2752 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val); 2753 
if (!err) 2754 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 2755 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN); 2756 } 2757 2758 /* Set phy register 0x10 bit 0 to high fifo elasticity to support 2759 * jumbo frames transmission. 2760 */ 2761 if (tg3_flag(tp, JUMBO_CAPABLE)) { 2762 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val)) 2763 tg3_writephy(tp, MII_TG3_EXT_CTRL, 2764 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC); 2765 } 2766 2767 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 2768 /* adjust output voltage */ 2769 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12); 2770 } 2771 2772 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0) 2773 tg3_phydsp_write(tp, 0xffb, 0x4000); 2774 2775 tg3_phy_toggle_automdix(tp, true); 2776 tg3_phy_set_wirespeed(tp); 2777 return 0; 2778 } 2779 2780 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001 2781 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002 2782 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \ 2783 TG3_GPIO_MSG_NEED_VAUX) 2784 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \ 2785 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \ 2786 (TG3_GPIO_MSG_DRVR_PRES << 4) | \ 2787 (TG3_GPIO_MSG_DRVR_PRES << 8) | \ 2788 (TG3_GPIO_MSG_DRVR_PRES << 12)) 2789 2790 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \ 2791 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \ 2792 (TG3_GPIO_MSG_NEED_VAUX << 4) | \ 2793 (TG3_GPIO_MSG_NEED_VAUX << 8) | \ 2794 (TG3_GPIO_MSG_NEED_VAUX << 12)) 2795 2796 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat) 2797 { 2798 u32 status, shift; 2799 2800 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 2801 tg3_asic_rev(tp) == ASIC_REV_5719) 2802 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG); 2803 else 2804 status = tr32(TG3_CPMU_DRV_STATUS); 2805 2806 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn; 2807 status &= ~(TG3_GPIO_MSG_MASK << shift); 2808 status |= (newstat << shift); 2809 2810 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 2811 tg3_asic_rev(tp) == ASIC_REV_5719) 2812 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status); 2813 else 2814 tw32(TG3_CPMU_DRV_STATUS, status); 2815 2816 return status >> TG3_APE_GPIO_MSG_SHIFT; 2817 } 2818 2819 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp) 2820 { 2821 if (!tg3_flag(tp, IS_NIC)) 2822 return 0; 2823 2824 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 2825 tg3_asic_rev(tp) == ASIC_REV_5719 || 2826 tg3_asic_rev(tp) == ASIC_REV_5720) { 2827 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO)) 2828 return -EIO; 2829 2830 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES); 2831 2832 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 2833 TG3_GRC_LCLCTL_PWRSW_DELAY); 2834 2835 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO); 2836 } else { 2837 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 2838 TG3_GRC_LCLCTL_PWRSW_DELAY); 2839 } 2840 2841 return 0; 2842 } 2843 2844 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp) 2845 { 2846 u32 grc_local_ctrl; 2847 2848 if (!tg3_flag(tp, IS_NIC) || 2849 tg3_asic_rev(tp) == ASIC_REV_5700 || 2850 tg3_asic_rev(tp) == ASIC_REV_5701) 2851 return; 2852 2853 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1; 2854 2855 tw32_wait_f(GRC_LOCAL_CTRL, 2856 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1, 2857 TG3_GRC_LCLCTL_PWRSW_DELAY); 2858 2859 tw32_wait_f(GRC_LOCAL_CTRL, 2860 grc_local_ctrl, 2861 TG3_GRC_LCLCTL_PWRSW_DELAY); 2862 2863 tw32_wait_f(GRC_LOCAL_CTRL, 2864 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1, 2865 TG3_GRC_LCLCTL_PWRSW_DELAY); 2866 } 2867 2868 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp) 2869 { 2870 if (!tg3_flag(tp, IS_NIC)) 2871 return; 2872 2873 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 2874 tg3_asic_rev(tp) == 
ASIC_REV_5701) { 2875 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | 2876 (GRC_LCLCTRL_GPIO_OE0 | 2877 GRC_LCLCTRL_GPIO_OE1 | 2878 GRC_LCLCTRL_GPIO_OE2 | 2879 GRC_LCLCTRL_GPIO_OUTPUT0 | 2880 GRC_LCLCTRL_GPIO_OUTPUT1), 2881 TG3_GRC_LCLCTL_PWRSW_DELAY); 2882 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || 2883 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { 2884 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */ 2885 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 | 2886 GRC_LCLCTRL_GPIO_OE1 | 2887 GRC_LCLCTRL_GPIO_OE2 | 2888 GRC_LCLCTRL_GPIO_OUTPUT0 | 2889 GRC_LCLCTRL_GPIO_OUTPUT1 | 2890 tp->grc_local_ctrl; 2891 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 2892 TG3_GRC_LCLCTL_PWRSW_DELAY); 2893 2894 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2; 2895 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 2896 TG3_GRC_LCLCTL_PWRSW_DELAY); 2897 2898 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0; 2899 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 2900 TG3_GRC_LCLCTL_PWRSW_DELAY); 2901 } else { 2902 u32 no_gpio2; 2903 u32 grc_local_ctrl = 0; 2904 2905 /* Workaround to prevent overdrawing Amps. */ 2906 if (tg3_asic_rev(tp) == ASIC_REV_5714) { 2907 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; 2908 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | 2909 grc_local_ctrl, 2910 TG3_GRC_LCLCTL_PWRSW_DELAY); 2911 } 2912 2913 /* On 5753 and variants, GPIO2 cannot be used. */ 2914 no_gpio2 = tp->nic_sram_data_cfg & 2915 NIC_SRAM_DATA_CFG_NO_GPIO2; 2916 2917 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | 2918 GRC_LCLCTRL_GPIO_OE1 | 2919 GRC_LCLCTRL_GPIO_OE2 | 2920 GRC_LCLCTRL_GPIO_OUTPUT1 | 2921 GRC_LCLCTRL_GPIO_OUTPUT2; 2922 if (no_gpio2) { 2923 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 | 2924 GRC_LCLCTRL_GPIO_OUTPUT2); 2925 } 2926 tw32_wait_f(GRC_LOCAL_CTRL, 2927 tp->grc_local_ctrl | grc_local_ctrl, 2928 TG3_GRC_LCLCTL_PWRSW_DELAY); 2929 2930 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0; 2931 2932 tw32_wait_f(GRC_LOCAL_CTRL, 2933 tp->grc_local_ctrl | grc_local_ctrl, 2934 TG3_GRC_LCLCTL_PWRSW_DELAY); 2935 2936 if (!no_gpio2) { 2937 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2; 2938 tw32_wait_f(GRC_LOCAL_CTRL, 2939 tp->grc_local_ctrl | grc_local_ctrl, 2940 TG3_GRC_LCLCTL_PWRSW_DELAY); 2941 } 2942 } 2943 } 2944 2945 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable) 2946 { 2947 u32 msg = 0; 2948 2949 /* Serialize power state transitions */ 2950 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO)) 2951 return; 2952 2953 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable) 2954 msg = TG3_GPIO_MSG_NEED_VAUX; 2955 2956 msg = tg3_set_function_status(tp, msg); 2957 2958 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK) 2959 goto done; 2960 2961 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK) 2962 tg3_pwrsrc_switch_to_vaux(tp); 2963 else 2964 tg3_pwrsrc_die_with_vmain(tp); 2965 2966 done: 2967 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO); 2968 } 2969 2970 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol) 2971 { 2972 bool need_vaux = false; 2973 2974 /* The GPIOs do something completely different on 57765. */ 2975 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS)) 2976 return; 2977 2978 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 2979 tg3_asic_rev(tp) == ASIC_REV_5719 || 2980 tg3_asic_rev(tp) == ASIC_REV_5720) { 2981 tg3_frob_aux_power_5717(tp, include_wol ? 
2982 tg3_flag(tp, WOL_ENABLE) != 0 : 0); 2983 return; 2984 } 2985 2986 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) { 2987 struct net_device *dev_peer; 2988 2989 dev_peer = pci_get_drvdata(tp->pdev_peer); 2990 2991 /* remove_one() may have been run on the peer. */ 2992 if (dev_peer) { 2993 struct tg3 *tp_peer = netdev_priv(dev_peer); 2994 2995 if (tg3_flag(tp_peer, INIT_COMPLETE)) 2996 return; 2997 2998 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) || 2999 tg3_flag(tp_peer, ENABLE_ASF)) 3000 need_vaux = true; 3001 } 3002 } 3003 3004 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) || 3005 tg3_flag(tp, ENABLE_ASF)) 3006 need_vaux = true; 3007 3008 if (need_vaux) 3009 tg3_pwrsrc_switch_to_vaux(tp); 3010 else 3011 tg3_pwrsrc_die_with_vmain(tp); 3012 } 3013 3014 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed) 3015 { 3016 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2) 3017 return 1; 3018 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) { 3019 if (speed != SPEED_10) 3020 return 1; 3021 } else if (speed == SPEED_10) 3022 return 1; 3023 3024 return 0; 3025 } 3026 3027 static bool tg3_phy_power_bug(struct tg3 *tp) 3028 { 3029 switch (tg3_asic_rev(tp)) { 3030 case ASIC_REV_5700: 3031 case ASIC_REV_5704: 3032 return true; 3033 case ASIC_REV_5780: 3034 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 3035 return true; 3036 return false; 3037 case ASIC_REV_5717: 3038 if (!tp->pci_fn) 3039 return true; 3040 return false; 3041 case ASIC_REV_5719: 3042 case ASIC_REV_5720: 3043 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 3044 !tp->pci_fn) 3045 return true; 3046 return false; 3047 } 3048 3049 return false; 3050 } 3051 3052 static bool tg3_phy_led_bug(struct tg3 *tp) 3053 { 3054 switch (tg3_asic_rev(tp)) { 3055 case ASIC_REV_5719: 3056 case ASIC_REV_5720: 3057 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && 3058 !tp->pci_fn) 3059 return true; 3060 return false; 3061 } 3062 3063 return false; 3064 } 3065 3066 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) 3067 { 3068 u32 val; 3069 3070 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) 3071 return; 3072 3073 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 3074 if (tg3_asic_rev(tp) == ASIC_REV_5704) { 3075 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL); 3076 u32 serdes_cfg = tr32(MAC_SERDES_CFG); 3077 3078 sg_dig_ctrl |= 3079 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET; 3080 tw32(SG_DIG_CTRL, sg_dig_ctrl); 3081 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15)); 3082 } 3083 return; 3084 } 3085 3086 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 3087 tg3_bmcr_reset(tp); 3088 val = tr32(GRC_MISC_CFG); 3089 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ); 3090 udelay(40); 3091 return; 3092 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 3093 u32 phytest; 3094 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) { 3095 u32 phy; 3096 3097 tg3_writephy(tp, MII_ADVERTISE, 0); 3098 tg3_writephy(tp, MII_BMCR, 3099 BMCR_ANENABLE | BMCR_ANRESTART); 3100 3101 tg3_writephy(tp, MII_TG3_FET_TEST, 3102 phytest | MII_TG3_FET_SHADOW_EN); 3103 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) { 3104 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD; 3105 tg3_writephy(tp, 3106 MII_TG3_FET_SHDW_AUXMODE4, 3107 phy); 3108 } 3109 tg3_writephy(tp, MII_TG3_FET_TEST, phytest); 3110 } 3111 return; 3112 } else if (do_low_power) { 3113 if (!tg3_phy_led_bug(tp)) 3114 tg3_writephy(tp, MII_TG3_EXT_CTRL, 3115 MII_TG3_EXT_CTRL_FORCE_LED_OFF); 3116 3117 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR | 3118 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE | 3119 MII_TG3_AUXCTL_PCTL_VREG_11V; 3120 
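/* Write the low-power, isolate, and regulator bits assembled above
 * to the power-control shadow register.
 */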
tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val); 3121 } 3122 3123 /* The PHY should not be powered down on some chips because 3124 * of bugs. 3125 */ 3126 if (tg3_phy_power_bug(tp)) 3127 return; 3128 3129 if (tg3_chip_rev(tp) == CHIPREV_5784_AX || 3130 tg3_chip_rev(tp) == CHIPREV_5761_AX) { 3131 val = tr32(TG3_CPMU_LSPD_1000MB_CLK); 3132 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK; 3133 val |= CPMU_LSPD_1000MB_MACCLK_12_5; 3134 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val); 3135 } 3136 3137 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN); 3138 } 3139 3140 /* tp->lock is held. */ 3141 static int tg3_nvram_lock(struct tg3 *tp) 3142 { 3143 if (tg3_flag(tp, NVRAM)) { 3144 int i; 3145 3146 if (tp->nvram_lock_cnt == 0) { 3147 tw32(NVRAM_SWARB, SWARB_REQ_SET1); 3148 for (i = 0; i < 8000; i++) { 3149 if (tr32(NVRAM_SWARB) & SWARB_GNT1) 3150 break; 3151 udelay(20); 3152 } 3153 if (i == 8000) { 3154 tw32(NVRAM_SWARB, SWARB_REQ_CLR1); 3155 return -ENODEV; 3156 } 3157 } 3158 tp->nvram_lock_cnt++; 3159 } 3160 return 0; 3161 } 3162 3163 /* tp->lock is held. */ 3164 static void tg3_nvram_unlock(struct tg3 *tp) 3165 { 3166 if (tg3_flag(tp, NVRAM)) { 3167 if (tp->nvram_lock_cnt > 0) 3168 tp->nvram_lock_cnt--; 3169 if (tp->nvram_lock_cnt == 0) 3170 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1); 3171 } 3172 } 3173 3174 /* tp->lock is held. */ 3175 static void tg3_enable_nvram_access(struct tg3 *tp) 3176 { 3177 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) { 3178 u32 nvaccess = tr32(NVRAM_ACCESS); 3179 3180 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE); 3181 } 3182 } 3183 3184 /* tp->lock is held. */ 3185 static void tg3_disable_nvram_access(struct tg3 *tp) 3186 { 3187 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) { 3188 u32 nvaccess = tr32(NVRAM_ACCESS); 3189 3190 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE); 3191 } 3192 } 3193 3194 static int tg3_nvram_read_using_eeprom(struct tg3 *tp, 3195 u32 offset, u32 *val) 3196 { 3197 u32 tmp; 3198 int i; 3199 3200 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0) 3201 return -EINVAL; 3202 3203 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK | 3204 EEPROM_ADDR_DEVID_MASK | 3205 EEPROM_ADDR_READ); 3206 tw32(GRC_EEPROM_ADDR, 3207 tmp | 3208 (0 << EEPROM_ADDR_DEVID_SHIFT) | 3209 ((offset << EEPROM_ADDR_ADDR_SHIFT) & 3210 EEPROM_ADDR_ADDR_MASK) | 3211 EEPROM_ADDR_READ | EEPROM_ADDR_START); 3212 3213 for (i = 0; i < 1000; i++) { 3214 tmp = tr32(GRC_EEPROM_ADDR); 3215 3216 if (tmp & EEPROM_ADDR_COMPLETE) 3217 break; 3218 msleep(1); 3219 } 3220 if (!(tmp & EEPROM_ADDR_COMPLETE)) 3221 return -EBUSY; 3222 3223 tmp = tr32(GRC_EEPROM_DATA); 3224 3225 /* 3226 * The data will always be opposite the native endian 3227 * format. Perform a blind byteswap to compensate. 
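 * swab32() reverses all four bytes unconditionally
 * (0x12345678 <-> 0x78563412), which is the correct compensation
 * precisely because the mismatch does not depend on host endianness.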
3228 */ 3229 *val = swab32(tmp); 3230 3231 return 0; 3232 } 3233 3234 #define NVRAM_CMD_TIMEOUT 10000 3235 3236 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd) 3237 { 3238 int i; 3239 3240 tw32(NVRAM_CMD, nvram_cmd); 3241 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) { 3242 usleep_range(10, 40); 3243 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) { 3244 udelay(10); 3245 break; 3246 } 3247 } 3248 3249 if (i == NVRAM_CMD_TIMEOUT) 3250 return -EBUSY; 3251 3252 return 0; 3253 } 3254 3255 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr) 3256 { 3257 if (tg3_flag(tp, NVRAM) && 3258 tg3_flag(tp, NVRAM_BUFFERED) && 3259 tg3_flag(tp, FLASH) && 3260 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) && 3261 (tp->nvram_jedecnum == JEDEC_ATMEL)) 3262 3263 addr = ((addr / tp->nvram_pagesize) << 3264 ATMEL_AT45DB0X1B_PAGE_POS) + 3265 (addr % tp->nvram_pagesize); 3266 3267 return addr; 3268 } 3269 3270 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr) 3271 { 3272 if (tg3_flag(tp, NVRAM) && 3273 tg3_flag(tp, NVRAM_BUFFERED) && 3274 tg3_flag(tp, FLASH) && 3275 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) && 3276 (tp->nvram_jedecnum == JEDEC_ATMEL)) 3277 3278 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) * 3279 tp->nvram_pagesize) + 3280 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1)); 3281 3282 return addr; 3283 } 3284 3285 /* NOTE: Data read in from NVRAM is byteswapped according to 3286 * the byteswapping settings for all other register accesses. 3287 * tg3 devices are BE devices, so on a BE machine, the data 3288 * returned will be exactly as it is seen in NVRAM. On a LE 3289 * machine, the 32-bit value will be byteswapped. 3290 */ 3291 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val) 3292 { 3293 int ret; 3294 3295 if (!tg3_flag(tp, NVRAM)) 3296 return tg3_nvram_read_using_eeprom(tp, offset, val); 3297 3298 offset = tg3_nvram_phys_addr(tp, offset); 3299 3300 if (offset > NVRAM_ADDR_MSK) 3301 return -EINVAL; 3302 3303 ret = tg3_nvram_lock(tp); 3304 if (ret) 3305 return ret; 3306 3307 tg3_enable_nvram_access(tp); 3308 3309 tw32(NVRAM_ADDR, offset); 3310 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO | 3311 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE); 3312 3313 if (ret == 0) 3314 *val = tr32(NVRAM_RDDATA); 3315 3316 tg3_disable_nvram_access(tp); 3317 3318 tg3_nvram_unlock(tp); 3319 3320 return ret; 3321 } 3322 3323 /* Ensures NVRAM data is in bytestream format. */ 3324 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val) 3325 { 3326 u32 v; 3327 int res = tg3_nvram_read(tp, offset, &v); 3328 if (!res) 3329 *val = cpu_to_be32(v); 3330 return res; 3331 } 3332 3333 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp, 3334 u32 offset, u32 len, u8 *buf) 3335 { 3336 int i, j, rc = 0; 3337 u32 val; 3338 3339 for (i = 0; i < len; i += 4) { 3340 u32 addr; 3341 __be32 data; 3342 3343 addr = offset + i; 3344 3345 memcpy(&data, buf + i, 4); 3346 3347 /* 3348 * The SEEPROM interface expects the data to always be opposite 3349 * the native endian format. We accomplish this by reversing 3350 * all the operations that would have been performed on the 3351 * data from a call to tg3_nvram_read_be32(). 
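 * The read path computes cpu_to_be32(swab32(raw)), so the inverse,
 * swab32(be32_to_cpu(data)), recovers the raw register format below.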
3352 */ 3353 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data))); 3354 3355 val = tr32(GRC_EEPROM_ADDR); 3356 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE); 3357 3358 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK | 3359 EEPROM_ADDR_READ); 3360 tw32(GRC_EEPROM_ADDR, val | 3361 (0 << EEPROM_ADDR_DEVID_SHIFT) | 3362 (addr & EEPROM_ADDR_ADDR_MASK) | 3363 EEPROM_ADDR_START | 3364 EEPROM_ADDR_WRITE); 3365 3366 for (j = 0; j < 1000; j++) { 3367 val = tr32(GRC_EEPROM_ADDR); 3368 3369 if (val & EEPROM_ADDR_COMPLETE) 3370 break; 3371 msleep(1); 3372 } 3373 if (!(val & EEPROM_ADDR_COMPLETE)) { 3374 rc = -EBUSY; 3375 break; 3376 } 3377 } 3378 3379 return rc; 3380 } 3381 3382 /* offset and length are dword aligned */ 3383 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len, 3384 u8 *buf) 3385 { 3386 int ret = 0; 3387 u32 pagesize = tp->nvram_pagesize; 3388 u32 pagemask = pagesize - 1; 3389 u32 nvram_cmd; 3390 u8 *tmp; 3391 3392 tmp = kmalloc(pagesize, GFP_KERNEL); 3393 if (tmp == NULL) 3394 return -ENOMEM; 3395 3396 while (len) { 3397 int j; 3398 u32 phy_addr, page_off, size; 3399 3400 phy_addr = offset & ~pagemask; 3401 3402 for (j = 0; j < pagesize; j += 4) { 3403 ret = tg3_nvram_read_be32(tp, phy_addr + j, 3404 (__be32 *) (tmp + j)); 3405 if (ret) 3406 break; 3407 } 3408 if (ret) 3409 break; 3410 3411 page_off = offset & pagemask; 3412 size = pagesize; 3413 if (len < size) 3414 size = len; 3415 3416 len -= size; 3417 3418 memcpy(tmp + page_off, buf, size); 3419 3420 offset = offset + (pagesize - page_off); 3421 3422 tg3_enable_nvram_access(tp); 3423 3424 /* 3425 * Before we can erase the flash page, we need 3426 * to issue a special "write enable" command. 3427 */ 3428 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; 3429 3430 if (tg3_nvram_exec_cmd(tp, nvram_cmd)) 3431 break; 3432 3433 /* Erase the target page */ 3434 tw32(NVRAM_ADDR, phy_addr); 3435 3436 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR | 3437 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE; 3438 3439 if (tg3_nvram_exec_cmd(tp, nvram_cmd)) 3440 break; 3441 3442 /* Issue another write enable to start the write. 
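 * The flash clears its write-enable latch once the preceding erase
 * completes, so WREN must be sent again before the program command.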
*/ 3443 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; 3444 3445 if (tg3_nvram_exec_cmd(tp, nvram_cmd)) 3446 break; 3447 3448 for (j = 0; j < pagesize; j += 4) { 3449 __be32 data; 3450 3451 data = *((__be32 *) (tmp + j)); 3452 3453 tw32(NVRAM_WRDATA, be32_to_cpu(data)); 3454 3455 tw32(NVRAM_ADDR, phy_addr + j); 3456 3457 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | 3458 NVRAM_CMD_WR; 3459 3460 if (j == 0) 3461 nvram_cmd |= NVRAM_CMD_FIRST; 3462 else if (j == (pagesize - 4)) 3463 nvram_cmd |= NVRAM_CMD_LAST; 3464 3465 ret = tg3_nvram_exec_cmd(tp, nvram_cmd); 3466 if (ret) 3467 break; 3468 } 3469 if (ret) 3470 break; 3471 } 3472 3473 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE; 3474 tg3_nvram_exec_cmd(tp, nvram_cmd); 3475 3476 kfree(tmp); 3477 3478 return ret; 3479 } 3480 3481 /* offset and length are dword aligned */ 3482 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len, 3483 u8 *buf) 3484 { 3485 int i, ret = 0; 3486 3487 for (i = 0; i < len; i += 4, offset += 4) { 3488 u32 page_off, phy_addr, nvram_cmd; 3489 __be32 data; 3490 3491 memcpy(&data, buf + i, 4); 3492 tw32(NVRAM_WRDATA, be32_to_cpu(data)); 3493 3494 page_off = offset % tp->nvram_pagesize; 3495 3496 phy_addr = tg3_nvram_phys_addr(tp, offset); 3497 3498 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR; 3499 3500 if (page_off == 0 || i == 0) 3501 nvram_cmd |= NVRAM_CMD_FIRST; 3502 if (page_off == (tp->nvram_pagesize - 4)) 3503 nvram_cmd |= NVRAM_CMD_LAST; 3504 3505 if (i == (len - 4)) 3506 nvram_cmd |= NVRAM_CMD_LAST; 3507 3508 if ((nvram_cmd & NVRAM_CMD_FIRST) || 3509 !tg3_flag(tp, FLASH) || 3510 !tg3_flag(tp, 57765_PLUS)) 3511 tw32(NVRAM_ADDR, phy_addr); 3512 3513 if (tg3_asic_rev(tp) != ASIC_REV_5752 && 3514 !tg3_flag(tp, 5755_PLUS) && 3515 (tp->nvram_jedecnum == JEDEC_ST) && 3516 (nvram_cmd & NVRAM_CMD_FIRST)) { 3517 u32 cmd; 3518 3519 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; 3520 ret = tg3_nvram_exec_cmd(tp, cmd); 3521 if (ret) 3522 break; 3523 } 3524 if (!tg3_flag(tp, FLASH)) { 3525 /* We always do complete word writes to eeprom. 
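 * Forcing FIRST and LAST on every word makes each 4-byte write a
 * self-contained transaction.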
*/ 3526 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST); 3527 } 3528 3529 ret = tg3_nvram_exec_cmd(tp, nvram_cmd); 3530 if (ret) 3531 break; 3532 } 3533 return ret; 3534 } 3535 3536 /* offset and length are dword aligned */ 3537 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf) 3538 { 3539 int ret; 3540 3541 if (tg3_flag(tp, EEPROM_WRITE_PROT)) { 3542 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl & 3543 ~GRC_LCLCTRL_GPIO_OUTPUT1); 3544 udelay(40); 3545 } 3546 3547 if (!tg3_flag(tp, NVRAM)) { 3548 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf); 3549 } else { 3550 u32 grc_mode; 3551 3552 ret = tg3_nvram_lock(tp); 3553 if (ret) 3554 return ret; 3555 3556 tg3_enable_nvram_access(tp); 3557 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) 3558 tw32(NVRAM_WRITE1, 0x406); 3559 3560 grc_mode = tr32(GRC_MODE); 3561 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE); 3562 3563 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) { 3564 ret = tg3_nvram_write_block_buffered(tp, offset, len, 3565 buf); 3566 } else { 3567 ret = tg3_nvram_write_block_unbuffered(tp, offset, len, 3568 buf); 3569 } 3570 3571 grc_mode = tr32(GRC_MODE); 3572 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE); 3573 3574 tg3_disable_nvram_access(tp); 3575 tg3_nvram_unlock(tp); 3576 } 3577 3578 if (tg3_flag(tp, EEPROM_WRITE_PROT)) { 3579 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 3580 udelay(40); 3581 } 3582 3583 return ret; 3584 } 3585 3586 #define RX_CPU_SCRATCH_BASE 0x30000 3587 #define RX_CPU_SCRATCH_SIZE 0x04000 3588 #define TX_CPU_SCRATCH_BASE 0x34000 3589 #define TX_CPU_SCRATCH_SIZE 0x04000 3590 3591 /* tp->lock is held. */ 3592 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base) 3593 { 3594 int i; 3595 const int iters = 10000; 3596 3597 for (i = 0; i < iters; i++) { 3598 tw32(cpu_base + CPU_STATE, 0xffffffff); 3599 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); 3600 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT) 3601 break; 3602 if (pci_channel_offline(tp->pdev)) 3603 return -EBUSY; 3604 } 3605 3606 return (i == iters) ? -EBUSY : 0; 3607 } 3608 3609 /* tp->lock is held. */ 3610 static int tg3_rxcpu_pause(struct tg3 *tp) 3611 { 3612 int rc = tg3_pause_cpu(tp, RX_CPU_BASE); 3613 3614 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); 3615 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT); 3616 udelay(10); 3617 3618 return rc; 3619 } 3620 3621 /* tp->lock is held. */ 3622 static int tg3_txcpu_pause(struct tg3 *tp) 3623 { 3624 return tg3_pause_cpu(tp, TX_CPU_BASE); 3625 } 3626 3627 /* tp->lock is held. */ 3628 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base) 3629 { 3630 tw32(cpu_base + CPU_STATE, 0xffffffff); 3631 tw32_f(cpu_base + CPU_MODE, 0x00000000); 3632 } 3633 3634 /* tp->lock is held. */ 3635 static void tg3_rxcpu_resume(struct tg3 *tp) 3636 { 3637 tg3_resume_cpu(tp, RX_CPU_BASE); 3638 } 3639 3640 /* tp->lock is held. */ 3641 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base) 3642 { 3643 int rc; 3644 3645 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)); 3646 3647 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 3648 u32 val = tr32(GRC_VCPU_EXT_CTRL); 3649 3650 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU); 3651 return 0; 3652 } 3653 if (cpu_base == RX_CPU_BASE) { 3654 rc = tg3_rxcpu_pause(tp); 3655 } else { 3656 /* 3657 * There is only an Rx CPU for the 5750 derivative in the 3658 * BCM4785. 
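 * Pausing the (nonexistent) TX CPU is therefore a no-op on SSB
 * cores.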
3659 */ 3660 if (tg3_flag(tp, IS_SSB_CORE)) 3661 return 0; 3662 3663 rc = tg3_txcpu_pause(tp); 3664 } 3665 3666 if (rc) { 3667 netdev_err(tp->dev, "%s timed out, %s CPU\n", 3668 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX"); 3669 return -ENODEV; 3670 } 3671 3672 /* Clear firmware's nvram arbitration. */ 3673 if (tg3_flag(tp, NVRAM)) 3674 tw32(NVRAM_SWARB, SWARB_REQ_CLR0); 3675 return 0; 3676 } 3677 3678 static int tg3_fw_data_len(struct tg3 *tp, 3679 const struct tg3_firmware_hdr *fw_hdr) 3680 { 3681 int fw_len; 3682 3683 /* Non-fragmented firmware has one firmware header followed by a 3684 * contiguous chunk of data to be written. The length field in that 3685 * header is not the length of data to be written but the complete 3686 * length of the bss. The data length is determined based on 3687 * tp->fw->size minus headers. 3688 * 3689 * Fragmented firmware has a main header followed by multiple 3690 * fragments. Each fragment is identical to non-fragmented firmware 3691 * with a firmware header followed by a contiguous chunk of data. In 3692 * the main header, the length field is unused and set to 0xffffffff. 3693 * In each fragment header the length is the entire size of that 3694 * fragment i.e. fragment data + header length. The data length is 3695 * therefore the length field in the header minus TG3_FW_HDR_LEN. 3696 */ 3697 if (tp->fw_len == 0xffffffff) 3698 fw_len = be32_to_cpu(fw_hdr->len); 3699 else 3700 fw_len = tp->fw->size; 3701 3702 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32); 3703 } 3704 3705 /* tp->lock is held. */ 3706 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, 3707 u32 cpu_scratch_base, int cpu_scratch_size, 3708 const struct tg3_firmware_hdr *fw_hdr) 3709 { 3710 int err, i; 3711 void (*write_op)(struct tg3 *, u32, u32); 3712 int total_len = tp->fw->size; 3713 3714 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) { 3715 netdev_err(tp->dev, 3716 "%s: Trying to load TX cpu firmware on a 5705-class chip\n", 3717 __func__); 3718 return -EINVAL; 3719 } 3720 3721 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766) 3722 write_op = tg3_write_mem; 3723 else 3724 write_op = tg3_write_indirect_reg32; 3725 3726 if (tg3_asic_rev(tp) != ASIC_REV_57766) { 3727 /* It is possible that bootcode is still loading at this point. 3728 * Get the nvram lock first before halting the cpu. 3729 */ 3730 int lock_err = tg3_nvram_lock(tp); 3731 err = tg3_halt_cpu(tp, cpu_base); 3732 if (!lock_err) 3733 tg3_nvram_unlock(tp); 3734 if (err) 3735 goto out; 3736 3737 for (i = 0; i < cpu_scratch_size; i += sizeof(u32)) 3738 write_op(tp, cpu_scratch_base + i, 0); 3739 tw32(cpu_base + CPU_STATE, 0xffffffff); 3740 tw32(cpu_base + CPU_MODE, 3741 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT); 3742 } else { 3743 /* Subtract additional main header for fragmented firmware and 3744 * advance to the first fragment 3745 */ 3746 total_len -= TG3_FW_HDR_LEN; 3747 fw_hdr++; 3748 } 3749 3750 do { 3751 u32 *fw_data = (u32 *)(fw_hdr + 1); 3752 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++) 3753 write_op(tp, cpu_scratch_base + 3754 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) + 3755 (i * sizeof(u32)), 3756 be32_to_cpu(fw_data[i])); 3757 3758 total_len -= be32_to_cpu(fw_hdr->len); 3759 3760 /* Advance to next fragment */ 3761 fw_hdr = (struct tg3_firmware_hdr *) 3762 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len)); 3763 } while (total_len > 0); 3764 3765 err = 0; 3766 3767 out: 3768 return err; 3769 } 3770 3771 /* tp->lock is held.
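 *
 * Writes @pc to the CPU's program-counter register and verifies the
 * write took effect; on a mismatch it re-halts the CPU and retries
 * the write, up to five times, before giving up with -EBUSY.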
*/ 3772 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc) 3773 { 3774 int i; 3775 const int iters = 5; 3776 3777 tw32(cpu_base + CPU_STATE, 0xffffffff); 3778 tw32_f(cpu_base + CPU_PC, pc); 3779 3780 for (i = 0; i < iters; i++) { 3781 if (tr32(cpu_base + CPU_PC) == pc) 3782 break; 3783 tw32(cpu_base + CPU_STATE, 0xffffffff); 3784 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); 3785 tw32_f(cpu_base + CPU_PC, pc); 3786 udelay(1000); 3787 } 3788 3789 return (i == iters) ? -EBUSY : 0; 3790 } 3791 3792 /* tp->lock is held. */ 3793 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp) 3794 { 3795 const struct tg3_firmware_hdr *fw_hdr; 3796 int err; 3797 3798 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; 3799 3800 /* Firmware blob starts with version numbers, followed by 3801 start address and length. We are setting complete length. 3802 length = end_address_of_bss - start_address_of_text. 3803 Remainder is the blob to be loaded contiguously 3804 from start address. */ 3805 3806 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE, 3807 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE, 3808 fw_hdr); 3809 if (err) 3810 return err; 3811 3812 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE, 3813 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE, 3814 fw_hdr); 3815 if (err) 3816 return err; 3817 3818 /* Now startup only the RX cpu. */ 3819 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE, 3820 be32_to_cpu(fw_hdr->base_addr)); 3821 if (err) { 3822 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x " 3823 "should be %08x\n", __func__, 3824 tr32(RX_CPU_BASE + CPU_PC), 3825 be32_to_cpu(fw_hdr->base_addr)); 3826 return -ENODEV; 3827 } 3828 3829 tg3_rxcpu_resume(tp); 3830 3831 return 0; 3832 } 3833 3834 static int tg3_validate_rxcpu_state(struct tg3 *tp) 3835 { 3836 const int iters = 1000; 3837 int i; 3838 u32 val; 3839 3840 /* Wait for boot code to complete initialization and enter service 3841 * loop. It is then safe to download service patches. 3842 */ 3843 for (i = 0; i < iters; i++) { 3844 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP) 3845 break; 3846 3847 udelay(10); 3848 } 3849 3850 if (i == iters) { 3851 netdev_err(tp->dev, "Boot code not ready for service patches\n"); 3852 return -EBUSY; 3853 } 3854 3855 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE); 3856 if (val & 0xff) { 3857 netdev_warn(tp->dev, 3858 "Other patches exist. Not downloading EEE patch\n"); 3859 return -EEXIST; 3860 } 3861 3862 return 0; 3863 } 3864 3865 /* tp->lock is held. */ 3866 static void tg3_load_57766_firmware(struct tg3 *tp) 3867 { 3868 struct tg3_firmware_hdr *fw_hdr; 3869 3870 if (!tg3_flag(tp, NO_NVRAM)) 3871 return; 3872 3873 if (tg3_validate_rxcpu_state(tp)) 3874 return; 3875 3876 if (!tp->fw) 3877 return; 3878 3879 /* This firmware blob has a different format than older firmware 3880 * releases as given below. The main difference is we have fragmented 3881 * data to be written to non-contiguous locations. 3882 * 3883 * In the beginning we have a firmware header identical to other 3884 * firmware which consists of version, base addr and length. The length 3885 * here is unused and set to 0xffffffff. 3886 * 3887 * This is followed by a series of firmware fragments which are 3888 * individually identical to previous firmware, i.e. they have the 3889 * firmware header followed by the data for that fragment. The version 3890 * field of the individual fragment header is unused.
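 *
 * Schematically:
 *
 *   [ main hdr:  version | base_addr | len = 0xffffffff ]
 *   [ frag hdr:  version (unused) | base_addr | len = hdr + data ]
 *   [ fragment data ... ]
 *   [ frag hdr ][ fragment data ... ] and so on.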
3891 */ 3892 3893 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; 3894 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR) 3895 return; 3896 3897 if (tg3_rxcpu_pause(tp)) 3898 return; 3899 3900 /* tg3_load_firmware_cpu() will always succeed for the 57766 */ 3901 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr); 3902 3903 tg3_rxcpu_resume(tp); 3904 } 3905 3906 /* tp->lock is held. */ 3907 static int tg3_load_tso_firmware(struct tg3 *tp) 3908 { 3909 const struct tg3_firmware_hdr *fw_hdr; 3910 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size; 3911 int err; 3912 3913 if (!tg3_flag(tp, FW_TSO)) 3914 return 0; 3915 3916 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; 3917 3918 /* Firmware blob starts with version numbers, followed by 3919 start address and length. We are setting complete length. 3920 length = end_address_of_bss - start_address_of_text. 3921 Remainder is the blob to be loaded contiguously 3922 from start address. */ 3923 3924 cpu_scratch_size = tp->fw_len; 3925 3926 if (tg3_asic_rev(tp) == ASIC_REV_5705) { 3927 cpu_base = RX_CPU_BASE; 3928 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705; 3929 } else { 3930 cpu_base = TX_CPU_BASE; 3931 cpu_scratch_base = TX_CPU_SCRATCH_BASE; 3932 cpu_scratch_size = TX_CPU_SCRATCH_SIZE; 3933 } 3934 3935 err = tg3_load_firmware_cpu(tp, cpu_base, 3936 cpu_scratch_base, cpu_scratch_size, 3937 fw_hdr); 3938 if (err) 3939 return err; 3940 3941 /* Now startup the cpu. */ 3942 err = tg3_pause_cpu_and_set_pc(tp, cpu_base, 3943 be32_to_cpu(fw_hdr->base_addr)); 3944 if (err) { 3945 netdev_err(tp->dev, 3946 "%s fails to set CPU PC, is %08x should be %08x\n", 3947 __func__, tr32(cpu_base + CPU_PC), 3948 be32_to_cpu(fw_hdr->base_addr)); 3949 return -ENODEV; 3950 } 3951 3952 tg3_resume_cpu(tp, cpu_base); 3953 return 0; 3954 } 3955 3956 /* tp->lock is held. */ 3957 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index) 3958 { 3959 u32 addr_high, addr_low; 3960 3961 addr_high = ((mac_addr[0] << 8) | mac_addr[1]); 3962 addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) | 3963 (mac_addr[4] << 8) | mac_addr[5]); 3964 3965 if (index < 4) { 3966 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high); 3967 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low); 3968 } else { 3969 index -= 4; 3970 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high); 3971 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low); 3972 } 3973 } 3974 3975 /* tp->lock is held. */ 3976 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1) 3977 { 3978 u32 addr_high; 3979 int i; 3980 3981 for (i = 0; i < 4; i++) { 3982 if (i == 1 && skip_mac_1) 3983 continue; 3984 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i); 3985 } 3986 3987 if (tg3_asic_rev(tp) == ASIC_REV_5703 || 3988 tg3_asic_rev(tp) == ASIC_REV_5704) { 3989 for (i = 4; i < 16; i++) 3990 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i); 3991 } 3992 3993 addr_high = (tp->dev->dev_addr[0] + 3994 tp->dev->dev_addr[1] + 3995 tp->dev->dev_addr[2] + 3996 tp->dev->dev_addr[3] + 3997 tp->dev->dev_addr[4] + 3998 tp->dev->dev_addr[5]) & 3999 TX_BACKOFF_SEED_MASK; 4000 tw32(MAC_TX_BACKOFF_SEED, addr_high); 4001 } 4002 4003 static void tg3_enable_register_access(struct tg3 *tp) 4004 { 4005 /* 4006 * Make sure register accesses (indirect or otherwise) will function 4007 * correctly. 
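 * The saved TG3PCI_MISC_HOST_CTRL value is rewritten below because
 * PCI configuration space may have been lost across a power-state
 * transition.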
4008 */ 4009 pci_write_config_dword(tp->pdev, 4010 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl); 4011 } 4012 4013 static int tg3_power_up(struct tg3 *tp) 4014 { 4015 int err; 4016 4017 tg3_enable_register_access(tp); 4018 4019 err = pci_set_power_state(tp->pdev, PCI_D0); 4020 if (!err) { 4021 /* Switch out of Vaux if it is a NIC */ 4022 tg3_pwrsrc_switch_to_vmain(tp); 4023 } else { 4024 netdev_err(tp->dev, "Transition to D0 failed\n"); 4025 } 4026 4027 return err; 4028 } 4029 4030 static int tg3_setup_phy(struct tg3 *, bool); 4031 4032 static int tg3_power_down_prepare(struct tg3 *tp) 4033 { 4034 u32 misc_host_ctrl; 4035 bool device_should_wake, do_low_power; 4036 4037 tg3_enable_register_access(tp); 4038 4039 /* Restore the CLKREQ setting. */ 4040 if (tg3_flag(tp, CLKREQ_BUG)) 4041 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL, 4042 PCI_EXP_LNKCTL_CLKREQ_EN); 4043 4044 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); 4045 tw32(TG3PCI_MISC_HOST_CTRL, 4046 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT); 4047 4048 device_should_wake = device_may_wakeup(&tp->pdev->dev) && 4049 tg3_flag(tp, WOL_ENABLE); 4050 4051 if (tg3_flag(tp, USE_PHYLIB)) { 4052 do_low_power = false; 4053 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) && 4054 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { 4055 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, }; 4056 struct phy_device *phydev; 4057 u32 phyid; 4058 4059 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 4060 4061 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; 4062 4063 tp->link_config.speed = phydev->speed; 4064 tp->link_config.duplex = phydev->duplex; 4065 tp->link_config.autoneg = phydev->autoneg; 4066 ethtool_convert_link_mode_to_legacy_u32( 4067 &tp->link_config.advertising, 4068 phydev->advertising); 4069 4070 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising); 4071 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, 4072 advertising); 4073 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, 4074 advertising); 4075 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, 4076 advertising); 4077 4078 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) { 4079 if (tg3_flag(tp, WOL_SPEED_100MB)) { 4080 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, 4081 advertising); 4082 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, 4083 advertising); 4084 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, 4085 advertising); 4086 } else { 4087 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, 4088 advertising); 4089 } 4090 } 4091 4092 linkmode_copy(phydev->advertising, advertising); 4093 phy_start_aneg(phydev); 4094 4095 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask; 4096 if (phyid != PHY_ID_BCMAC131) { 4097 phyid &= PHY_BCM_OUI_MASK; 4098 if (phyid == PHY_BCM_OUI_1 || 4099 phyid == PHY_BCM_OUI_2 || 4100 phyid == PHY_BCM_OUI_3) 4101 do_low_power = true; 4102 } 4103 } 4104 } else { 4105 do_low_power = true; 4106 4107 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) 4108 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; 4109 4110 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 4111 tg3_setup_phy(tp, false); 4112 } 4113 4114 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 4115 u32 val; 4116 4117 val = tr32(GRC_VCPU_EXT_CTRL); 4118 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL); 4119 } else if (!tg3_flag(tp, ENABLE_ASF)) { 4120 int i; 4121 u32 val; 4122 4123 for (i = 0; i < 200; i++) { 4124 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val); 4125 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) 4126 break; 4127 msleep(1); 4128 } 4129 } 4130 if (tg3_flag(tp, WOL_CAP)) 
4131 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE | 4132 WOL_DRV_STATE_SHUTDOWN | 4133 WOL_DRV_WOL | 4134 WOL_SET_MAGIC_PKT); 4135 4136 if (device_should_wake) { 4137 u32 mac_mode; 4138 4139 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { 4140 if (do_low_power && 4141 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) { 4142 tg3_phy_auxctl_write(tp, 4143 MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 4144 MII_TG3_AUXCTL_PCTL_WOL_EN | 4145 MII_TG3_AUXCTL_PCTL_100TX_LPWR | 4146 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC); 4147 udelay(40); 4148 } 4149 4150 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 4151 mac_mode = MAC_MODE_PORT_MODE_GMII; 4152 else if (tp->phy_flags & 4153 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) { 4154 if (tp->link_config.active_speed == SPEED_1000) 4155 mac_mode = MAC_MODE_PORT_MODE_GMII; 4156 else 4157 mac_mode = MAC_MODE_PORT_MODE_MII; 4158 } else 4159 mac_mode = MAC_MODE_PORT_MODE_MII; 4160 4161 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY; 4162 if (tg3_asic_rev(tp) == ASIC_REV_5700) { 4163 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ? 4164 SPEED_100 : SPEED_10; 4165 if (tg3_5700_link_polarity(tp, speed)) 4166 mac_mode |= MAC_MODE_LINK_POLARITY; 4167 else 4168 mac_mode &= ~MAC_MODE_LINK_POLARITY; 4169 } 4170 } else { 4171 mac_mode = MAC_MODE_PORT_MODE_TBI; 4172 } 4173 4174 if (!tg3_flag(tp, 5750_PLUS)) 4175 tw32(MAC_LED_CTRL, tp->led_ctrl); 4176 4177 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE; 4178 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) && 4179 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE))) 4180 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL; 4181 4182 if (tg3_flag(tp, ENABLE_APE)) 4183 mac_mode |= MAC_MODE_APE_TX_EN | 4184 MAC_MODE_APE_RX_EN | 4185 MAC_MODE_TDE_ENABLE; 4186 4187 tw32_f(MAC_MODE, mac_mode); 4188 udelay(100); 4189 4190 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE); 4191 udelay(10); 4192 } 4193 4194 if (!tg3_flag(tp, WOL_SPEED_100MB) && 4195 (tg3_asic_rev(tp) == ASIC_REV_5700 || 4196 tg3_asic_rev(tp) == ASIC_REV_5701)) { 4197 u32 base_val; 4198 4199 base_val = tp->pci_clock_ctrl; 4200 base_val |= (CLOCK_CTRL_RXCLK_DISABLE | 4201 CLOCK_CTRL_TXCLK_DISABLE); 4202 4203 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK | 4204 CLOCK_CTRL_PWRDOWN_PLL133, 40); 4205 } else if (tg3_flag(tp, 5780_CLASS) || 4206 tg3_flag(tp, CPMU_PRESENT) || 4207 tg3_asic_rev(tp) == ASIC_REV_5906) { 4208 /* do nothing */ 4209 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) { 4210 u32 newbits1, newbits2; 4211 4212 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 4213 tg3_asic_rev(tp) == ASIC_REV_5701) { 4214 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE | 4215 CLOCK_CTRL_TXCLK_DISABLE | 4216 CLOCK_CTRL_ALTCLK); 4217 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; 4218 } else if (tg3_flag(tp, 5705_PLUS)) { 4219 newbits1 = CLOCK_CTRL_625_CORE; 4220 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK; 4221 } else { 4222 newbits1 = CLOCK_CTRL_ALTCLK; 4223 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; 4224 } 4225 4226 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1, 4227 40); 4228 4229 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2, 4230 40); 4231 4232 if (!tg3_flag(tp, 5705_PLUS)) { 4233 u32 newbits3; 4234 4235 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 4236 tg3_asic_rev(tp) == ASIC_REV_5701) { 4237 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE | 4238 CLOCK_CTRL_TXCLK_DISABLE | 4239 CLOCK_CTRL_44MHZ_CORE); 4240 } else { 4241 newbits3 = CLOCK_CTRL_44MHZ_CORE; 4242 } 4243 4244 tw32_wait_f(TG3PCI_CLOCK_CTRL, 4245 tp->pci_clock_ctrl | newbits3, 40); 4246 } 4247 } 4248 4249 if (!(device_should_wake) && !tg3_flag(tp, 
ENABLE_ASF)) 4250 tg3_power_down_phy(tp, do_low_power); 4251 4252 tg3_frob_aux_power(tp, true); 4253 4254 /* Workaround for unstable PLL clock */ 4255 if ((!tg3_flag(tp, IS_SSB_CORE)) && 4256 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) || 4257 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) { 4258 u32 val = tr32(0x7d00); 4259 4260 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1); 4261 tw32(0x7d00, val); 4262 if (!tg3_flag(tp, ENABLE_ASF)) { 4263 int err; 4264 4265 err = tg3_nvram_lock(tp); 4266 tg3_halt_cpu(tp, RX_CPU_BASE); 4267 if (!err) 4268 tg3_nvram_unlock(tp); 4269 } 4270 } 4271 4272 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN); 4273 4274 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN); 4275 4276 return 0; 4277 } 4278 4279 static void tg3_power_down(struct tg3 *tp) 4280 { 4281 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE)); 4282 pci_set_power_state(tp->pdev, PCI_D3hot); 4283 } 4284 4285 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex) 4286 { 4287 switch (val & MII_TG3_AUX_STAT_SPDMASK) { 4288 case MII_TG3_AUX_STAT_10HALF: 4289 *speed = SPEED_10; 4290 *duplex = DUPLEX_HALF; 4291 break; 4292 4293 case MII_TG3_AUX_STAT_10FULL: 4294 *speed = SPEED_10; 4295 *duplex = DUPLEX_FULL; 4296 break; 4297 4298 case MII_TG3_AUX_STAT_100HALF: 4299 *speed = SPEED_100; 4300 *duplex = DUPLEX_HALF; 4301 break; 4302 4303 case MII_TG3_AUX_STAT_100FULL: 4304 *speed = SPEED_100; 4305 *duplex = DUPLEX_FULL; 4306 break; 4307 4308 case MII_TG3_AUX_STAT_1000HALF: 4309 *speed = SPEED_1000; 4310 *duplex = DUPLEX_HALF; 4311 break; 4312 4313 case MII_TG3_AUX_STAT_1000FULL: 4314 *speed = SPEED_1000; 4315 *duplex = DUPLEX_FULL; 4316 break; 4317 4318 default: 4319 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 4320 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 : 4321 SPEED_10; 4322 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? 
DUPLEX_FULL : 4323 DUPLEX_HALF; 4324 break; 4325 } 4326 *speed = SPEED_UNKNOWN; 4327 *duplex = DUPLEX_UNKNOWN; 4328 break; 4329 } 4330 } 4331 4332 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl) 4333 { 4334 int err = 0; 4335 u32 val, new_adv; 4336 4337 new_adv = ADVERTISE_CSMA; 4338 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL; 4339 new_adv |= mii_advertise_flowctrl(flowctrl); 4340 4341 err = tg3_writephy(tp, MII_ADVERTISE, new_adv); 4342 if (err) 4343 goto done; 4344 4345 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4346 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise); 4347 4348 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || 4349 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) 4350 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER; 4351 4352 err = tg3_writephy(tp, MII_CTRL1000, new_adv); 4353 if (err) 4354 goto done; 4355 } 4356 4357 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) 4358 goto done; 4359 4360 tw32(TG3_CPMU_EEE_MODE, 4361 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE); 4362 4363 err = tg3_phy_toggle_auxctl_smdsp(tp, true); 4364 if (!err) { 4365 u32 err2; 4366 4367 val = 0; 4368 /* Advertise 100-BaseTX EEE ability */ 4369 if (advertise & ADVERTISED_100baseT_Full) 4370 val |= MDIO_AN_EEE_ADV_100TX; 4371 /* Advertise 1000-BaseT EEE ability */ 4372 if (advertise & ADVERTISED_1000baseT_Full) 4373 val |= MDIO_AN_EEE_ADV_1000T; 4374 4375 if (!tp->eee.eee_enabled) { 4376 val = 0; 4377 tp->eee.advertised = 0; 4378 } else { 4379 tp->eee.advertised = advertise & 4380 (ADVERTISED_100baseT_Full | 4381 ADVERTISED_1000baseT_Full); 4382 } 4383 4384 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val); 4385 if (err) 4386 val = 0; 4387 4388 switch (tg3_asic_rev(tp)) { 4389 case ASIC_REV_5717: 4390 case ASIC_REV_57765: 4391 case ASIC_REV_57766: 4392 case ASIC_REV_5719: 4393 /* If we advertised any eee advertisements above... 
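 * turn on the ALNOKO, RMRXSTO and OPCSINPT bits in the TAP26 DSP tap.
 * The write below runs unconditionally, so val being left at zero
 * (nothing advertised, or the EEE advertisement write failed) clears
 * those same bits again.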
*/ 4394 if (val) 4395 val = MII_TG3_DSP_TAP26_ALNOKO | 4396 MII_TG3_DSP_TAP26_RMRXSTO | 4397 MII_TG3_DSP_TAP26_OPCSINPT; 4398 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); 4399 /* Fall through */ 4400 case ASIC_REV_5720: 4401 case ASIC_REV_5762: 4402 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val)) 4403 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val | 4404 MII_TG3_DSP_CH34TP2_HIBW01); 4405 } 4406 4407 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false); 4408 if (!err) 4409 err = err2; 4410 } 4411 4412 done: 4413 return err; 4414 } 4415 4416 static void tg3_phy_copper_begin(struct tg3 *tp) 4417 { 4418 if (tp->link_config.autoneg == AUTONEG_ENABLE || 4419 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { 4420 u32 adv, fc; 4421 4422 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) && 4423 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) { 4424 adv = ADVERTISED_10baseT_Half | 4425 ADVERTISED_10baseT_Full; 4426 if (tg3_flag(tp, WOL_SPEED_100MB)) 4427 adv |= ADVERTISED_100baseT_Half | 4428 ADVERTISED_100baseT_Full; 4429 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) { 4430 if (!(tp->phy_flags & 4431 TG3_PHYFLG_DISABLE_1G_HD_ADV)) 4432 adv |= ADVERTISED_1000baseT_Half; 4433 adv |= ADVERTISED_1000baseT_Full; 4434 } 4435 4436 fc = FLOW_CTRL_TX | FLOW_CTRL_RX; 4437 } else { 4438 adv = tp->link_config.advertising; 4439 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) 4440 adv &= ~(ADVERTISED_1000baseT_Half | 4441 ADVERTISED_1000baseT_Full); 4442 4443 fc = tp->link_config.flowctrl; 4444 } 4445 4446 tg3_phy_autoneg_cfg(tp, adv, fc); 4447 4448 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) && 4449 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) { 4450 /* Normally during power down we want to autonegotiate 4451 * the lowest possible speed for WOL. However, to avoid 4452 * link flap, we leave it untouched. 4453 */ 4454 return; 4455 } 4456 4457 tg3_writephy(tp, MII_BMCR, 4458 BMCR_ANENABLE | BMCR_ANRESTART); 4459 } else { 4460 int i; 4461 u32 bmcr, orig_bmcr; 4462 4463 tp->link_config.active_speed = tp->link_config.speed; 4464 tp->link_config.active_duplex = tp->link_config.duplex; 4465 4466 if (tg3_asic_rev(tp) == ASIC_REV_5714) { 4467 /* With autoneg disabled, 5715 only links up when the 4468 * advertisement register has the configured speed 4469 * enabled. 
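 * Writing ADVERTISE_ALL below keeps every 10/100 ability bit set, so
 * whichever speed was forced stays covered by the advertisement.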
4470 */ 4471 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL); 4472 } 4473 4474 bmcr = 0; 4475 switch (tp->link_config.speed) { 4476 default: 4477 case SPEED_10: 4478 break; 4479 4480 case SPEED_100: 4481 bmcr |= BMCR_SPEED100; 4482 break; 4483 4484 case SPEED_1000: 4485 bmcr |= BMCR_SPEED1000; 4486 break; 4487 } 4488 4489 if (tp->link_config.duplex == DUPLEX_FULL) 4490 bmcr |= BMCR_FULLDPLX; 4491 4492 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) && 4493 (bmcr != orig_bmcr)) { 4494 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK); 4495 for (i = 0; i < 1500; i++) { 4496 u32 tmp; 4497 4498 udelay(10); 4499 if (tg3_readphy(tp, MII_BMSR, &tmp) || 4500 tg3_readphy(tp, MII_BMSR, &tmp)) 4501 continue; 4502 if (!(tmp & BMSR_LSTATUS)) { 4503 udelay(40); 4504 break; 4505 } 4506 } 4507 tg3_writephy(tp, MII_BMCR, bmcr); 4508 udelay(40); 4509 } 4510 } 4511 } 4512 4513 static int tg3_phy_pull_config(struct tg3 *tp) 4514 { 4515 int err; 4516 u32 val; 4517 4518 err = tg3_readphy(tp, MII_BMCR, &val); 4519 if (err) 4520 goto done; 4521 4522 if (!(val & BMCR_ANENABLE)) { 4523 tp->link_config.autoneg = AUTONEG_DISABLE; 4524 tp->link_config.advertising = 0; 4525 tg3_flag_clear(tp, PAUSE_AUTONEG); 4526 4527 err = -EIO; 4528 4529 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) { 4530 case 0: 4531 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) 4532 goto done; 4533 4534 tp->link_config.speed = SPEED_10; 4535 break; 4536 case BMCR_SPEED100: 4537 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) 4538 goto done; 4539 4540 tp->link_config.speed = SPEED_100; 4541 break; 4542 case BMCR_SPEED1000: 4543 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4544 tp->link_config.speed = SPEED_1000; 4545 break; 4546 } 4547 /* Fall through */ 4548 default: 4549 goto done; 4550 } 4551 4552 if (val & BMCR_FULLDPLX) 4553 tp->link_config.duplex = DUPLEX_FULL; 4554 else 4555 tp->link_config.duplex = DUPLEX_HALF; 4556 4557 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX; 4558 4559 err = 0; 4560 goto done; 4561 } 4562 4563 tp->link_config.autoneg = AUTONEG_ENABLE; 4564 tp->link_config.advertising = ADVERTISED_Autoneg; 4565 tg3_flag_set(tp, PAUSE_AUTONEG); 4566 4567 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { 4568 u32 adv; 4569 4570 err = tg3_readphy(tp, MII_ADVERTISE, &val); 4571 if (err) 4572 goto done; 4573 4574 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL); 4575 tp->link_config.advertising |= adv | ADVERTISED_TP; 4576 4577 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val); 4578 } else { 4579 tp->link_config.advertising |= ADVERTISED_FIBRE; 4580 } 4581 4582 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4583 u32 adv; 4584 4585 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { 4586 err = tg3_readphy(tp, MII_CTRL1000, &val); 4587 if (err) 4588 goto done; 4589 4590 adv = mii_ctrl1000_to_ethtool_adv_t(val); 4591 } else { 4592 err = tg3_readphy(tp, MII_ADVERTISE, &val); 4593 if (err) 4594 goto done; 4595 4596 adv = tg3_decode_flowctrl_1000X(val); 4597 tp->link_config.flowctrl = adv; 4598 4599 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL); 4600 adv = mii_adv_to_ethtool_adv_x(val); 4601 } 4602 4603 tp->link_config.advertising |= adv; 4604 } 4605 4606 done: 4607 return err; 4608 } 4609 4610 static int tg3_init_5401phy_dsp(struct tg3 *tp) 4611 { 4612 int err; 4613 4614 /* Turn off tap power management. 
*/ 4615 /* Set Extended packet length bit */ 4616 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20); 4617 4618 err |= tg3_phydsp_write(tp, 0x0012, 0x1804); 4619 err |= tg3_phydsp_write(tp, 0x0013, 0x1204); 4620 err |= tg3_phydsp_write(tp, 0x8006, 0x0132); 4621 err |= tg3_phydsp_write(tp, 0x8006, 0x0232); 4622 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20); 4623 4624 udelay(40); 4625 4626 return err; 4627 } 4628 4629 static bool tg3_phy_eee_config_ok(struct tg3 *tp) 4630 { 4631 struct ethtool_eee eee; 4632 4633 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) 4634 return true; 4635 4636 tg3_eee_pull_config(tp, &eee); 4637 4638 if (tp->eee.eee_enabled) { 4639 if (tp->eee.advertised != eee.advertised || 4640 tp->eee.tx_lpi_timer != eee.tx_lpi_timer || 4641 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled) 4642 return false; 4643 } else { 4644 /* EEE is disabled but we're advertising */ 4645 if (eee.advertised) 4646 return false; 4647 } 4648 4649 return true; 4650 } 4651 4652 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv) 4653 { 4654 u32 advmsk, tgtadv, advertising; 4655 4656 advertising = tp->link_config.advertising; 4657 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL; 4658 4659 advmsk = ADVERTISE_ALL; 4660 if (tp->link_config.active_duplex == DUPLEX_FULL) { 4661 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl); 4662 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 4663 } 4664 4665 if (tg3_readphy(tp, MII_ADVERTISE, lcladv)) 4666 return false; 4667 4668 if ((*lcladv & advmsk) != tgtadv) 4669 return false; 4670 4671 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4672 u32 tg3_ctrl; 4673 4674 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising); 4675 4676 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl)) 4677 return false; 4678 4679 if (tgtadv && 4680 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || 4681 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) { 4682 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER; 4683 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL | 4684 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER); 4685 } else { 4686 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL); 4687 } 4688 4689 if (tg3_ctrl != tgtadv) 4690 return false; 4691 } 4692 4693 return true; 4694 } 4695 4696 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv) 4697 { 4698 u32 lpeth = 0; 4699 4700 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4701 u32 val; 4702 4703 if (tg3_readphy(tp, MII_STAT1000, &val)) 4704 return false; 4705 4706 lpeth = mii_stat1000_to_ethtool_lpa_t(val); 4707 } 4708 4709 if (tg3_readphy(tp, MII_LPA, rmtadv)) 4710 return false; 4711 4712 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv); 4713 tp->link_config.rmt_adv = lpeth; 4714 4715 return true; 4716 } 4717 4718 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up) 4719 { 4720 if (curr_link_up != tp->link_up) { 4721 if (curr_link_up) { 4722 netif_carrier_on(tp->dev); 4723 } else { 4724 netif_carrier_off(tp->dev); 4725 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 4726 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 4727 } 4728 4729 tg3_link_report(tp); 4730 return true; 4731 } 4732 4733 return false; 4734 } 4735 4736 static void tg3_clear_mac_status(struct tg3 *tp) 4737 { 4738 tw32(MAC_EVENT, 0); 4739 4740 tw32_f(MAC_STATUS, 4741 MAC_STATUS_SYNC_CHANGED | 4742 MAC_STATUS_CFG_CHANGED | 4743 MAC_STATUS_MI_COMPLETION | 4744 MAC_STATUS_LNKSTATE_CHANGED); 4745 udelay(40); 4746 } 4747 4748 static void tg3_setup_eee(struct tg3 *tp) 4749 { 4750 u32 val; 4751 4752 
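/* Summary of the register writes below: select which link-idle
 * conditions qualify for LPI, set the 20.1 us LPI exit timer, build
 * the EEE mode word (forced to zero when tp->eee.eee_enabled is off),
 * and program the two debounce timers. Note that tx_lpi_timer only
 * occupies the low 16 bits of TG3_CPMU_EEE_DBTMR1.
 */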
val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 | 4753 TG3_CPMU_EEE_LNKIDL_UART_IDL; 4754 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) 4755 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT; 4756 4757 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val); 4758 4759 tw32_f(TG3_CPMU_EEE_CTRL, 4760 TG3_CPMU_EEE_CTRL_EXIT_20_1_US); 4761 4762 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET | 4763 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) | 4764 TG3_CPMU_EEEMD_LPI_IN_RX | 4765 TG3_CPMU_EEEMD_EEE_ENABLE; 4766 4767 if (tg3_asic_rev(tp) != ASIC_REV_5717) 4768 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN; 4769 4770 if (tg3_flag(tp, ENABLE_APE)) 4771 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN; 4772 4773 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0); 4774 4775 tw32_f(TG3_CPMU_EEE_DBTMR1, 4776 TG3_CPMU_DBTMR1_PCIEXIT_2047US | 4777 (tp->eee.tx_lpi_timer & 0xffff)); 4778 4779 tw32_f(TG3_CPMU_EEE_DBTMR2, 4780 TG3_CPMU_DBTMR2_APE_TX_2047US | 4781 TG3_CPMU_DBTMR2_TXIDXEQ_2047US); 4782 } 4783 4784 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset) 4785 { 4786 bool current_link_up; 4787 u32 bmsr, val; 4788 u32 lcl_adv, rmt_adv; 4789 u32 current_speed; 4790 u8 current_duplex; 4791 int i, err; 4792 4793 tg3_clear_mac_status(tp); 4794 4795 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { 4796 tw32_f(MAC_MI_MODE, 4797 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); 4798 udelay(80); 4799 } 4800 4801 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0); 4802 4803 /* Some third-party PHYs need to be reset on link going 4804 * down. 4805 */ 4806 if ((tg3_asic_rev(tp) == ASIC_REV_5703 || 4807 tg3_asic_rev(tp) == ASIC_REV_5704 || 4808 tg3_asic_rev(tp) == ASIC_REV_5705) && 4809 tp->link_up) { 4810 tg3_readphy(tp, MII_BMSR, &bmsr); 4811 if (!tg3_readphy(tp, MII_BMSR, &bmsr) && 4812 !(bmsr & BMSR_LSTATUS)) 4813 force_reset = true; 4814 } 4815 if (force_reset) 4816 tg3_phy_reset(tp); 4817 4818 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { 4819 tg3_readphy(tp, MII_BMSR, &bmsr); 4820 if (tg3_readphy(tp, MII_BMSR, &bmsr) || 4821 !tg3_flag(tp, INIT_COMPLETE)) 4822 bmsr = 0; 4823 4824 if (!(bmsr & BMSR_LSTATUS)) { 4825 err = tg3_init_5401phy_dsp(tp); 4826 if (err) 4827 return err; 4828 4829 tg3_readphy(tp, MII_BMSR, &bmsr); 4830 for (i = 0; i < 1000; i++) { 4831 udelay(10); 4832 if (!tg3_readphy(tp, MII_BMSR, &bmsr) && 4833 (bmsr & BMSR_LSTATUS)) { 4834 udelay(40); 4835 break; 4836 } 4837 } 4838 4839 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) == 4840 TG3_PHY_REV_BCM5401_B0 && 4841 !(bmsr & BMSR_LSTATUS) && 4842 tp->link_config.active_speed == SPEED_1000) { 4843 err = tg3_phy_reset(tp); 4844 if (!err) 4845 err = tg3_init_5401phy_dsp(tp); 4846 if (err) 4847 return err; 4848 } 4849 } 4850 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || 4851 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) { 4852 /* 5701 {A0,B0} CRC bug workaround */ 4853 tg3_writephy(tp, 0x15, 0x0a75); 4854 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68); 4855 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); 4856 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68); 4857 } 4858 4859 /* Clear pending interrupts... 
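 * (the latched PHY interrupt status is flushed by the two
 * back-to-back MII_TG3_ISTAT reads below)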
*/ 4860 tg3_readphy(tp, MII_TG3_ISTAT, &val); 4861 tg3_readphy(tp, MII_TG3_ISTAT, &val); 4862 4863 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) 4864 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG); 4865 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) 4866 tg3_writephy(tp, MII_TG3_IMASK, ~0); 4867 4868 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 4869 tg3_asic_rev(tp) == ASIC_REV_5701) { 4870 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1) 4871 tg3_writephy(tp, MII_TG3_EXT_CTRL, 4872 MII_TG3_EXT_CTRL_LNK3_LED_MODE); 4873 else 4874 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0); 4875 } 4876 4877 current_link_up = false; 4878 current_speed = SPEED_UNKNOWN; 4879 current_duplex = DUPLEX_UNKNOWN; 4880 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE; 4881 tp->link_config.rmt_adv = 0; 4882 4883 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) { 4884 err = tg3_phy_auxctl_read(tp, 4885 MII_TG3_AUXCTL_SHDWSEL_MISCTEST, 4886 &val); 4887 if (!err && !(val & (1 << 10))) { 4888 tg3_phy_auxctl_write(tp, 4889 MII_TG3_AUXCTL_SHDWSEL_MISCTEST, 4890 val | (1 << 10)); 4891 goto relink; 4892 } 4893 } 4894 4895 bmsr = 0; 4896 for (i = 0; i < 100; i++) { 4897 tg3_readphy(tp, MII_BMSR, &bmsr); 4898 if (!tg3_readphy(tp, MII_BMSR, &bmsr) && 4899 (bmsr & BMSR_LSTATUS)) 4900 break; 4901 udelay(40); 4902 } 4903 4904 if (bmsr & BMSR_LSTATUS) { 4905 u32 aux_stat, bmcr; 4906 4907 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat); 4908 for (i = 0; i < 2000; i++) { 4909 udelay(10); 4910 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) && 4911 aux_stat) 4912 break; 4913 } 4914 4915 tg3_aux_stat_to_speed_duplex(tp, aux_stat, 4916 &current_speed, 4917 &current_duplex); 4918 4919 bmcr = 0; 4920 for (i = 0; i < 200; i++) { 4921 tg3_readphy(tp, MII_BMCR, &bmcr); 4922 if (tg3_readphy(tp, MII_BMCR, &bmcr)) 4923 continue; 4924 if (bmcr && bmcr != 0x7fff) 4925 break; 4926 udelay(10); 4927 } 4928 4929 lcl_adv = 0; 4930 rmt_adv = 0; 4931 4932 tp->link_config.active_speed = current_speed; 4933 tp->link_config.active_duplex = current_duplex; 4934 4935 if (tp->link_config.autoneg == AUTONEG_ENABLE) { 4936 bool eee_config_ok = tg3_phy_eee_config_ok(tp); 4937 4938 if ((bmcr & BMCR_ANENABLE) && 4939 eee_config_ok && 4940 tg3_phy_copper_an_config_ok(tp, &lcl_adv) && 4941 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv)) 4942 current_link_up = true; 4943 4944 /* EEE settings changes take effect only after a phy 4945 * reset. If we have skipped a reset due to Link Flap 4946 * Avoidance being enabled, do it now.
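 * tg3_setup_eee() below rewrites the CPMU EEE configuration first,
 * so the reset that follows brings the link up with the new
 * settings.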
4947 */ 4948 if (!eee_config_ok && 4949 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && 4950 !force_reset) { 4951 tg3_setup_eee(tp); 4952 tg3_phy_reset(tp); 4953 } 4954 } else { 4955 if (!(bmcr & BMCR_ANENABLE) && 4956 tp->link_config.speed == current_speed && 4957 tp->link_config.duplex == current_duplex) { 4958 current_link_up = true; 4959 } 4960 } 4961 4962 if (current_link_up && 4963 tp->link_config.active_duplex == DUPLEX_FULL) { 4964 u32 reg, bit; 4965 4966 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 4967 reg = MII_TG3_FET_GEN_STAT; 4968 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT; 4969 } else { 4970 reg = MII_TG3_EXT_STAT; 4971 bit = MII_TG3_EXT_STAT_MDIX; 4972 } 4973 4974 if (!tg3_readphy(tp, reg, &val) && (val & bit)) 4975 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE; 4976 4977 tg3_setup_flow_control(tp, lcl_adv, rmt_adv); 4978 } 4979 } 4980 4981 relink: 4982 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { 4983 tg3_phy_copper_begin(tp); 4984 4985 if (tg3_flag(tp, ROBOSWITCH)) { 4986 current_link_up = true; 4987 /* FIXME: when BCM5325 switch is used use 100 MBit/s */ 4988 current_speed = SPEED_1000; 4989 current_duplex = DUPLEX_FULL; 4990 tp->link_config.active_speed = current_speed; 4991 tp->link_config.active_duplex = current_duplex; 4992 } 4993 4994 tg3_readphy(tp, MII_BMSR, &bmsr); 4995 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) || 4996 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)) 4997 current_link_up = true; 4998 } 4999 5000 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK; 5001 if (current_link_up) { 5002 if (tp->link_config.active_speed == SPEED_100 || 5003 tp->link_config.active_speed == SPEED_10) 5004 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 5005 else 5006 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 5007 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) 5008 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 5009 else 5010 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 5011 5012 /* In order for the 5750 core in BCM4785 chip to work properly 5013 * in RGMII mode, the Led Control Register must be set up. 5014 */ 5015 if (tg3_flag(tp, RGMII_MODE)) { 5016 u32 led_ctrl = tr32(MAC_LED_CTRL); 5017 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON); 5018 5019 if (tp->link_config.active_speed == SPEED_10) 5020 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE; 5021 else if (tp->link_config.active_speed == SPEED_100) 5022 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE | 5023 LED_CTRL_100MBPS_ON); 5024 else if (tp->link_config.active_speed == SPEED_1000) 5025 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE | 5026 LED_CTRL_1000MBPS_ON); 5027 5028 tw32(MAC_LED_CTRL, led_ctrl); 5029 udelay(40); 5030 } 5031 5032 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; 5033 if (tp->link_config.active_duplex == DUPLEX_HALF) 5034 tp->mac_mode |= MAC_MODE_HALF_DUPLEX; 5035 5036 if (tg3_asic_rev(tp) == ASIC_REV_5700) { 5037 if (current_link_up && 5038 tg3_5700_link_polarity(tp, tp->link_config.active_speed)) 5039 tp->mac_mode |= MAC_MODE_LINK_POLARITY; 5040 else 5041 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; 5042 } 5043 5044 /* ??? Without this setting Netgear GA302T PHY does not 5045 * ??? send/receive packets... 5046 */ 5047 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 && 5048 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) { 5049 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL; 5050 tw32_f(MAC_MI_MODE, tp->mi_mode); 5051 udelay(80); 5052 } 5053 5054 tw32_f(MAC_MODE, tp->mac_mode); 5055 udelay(40); 5056 5057 tg3_phy_eee_adjust(tp, current_link_up); 5058 5059 if (tg3_flag(tp, USE_LINKCHG_REG)) { 5060 /* Polled via timer. 
*/ 5061 tw32_f(MAC_EVENT, 0); 5062 } else { 5063 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); 5064 } 5065 udelay(40); 5066 5067 if (tg3_asic_rev(tp) == ASIC_REV_5700 && 5068 current_link_up && 5069 tp->link_config.active_speed == SPEED_1000 && 5070 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) { 5071 udelay(120); 5072 tw32_f(MAC_STATUS, 5073 (MAC_STATUS_SYNC_CHANGED | 5074 MAC_STATUS_CFG_CHANGED)); 5075 udelay(40); 5076 tg3_write_mem(tp, 5077 NIC_SRAM_FIRMWARE_MBOX, 5078 NIC_SRAM_FIRMWARE_MBOX_MAGIC2); 5079 } 5080 5081 /* Prevent send BD corruption. */ 5082 if (tg3_flag(tp, CLKREQ_BUG)) { 5083 if (tp->link_config.active_speed == SPEED_100 || 5084 tp->link_config.active_speed == SPEED_10) 5085 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL, 5086 PCI_EXP_LNKCTL_CLKREQ_EN); 5087 else 5088 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL, 5089 PCI_EXP_LNKCTL_CLKREQ_EN); 5090 } 5091 5092 tg3_test_and_report_link_chg(tp, current_link_up); 5093 5094 return 0; 5095 } 5096 5097 struct tg3_fiber_aneginfo { 5098 int state; 5099 #define ANEG_STATE_UNKNOWN 0 5100 #define ANEG_STATE_AN_ENABLE 1 5101 #define ANEG_STATE_RESTART_INIT 2 5102 #define ANEG_STATE_RESTART 3 5103 #define ANEG_STATE_DISABLE_LINK_OK 4 5104 #define ANEG_STATE_ABILITY_DETECT_INIT 5 5105 #define ANEG_STATE_ABILITY_DETECT 6 5106 #define ANEG_STATE_ACK_DETECT_INIT 7 5107 #define ANEG_STATE_ACK_DETECT 8 5108 #define ANEG_STATE_COMPLETE_ACK_INIT 9 5109 #define ANEG_STATE_COMPLETE_ACK 10 5110 #define ANEG_STATE_IDLE_DETECT_INIT 11 5111 #define ANEG_STATE_IDLE_DETECT 12 5112 #define ANEG_STATE_LINK_OK 13 5113 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14 5114 #define ANEG_STATE_NEXT_PAGE_WAIT 15 5115 5116 u32 flags; 5117 #define MR_AN_ENABLE 0x00000001 5118 #define MR_RESTART_AN 0x00000002 5119 #define MR_AN_COMPLETE 0x00000004 5120 #define MR_PAGE_RX 0x00000008 5121 #define MR_NP_LOADED 0x00000010 5122 #define MR_TOGGLE_TX 0x00000020 5123 #define MR_LP_ADV_FULL_DUPLEX 0x00000040 5124 #define MR_LP_ADV_HALF_DUPLEX 0x00000080 5125 #define MR_LP_ADV_SYM_PAUSE 0x00000100 5126 #define MR_LP_ADV_ASYM_PAUSE 0x00000200 5127 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400 5128 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800 5129 #define MR_LP_ADV_NEXT_PAGE 0x00001000 5130 #define MR_TOGGLE_RX 0x00002000 5131 #define MR_NP_RX 0x00004000 5132 5133 #define MR_LINK_OK 0x80000000 5134 5135 unsigned long link_time, cur_time; 5136 5137 u32 ability_match_cfg; 5138 int ability_match_count; 5139 5140 char ability_match, idle_match, ack_match; 5141 5142 u32 txconfig, rxconfig; 5143 #define ANEG_CFG_NP 0x00000080 5144 #define ANEG_CFG_ACK 0x00000040 5145 #define ANEG_CFG_RF2 0x00000020 5146 #define ANEG_CFG_RF1 0x00000010 5147 #define ANEG_CFG_PS2 0x00000001 5148 #define ANEG_CFG_PS1 0x00008000 5149 #define ANEG_CFG_HD 0x00004000 5150 #define ANEG_CFG_FD 0x00002000 5151 #define ANEG_CFG_INVAL 0x00001f06 5152 5153 }; 5154 #define ANEG_OK 0 5155 #define ANEG_DONE 1 5156 #define ANEG_TIMER_ENAB 2 5157 #define ANEG_FAILED -1 5158 5159 #define ANEG_STATE_SETTLE_TIME 10000 5160 5161 static int tg3_fiber_aneg_smachine(struct tg3 *tp, 5162 struct tg3_fiber_aneginfo *ap) 5163 { 5164 u16 flowctrl; 5165 unsigned long delta; 5166 u32 rx_cfg_reg; 5167 int ret; 5168 5169 if (ap->state == ANEG_STATE_UNKNOWN) { 5170 ap->rxconfig = 0; 5171 ap->link_time = 0; 5172 ap->cur_time = 0; 5173 ap->ability_match_cfg = 0; 5174 ap->ability_match_count = 0; 5175 ap->ability_match = 0; 5176 ap->idle_match = 0; 5177 ap->ack_match = 0; 5178 } 5179 ap->cur_time++; 5180 5181 if 
(tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) { 5182 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG); 5183 5184 if (rx_cfg_reg != ap->ability_match_cfg) { 5185 ap->ability_match_cfg = rx_cfg_reg; 5186 ap->ability_match = 0; 5187 ap->ability_match_count = 0; 5188 } else { 5189 if (++ap->ability_match_count > 1) { 5190 ap->ability_match = 1; 5191 ap->ability_match_cfg = rx_cfg_reg; 5192 } 5193 } 5194 if (rx_cfg_reg & ANEG_CFG_ACK) 5195 ap->ack_match = 1; 5196 else 5197 ap->ack_match = 0; 5198 5199 ap->idle_match = 0; 5200 } else { 5201 ap->idle_match = 1; 5202 ap->ability_match_cfg = 0; 5203 ap->ability_match_count = 0; 5204 ap->ability_match = 0; 5205 ap->ack_match = 0; 5206 5207 rx_cfg_reg = 0; 5208 } 5209 5210 ap->rxconfig = rx_cfg_reg; 5211 ret = ANEG_OK; 5212 5213 switch (ap->state) { 5214 case ANEG_STATE_UNKNOWN: 5215 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN)) 5216 ap->state = ANEG_STATE_AN_ENABLE; 5217 5218 /* fall through */ 5219 case ANEG_STATE_AN_ENABLE: 5220 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX); 5221 if (ap->flags & MR_AN_ENABLE) { 5222 ap->link_time = 0; 5223 ap->cur_time = 0; 5224 ap->ability_match_cfg = 0; 5225 ap->ability_match_count = 0; 5226 ap->ability_match = 0; 5227 ap->idle_match = 0; 5228 ap->ack_match = 0; 5229 5230 ap->state = ANEG_STATE_RESTART_INIT; 5231 } else { 5232 ap->state = ANEG_STATE_DISABLE_LINK_OK; 5233 } 5234 break; 5235 5236 case ANEG_STATE_RESTART_INIT: 5237 ap->link_time = ap->cur_time; 5238 ap->flags &= ~(MR_NP_LOADED); 5239 ap->txconfig = 0; 5240 tw32(MAC_TX_AUTO_NEG, 0); 5241 tp->mac_mode |= MAC_MODE_SEND_CONFIGS; 5242 tw32_f(MAC_MODE, tp->mac_mode); 5243 udelay(40); 5244 5245 ret = ANEG_TIMER_ENAB; 5246 ap->state = ANEG_STATE_RESTART; 5247 5248 /* fall through */ 5249 case ANEG_STATE_RESTART: 5250 delta = ap->cur_time - ap->link_time; 5251 if (delta > ANEG_STATE_SETTLE_TIME) 5252 ap->state = ANEG_STATE_ABILITY_DETECT_INIT; 5253 else 5254 ret = ANEG_TIMER_ENAB; 5255 break; 5256 5257 case ANEG_STATE_DISABLE_LINK_OK: 5258 ret = ANEG_DONE; 5259 break; 5260 5261 case ANEG_STATE_ABILITY_DETECT_INIT: 5262 ap->flags &= ~(MR_TOGGLE_TX); 5263 ap->txconfig = ANEG_CFG_FD; 5264 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); 5265 if (flowctrl & ADVERTISE_1000XPAUSE) 5266 ap->txconfig |= ANEG_CFG_PS1; 5267 if (flowctrl & ADVERTISE_1000XPSE_ASYM) 5268 ap->txconfig |= ANEG_CFG_PS2; 5269 tw32(MAC_TX_AUTO_NEG, ap->txconfig); 5270 tp->mac_mode |= MAC_MODE_SEND_CONFIGS; 5271 tw32_f(MAC_MODE, tp->mac_mode); 5272 udelay(40); 5273 5274 ap->state = ANEG_STATE_ABILITY_DETECT; 5275 break; 5276 5277 case ANEG_STATE_ABILITY_DETECT: 5278 if (ap->ability_match != 0 && ap->rxconfig != 0) 5279 ap->state = ANEG_STATE_ACK_DETECT_INIT; 5280 break; 5281 5282 case ANEG_STATE_ACK_DETECT_INIT: 5283 ap->txconfig |= ANEG_CFG_ACK; 5284 tw32(MAC_TX_AUTO_NEG, ap->txconfig); 5285 tp->mac_mode |= MAC_MODE_SEND_CONFIGS; 5286 tw32_f(MAC_MODE, tp->mac_mode); 5287 udelay(40); 5288 5289 ap->state = ANEG_STATE_ACK_DETECT; 5290 5291 /* fall through */ 5292 case ANEG_STATE_ACK_DETECT: 5293 if (ap->ack_match != 0) { 5294 if ((ap->rxconfig & ~ANEG_CFG_ACK) == 5295 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) { 5296 ap->state = ANEG_STATE_COMPLETE_ACK_INIT; 5297 } else { 5298 ap->state = ANEG_STATE_AN_ENABLE; 5299 } 5300 } else if (ap->ability_match != 0 && 5301 ap->rxconfig == 0) { 5302 ap->state = ANEG_STATE_AN_ENABLE; 5303 } 5304 break; 5305 5306 case ANEG_STATE_COMPLETE_ACK_INIT: 5307 if (ap->rxconfig & ANEG_CFG_INVAL) { 5308 ret = ANEG_FAILED; 5309 break; 5310 } 5311 ap->flags &= 
~(MR_LP_ADV_FULL_DUPLEX | 5312 MR_LP_ADV_HALF_DUPLEX | 5313 MR_LP_ADV_SYM_PAUSE | 5314 MR_LP_ADV_ASYM_PAUSE | 5315 MR_LP_ADV_REMOTE_FAULT1 | 5316 MR_LP_ADV_REMOTE_FAULT2 | 5317 MR_LP_ADV_NEXT_PAGE | 5318 MR_TOGGLE_RX | 5319 MR_NP_RX); 5320 if (ap->rxconfig & ANEG_CFG_FD) 5321 ap->flags |= MR_LP_ADV_FULL_DUPLEX; 5322 if (ap->rxconfig & ANEG_CFG_HD) 5323 ap->flags |= MR_LP_ADV_HALF_DUPLEX; 5324 if (ap->rxconfig & ANEG_CFG_PS1) 5325 ap->flags |= MR_LP_ADV_SYM_PAUSE; 5326 if (ap->rxconfig & ANEG_CFG_PS2) 5327 ap->flags |= MR_LP_ADV_ASYM_PAUSE; 5328 if (ap->rxconfig & ANEG_CFG_RF1) 5329 ap->flags |= MR_LP_ADV_REMOTE_FAULT1; 5330 if (ap->rxconfig & ANEG_CFG_RF2) 5331 ap->flags |= MR_LP_ADV_REMOTE_FAULT2; 5332 if (ap->rxconfig & ANEG_CFG_NP) 5333 ap->flags |= MR_LP_ADV_NEXT_PAGE; 5334 5335 ap->link_time = ap->cur_time; 5336 5337 ap->flags ^= (MR_TOGGLE_TX); 5338 if (ap->rxconfig & 0x0008) 5339 ap->flags |= MR_TOGGLE_RX; 5340 if (ap->rxconfig & ANEG_CFG_NP) 5341 ap->flags |= MR_NP_RX; 5342 ap->flags |= MR_PAGE_RX; 5343 5344 ap->state = ANEG_STATE_COMPLETE_ACK; 5345 ret = ANEG_TIMER_ENAB; 5346 break; 5347 5348 case ANEG_STATE_COMPLETE_ACK: 5349 if (ap->ability_match != 0 && 5350 ap->rxconfig == 0) { 5351 ap->state = ANEG_STATE_AN_ENABLE; 5352 break; 5353 } 5354 delta = ap->cur_time - ap->link_time; 5355 if (delta > ANEG_STATE_SETTLE_TIME) { 5356 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) { 5357 ap->state = ANEG_STATE_IDLE_DETECT_INIT; 5358 } else { 5359 if ((ap->txconfig & ANEG_CFG_NP) == 0 && 5360 !(ap->flags & MR_NP_RX)) { 5361 ap->state = ANEG_STATE_IDLE_DETECT_INIT; 5362 } else { 5363 ret = ANEG_FAILED; 5364 } 5365 } 5366 } 5367 break; 5368 5369 case ANEG_STATE_IDLE_DETECT_INIT: 5370 ap->link_time = ap->cur_time; 5371 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; 5372 tw32_f(MAC_MODE, tp->mac_mode); 5373 udelay(40); 5374 5375 ap->state = ANEG_STATE_IDLE_DETECT; 5376 ret = ANEG_TIMER_ENAB; 5377 break; 5378 5379 case ANEG_STATE_IDLE_DETECT: 5380 if (ap->ability_match != 0 && 5381 ap->rxconfig == 0) { 5382 ap->state = ANEG_STATE_AN_ENABLE; 5383 break; 5384 } 5385 delta = ap->cur_time - ap->link_time; 5386 if (delta > ANEG_STATE_SETTLE_TIME) { 5387 /* XXX another gem from the Broadcom driver :( */ 5388 ap->state = ANEG_STATE_LINK_OK; 5389 } 5390 break; 5391 5392 case ANEG_STATE_LINK_OK: 5393 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK); 5394 ret = ANEG_DONE; 5395 break; 5396 5397 case ANEG_STATE_NEXT_PAGE_WAIT_INIT: 5398 /* ??? unimplemented */ 5399 break; 5400 5401 case ANEG_STATE_NEXT_PAGE_WAIT: 5402 /* ??? 
unimplemented */ 5403 break; 5404 5405 default: 5406 ret = ANEG_FAILED; 5407 break; 5408 } 5409 5410 return ret; 5411 } 5412 5413 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags) 5414 { 5415 int res = 0; 5416 struct tg3_fiber_aneginfo aninfo; 5417 int status = ANEG_FAILED; 5418 unsigned int tick; 5419 u32 tmp; 5420 5421 tw32_f(MAC_TX_AUTO_NEG, 0); 5422 5423 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; 5424 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII); 5425 udelay(40); 5426 5427 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS); 5428 udelay(40); 5429 5430 memset(&aninfo, 0, sizeof(aninfo)); 5431 aninfo.flags |= MR_AN_ENABLE; 5432 aninfo.state = ANEG_STATE_UNKNOWN; 5433 aninfo.cur_time = 0; 5434 tick = 0; 5435 while (++tick < 195000) { 5436 status = tg3_fiber_aneg_smachine(tp, &aninfo); 5437 if (status == ANEG_DONE || status == ANEG_FAILED) 5438 break; 5439 5440 udelay(1); 5441 } 5442 5443 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; 5444 tw32_f(MAC_MODE, tp->mac_mode); 5445 udelay(40); 5446 5447 *txflags = aninfo.txconfig; 5448 *rxflags = aninfo.flags; 5449 5450 if (status == ANEG_DONE && 5451 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK | 5452 MR_LP_ADV_FULL_DUPLEX))) 5453 res = 1; 5454 5455 return res; 5456 } 5457 5458 static void tg3_init_bcm8002(struct tg3 *tp) 5459 { 5460 u32 mac_status = tr32(MAC_STATUS); 5461 int i; 5462 5463 /* Reset when initting first time or we have a link. */ 5464 if (tg3_flag(tp, INIT_COMPLETE) && 5465 !(mac_status & MAC_STATUS_PCS_SYNCED)) 5466 return; 5467 5468 /* Set PLL lock range. */ 5469 tg3_writephy(tp, 0x16, 0x8007); 5470 5471 /* SW reset */ 5472 tg3_writephy(tp, MII_BMCR, BMCR_RESET); 5473 5474 /* Wait for reset to complete. */ 5475 /* XXX schedule_timeout() ... */ 5476 for (i = 0; i < 500; i++) 5477 udelay(10); 5478 5479 /* Config mode; select PMA/Ch 1 regs. */ 5480 tg3_writephy(tp, 0x10, 0x8411); 5481 5482 /* Enable auto-lock and comdet, select txclk for tx. */ 5483 tg3_writephy(tp, 0x11, 0x0a10); 5484 5485 tg3_writephy(tp, 0x18, 0x00a0); 5486 tg3_writephy(tp, 0x16, 0x41ff); 5487 5488 /* Assert and deassert POR. */ 5489 tg3_writephy(tp, 0x13, 0x0400); 5490 udelay(40); 5491 tg3_writephy(tp, 0x13, 0x0000); 5492 5493 tg3_writephy(tp, 0x11, 0x0a50); 5494 udelay(40); 5495 tg3_writephy(tp, 0x11, 0x0a10); 5496 5497 /* Wait for signal to stabilize */ 5498 /* XXX schedule_timeout() ... */ 5499 for (i = 0; i < 15000; i++) 5500 udelay(10); 5501 5502 /* Deselect the channel register so we can read the PHYID 5503 * later. 
5504 */ 5505 tg3_writephy(tp, 0x10, 0x8011); 5506 } 5507 5508 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status) 5509 { 5510 u16 flowctrl; 5511 bool current_link_up; 5512 u32 sg_dig_ctrl, sg_dig_status; 5513 u32 serdes_cfg, expected_sg_dig_ctrl; 5514 int workaround, port_a; 5515 5516 serdes_cfg = 0; 5517 expected_sg_dig_ctrl = 0; 5518 workaround = 0; 5519 port_a = 1; 5520 current_link_up = false; 5521 5522 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 && 5523 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) { 5524 workaround = 1; 5525 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) 5526 port_a = 0; 5527 5528 /* preserve bits 0-11,13,14 for signal pre-emphasis */ 5529 /* preserve bits 20-23 for voltage regulator */ 5530 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff; 5531 } 5532 5533 sg_dig_ctrl = tr32(SG_DIG_CTRL); 5534 5535 if (tp->link_config.autoneg != AUTONEG_ENABLE) { 5536 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) { 5537 if (workaround) { 5538 u32 val = serdes_cfg; 5539 5540 if (port_a) 5541 val |= 0xc010000; 5542 else 5543 val |= 0x4010000; 5544 tw32_f(MAC_SERDES_CFG, val); 5545 } 5546 5547 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP); 5548 } 5549 if (mac_status & MAC_STATUS_PCS_SYNCED) { 5550 tg3_setup_flow_control(tp, 0, 0); 5551 current_link_up = true; 5552 } 5553 goto out; 5554 } 5555 5556 /* Want auto-negotiation. */ 5557 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP; 5558 5559 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); 5560 if (flowctrl & ADVERTISE_1000XPAUSE) 5561 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP; 5562 if (flowctrl & ADVERTISE_1000XPSE_ASYM) 5563 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE; 5564 5565 if (sg_dig_ctrl != expected_sg_dig_ctrl) { 5566 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) && 5567 tp->serdes_counter && 5568 ((mac_status & (MAC_STATUS_PCS_SYNCED | 5569 MAC_STATUS_RCVD_CFG)) == 5570 MAC_STATUS_PCS_SYNCED)) { 5571 tp->serdes_counter--; 5572 current_link_up = true; 5573 goto out; 5574 } 5575 restart_autoneg: 5576 if (workaround) 5577 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000); 5578 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET); 5579 udelay(5); 5580 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl); 5581 5582 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; 5583 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5584 } else if (mac_status & (MAC_STATUS_PCS_SYNCED | 5585 MAC_STATUS_SIGNAL_DET)) { 5586 sg_dig_status = tr32(SG_DIG_STATUS); 5587 mac_status = tr32(MAC_STATUS); 5588 5589 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) && 5590 (mac_status & MAC_STATUS_PCS_SYNCED)) { 5591 u32 local_adv = 0, remote_adv = 0; 5592 5593 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP) 5594 local_adv |= ADVERTISE_1000XPAUSE; 5595 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE) 5596 local_adv |= ADVERTISE_1000XPSE_ASYM; 5597 5598 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE) 5599 remote_adv |= LPA_1000XPAUSE; 5600 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE) 5601 remote_adv |= LPA_1000XPAUSE_ASYM; 5602 5603 tp->link_config.rmt_adv = 5604 mii_adv_to_ethtool_adv_x(remote_adv); 5605 5606 tg3_setup_flow_control(tp, local_adv, remote_adv); 5607 current_link_up = true; 5608 tp->serdes_counter = 0; 5609 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5610 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) { 5611 if (tp->serdes_counter) 5612 tp->serdes_counter--; 5613 else { 5614 if (workaround) { 5615 u32 val = serdes_cfg; 5616 5617 if (port_a) 5618 val |= 0xc010000; 5619 else 5620 val |= 0x4010000; 5621 5622 
tw32_f(MAC_SERDES_CFG, val); 5623 } 5624 5625 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP); 5626 udelay(40); 5627 5628 /* Link parallel detection - link is up */ 5629 /* only if we have PCS_SYNC and not */ 5630 /* receiving config code words */ 5631 mac_status = tr32(MAC_STATUS); 5632 if ((mac_status & MAC_STATUS_PCS_SYNCED) && 5633 !(mac_status & MAC_STATUS_RCVD_CFG)) { 5634 tg3_setup_flow_control(tp, 0, 0); 5635 current_link_up = true; 5636 tp->phy_flags |= 5637 TG3_PHYFLG_PARALLEL_DETECT; 5638 tp->serdes_counter = 5639 SERDES_PARALLEL_DET_TIMEOUT; 5640 } else 5641 goto restart_autoneg; 5642 } 5643 } 5644 } else { 5645 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; 5646 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5647 } 5648 5649 out: 5650 return current_link_up; 5651 } 5652 5653 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status) 5654 { 5655 bool current_link_up = false; 5656 5657 if (!(mac_status & MAC_STATUS_PCS_SYNCED)) 5658 goto out; 5659 5660 if (tp->link_config.autoneg == AUTONEG_ENABLE) { 5661 u32 txflags, rxflags; 5662 int i; 5663 5664 if (fiber_autoneg(tp, &txflags, &rxflags)) { 5665 u32 local_adv = 0, remote_adv = 0; 5666 5667 if (txflags & ANEG_CFG_PS1) 5668 local_adv |= ADVERTISE_1000XPAUSE; 5669 if (txflags & ANEG_CFG_PS2) 5670 local_adv |= ADVERTISE_1000XPSE_ASYM; 5671 5672 if (rxflags & MR_LP_ADV_SYM_PAUSE) 5673 remote_adv |= LPA_1000XPAUSE; 5674 if (rxflags & MR_LP_ADV_ASYM_PAUSE) 5675 remote_adv |= LPA_1000XPAUSE_ASYM; 5676 5677 tp->link_config.rmt_adv = 5678 mii_adv_to_ethtool_adv_x(remote_adv); 5679 5680 tg3_setup_flow_control(tp, local_adv, remote_adv); 5681 5682 current_link_up = true; 5683 } 5684 for (i = 0; i < 30; i++) { 5685 udelay(20); 5686 tw32_f(MAC_STATUS, 5687 (MAC_STATUS_SYNC_CHANGED | 5688 MAC_STATUS_CFG_CHANGED)); 5689 udelay(40); 5690 if ((tr32(MAC_STATUS) & 5691 (MAC_STATUS_SYNC_CHANGED | 5692 MAC_STATUS_CFG_CHANGED)) == 0) 5693 break; 5694 } 5695 5696 mac_status = tr32(MAC_STATUS); 5697 if (!current_link_up && 5698 (mac_status & MAC_STATUS_PCS_SYNCED) && 5699 !(mac_status & MAC_STATUS_RCVD_CFG)) 5700 current_link_up = true; 5701 } else { 5702 tg3_setup_flow_control(tp, 0, 0); 5703 5704 /* Forcing 1000FD link up. 
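 * With autoneg disabled this fiber port runs only at 1000 Mb/s full
 * duplex, so report the link up and pulse MAC_MODE_SEND_CONFIGS once
 * below.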
*/ 5705 current_link_up = true; 5706 5707 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS)); 5708 udelay(40); 5709 5710 tw32_f(MAC_MODE, tp->mac_mode); 5711 udelay(40); 5712 } 5713 5714 out: 5715 return current_link_up; 5716 } 5717 5718 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset) 5719 { 5720 u32 orig_pause_cfg; 5721 u32 orig_active_speed; 5722 u8 orig_active_duplex; 5723 u32 mac_status; 5724 bool current_link_up; 5725 int i; 5726 5727 orig_pause_cfg = tp->link_config.active_flowctrl; 5728 orig_active_speed = tp->link_config.active_speed; 5729 orig_active_duplex = tp->link_config.active_duplex; 5730 5731 if (!tg3_flag(tp, HW_AUTONEG) && 5732 tp->link_up && 5733 tg3_flag(tp, INIT_COMPLETE)) { 5734 mac_status = tr32(MAC_STATUS); 5735 mac_status &= (MAC_STATUS_PCS_SYNCED | 5736 MAC_STATUS_SIGNAL_DET | 5737 MAC_STATUS_CFG_CHANGED | 5738 MAC_STATUS_RCVD_CFG); 5739 if (mac_status == (MAC_STATUS_PCS_SYNCED | 5740 MAC_STATUS_SIGNAL_DET)) { 5741 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | 5742 MAC_STATUS_CFG_CHANGED)); 5743 return 0; 5744 } 5745 } 5746 5747 tw32_f(MAC_TX_AUTO_NEG, 0); 5748 5749 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); 5750 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI; 5751 tw32_f(MAC_MODE, tp->mac_mode); 5752 udelay(40); 5753 5754 if (tp->phy_id == TG3_PHY_ID_BCM8002) 5755 tg3_init_bcm8002(tp); 5756 5757 /* Enable link change event even when serdes polling. */ 5758 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); 5759 udelay(40); 5760 5761 current_link_up = false; 5762 tp->link_config.rmt_adv = 0; 5763 mac_status = tr32(MAC_STATUS); 5764 5765 if (tg3_flag(tp, HW_AUTONEG)) 5766 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status); 5767 else 5768 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status); 5769 5770 tp->napi[0].hw_status->status = 5771 (SD_STATUS_UPDATED | 5772 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG)); 5773 5774 for (i = 0; i < 100; i++) { 5775 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | 5776 MAC_STATUS_CFG_CHANGED)); 5777 udelay(5); 5778 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED | 5779 MAC_STATUS_CFG_CHANGED | 5780 MAC_STATUS_LNKSTATE_CHANGED)) == 0) 5781 break; 5782 } 5783 5784 mac_status = tr32(MAC_STATUS); 5785 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) { 5786 current_link_up = false; 5787 if (tp->link_config.autoneg == AUTONEG_ENABLE && 5788 tp->serdes_counter == 0) { 5789 tw32_f(MAC_MODE, (tp->mac_mode | 5790 MAC_MODE_SEND_CONFIGS)); 5791 udelay(1); 5792 tw32_f(MAC_MODE, tp->mac_mode); 5793 } 5794 } 5795 5796 if (current_link_up) { 5797 tp->link_config.active_speed = SPEED_1000; 5798 tp->link_config.active_duplex = DUPLEX_FULL; 5799 tw32(MAC_LED_CTRL, (tp->led_ctrl | 5800 LED_CTRL_LNKLED_OVERRIDE | 5801 LED_CTRL_1000MBPS_ON)); 5802 } else { 5803 tp->link_config.active_speed = SPEED_UNKNOWN; 5804 tp->link_config.active_duplex = DUPLEX_UNKNOWN; 5805 tw32(MAC_LED_CTRL, (tp->led_ctrl | 5806 LED_CTRL_LNKLED_OVERRIDE | 5807 LED_CTRL_TRAFFIC_OVERRIDE)); 5808 } 5809 5810 if (!tg3_test_and_report_link_chg(tp, current_link_up)) { 5811 u32 now_pause_cfg = tp->link_config.active_flowctrl; 5812 if (orig_pause_cfg != now_pause_cfg || 5813 orig_active_speed != tp->link_config.active_speed || 5814 orig_active_duplex != tp->link_config.active_duplex) 5815 tg3_link_report(tp); 5816 } 5817 5818 return 0; 5819 } 5820 5821 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset) 5822 { 5823 int err = 0; 5824 u32 bmsr, bmcr; 5825 u32 current_speed = SPEED_UNKNOWN; 5826 u8 current_duplex = 
DUPLEX_UNKNOWN; 5827 bool current_link_up = false; 5828 u32 local_adv = 0, remote_adv = 0, sgsr; /* zero-init: the SGMII quick path below can jump to fiber_setup_done before these are negotiated */ 5829 5830 if ((tg3_asic_rev(tp) == ASIC_REV_5719 || 5831 tg3_asic_rev(tp) == ASIC_REV_5720) && 5832 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) && 5833 (sgsr & SERDES_TG3_SGMII_MODE)) { 5834 5835 if (force_reset) 5836 tg3_phy_reset(tp); 5837 5838 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK; 5839 5840 if (!(sgsr & SERDES_TG3_LINK_UP)) { 5841 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 5842 } else { 5843 current_link_up = true; 5844 if (sgsr & SERDES_TG3_SPEED_1000) { 5845 current_speed = SPEED_1000; 5846 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 5847 } else if (sgsr & SERDES_TG3_SPEED_100) { 5848 current_speed = SPEED_100; 5849 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 5850 } else { 5851 current_speed = SPEED_10; 5852 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 5853 } 5854 5855 if (sgsr & SERDES_TG3_FULL_DUPLEX) 5856 current_duplex = DUPLEX_FULL; 5857 else 5858 current_duplex = DUPLEX_HALF; 5859 } 5860 5861 tw32_f(MAC_MODE, tp->mac_mode); 5862 udelay(40); 5863 5864 tg3_clear_mac_status(tp); 5865 5866 goto fiber_setup_done; 5867 } 5868 5869 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 5870 tw32_f(MAC_MODE, tp->mac_mode); 5871 udelay(40); 5872 5873 tg3_clear_mac_status(tp); 5874 5875 if (force_reset) 5876 tg3_phy_reset(tp); 5877 5878 tp->link_config.rmt_adv = 0; 5879 5880 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 5881 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 5882 if (tg3_asic_rev(tp) == ASIC_REV_5714) { 5883 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) 5884 bmsr |= BMSR_LSTATUS; 5885 else 5886 bmsr &= ~BMSR_LSTATUS; 5887 } 5888 5889 err |= tg3_readphy(tp, MII_BMCR, &bmcr); 5890 5891 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset && 5892 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { 5893 /* do nothing, just check for link up at the end */ 5894 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) { 5895 u32 adv, newadv; 5896 5897 err |= tg3_readphy(tp, MII_ADVERTISE, &adv); 5898 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF | 5899 ADVERTISE_1000XPAUSE | 5900 ADVERTISE_1000XPSE_ASYM | 5901 ADVERTISE_SLCT); 5902 5903 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); 5904 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising); 5905 5906 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) { 5907 tg3_writephy(tp, MII_ADVERTISE, newadv); 5908 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART; 5909 tg3_writephy(tp, MII_BMCR, bmcr); 5910 5911 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); 5912 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S; 5913 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5914 5915 return err; 5916 } 5917 } else { 5918 u32 new_bmcr; 5919 5920 bmcr &= ~BMCR_SPEED1000; 5921 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX); 5922 5923 if (tp->link_config.duplex == DUPLEX_FULL) 5924 new_bmcr |= BMCR_FULLDPLX; 5925 5926 if (new_bmcr != bmcr) { 5927 /* BMCR_SPEED1000 is a reserved bit that needs 5928 * to be set on write.
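 * new_bmcr was derived from a bmcr value that had BMCR_SPEED1000
 * masked off above, so the bit must be OR'd back in before the
 * register is written.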
5929 */ 5930 new_bmcr |= BMCR_SPEED1000; 5931 5932 /* Force a linkdown */ 5933 if (tp->link_up) { 5934 u32 adv; 5935 5936 err |= tg3_readphy(tp, MII_ADVERTISE, &adv); 5937 adv &= ~(ADVERTISE_1000XFULL | 5938 ADVERTISE_1000XHALF | 5939 ADVERTISE_SLCT); 5940 tg3_writephy(tp, MII_ADVERTISE, adv); 5941 tg3_writephy(tp, MII_BMCR, bmcr | 5942 BMCR_ANRESTART | 5943 BMCR_ANENABLE); 5944 udelay(10); 5945 tg3_carrier_off(tp); 5946 } 5947 tg3_writephy(tp, MII_BMCR, new_bmcr); 5948 bmcr = new_bmcr; 5949 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 5950 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 5951 if (tg3_asic_rev(tp) == ASIC_REV_5714) { 5952 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) 5953 bmsr |= BMSR_LSTATUS; 5954 else 5955 bmsr &= ~BMSR_LSTATUS; 5956 } 5957 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5958 } 5959 } 5960 5961 if (bmsr & BMSR_LSTATUS) { 5962 current_speed = SPEED_1000; 5963 current_link_up = true; 5964 if (bmcr & BMCR_FULLDPLX) 5965 current_duplex = DUPLEX_FULL; 5966 else 5967 current_duplex = DUPLEX_HALF; 5968 5969 local_adv = 0; 5970 remote_adv = 0; 5971 5972 if (bmcr & BMCR_ANENABLE) { 5973 u32 common; 5974 5975 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv); 5976 err |= tg3_readphy(tp, MII_LPA, &remote_adv); 5977 common = local_adv & remote_adv; 5978 if (common & (ADVERTISE_1000XHALF | 5979 ADVERTISE_1000XFULL)) { 5980 if (common & ADVERTISE_1000XFULL) 5981 current_duplex = DUPLEX_FULL; 5982 else 5983 current_duplex = DUPLEX_HALF; 5984 5985 tp->link_config.rmt_adv = 5986 mii_adv_to_ethtool_adv_x(remote_adv); 5987 } else if (!tg3_flag(tp, 5780_CLASS)) { 5988 /* Link is up via parallel detect */ 5989 } else { 5990 current_link_up = false; 5991 } 5992 } 5993 } 5994 5995 fiber_setup_done: 5996 if (current_link_up && current_duplex == DUPLEX_FULL) 5997 tg3_setup_flow_control(tp, local_adv, remote_adv); 5998 5999 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; 6000 if (tp->link_config.active_duplex == DUPLEX_HALF) 6001 tp->mac_mode |= MAC_MODE_HALF_DUPLEX; 6002 6003 tw32_f(MAC_MODE, tp->mac_mode); 6004 udelay(40); 6005 6006 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); 6007 6008 tp->link_config.active_speed = current_speed; 6009 tp->link_config.active_duplex = current_duplex; 6010 6011 tg3_test_and_report_link_chg(tp, current_link_up); 6012 return err; 6013 } 6014 6015 static void tg3_serdes_parallel_detect(struct tg3 *tp) 6016 { 6017 if (tp->serdes_counter) { 6018 /* Give autoneg time to complete. */ 6019 tp->serdes_counter--; 6020 return; 6021 } 6022 6023 if (!tp->link_up && 6024 (tp->link_config.autoneg == AUTONEG_ENABLE)) { 6025 u32 bmcr; 6026 6027 tg3_readphy(tp, MII_BMCR, &bmcr); 6028 if (bmcr & BMCR_ANENABLE) { 6029 u32 phy1, phy2; 6030 6031 /* Select shadow register 0x1f */ 6032 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00); 6033 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1); 6034 6035 /* Select expansion interrupt status register */ 6036 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 6037 MII_TG3_DSP_EXP1_INT_STAT); 6038 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); 6039 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); 6040 6041 if ((phy1 & 0x10) && !(phy2 & 0x20)) { 6042 /* We have signal detect and not receiving 6043 * config code words, link is up by parallel 6044 * detection. 
6045 */ 6046 6047 bmcr &= ~BMCR_ANENABLE; 6048 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX; 6049 tg3_writephy(tp, MII_BMCR, bmcr); 6050 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT; 6051 } 6052 } 6053 } else if (tp->link_up && 6054 (tp->link_config.autoneg == AUTONEG_ENABLE) && 6055 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { 6056 u32 phy2; 6057 6058 /* Select expansion interrupt status register */ 6059 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 6060 MII_TG3_DSP_EXP1_INT_STAT); 6061 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); 6062 if (phy2 & 0x20) { 6063 u32 bmcr; 6064 6065 /* Config code words received, turn on autoneg. */ 6066 tg3_readphy(tp, MII_BMCR, &bmcr); 6067 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE); 6068 6069 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 6070 6071 } 6072 } 6073 } 6074 6075 static int tg3_setup_phy(struct tg3 *tp, bool force_reset) 6076 { 6077 u32 val; 6078 int err; 6079 6080 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 6081 err = tg3_setup_fiber_phy(tp, force_reset); 6082 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 6083 err = tg3_setup_fiber_mii_phy(tp, force_reset); 6084 else 6085 err = tg3_setup_copper_phy(tp, force_reset); 6086 6087 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) { 6088 u32 scale; 6089 6090 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK; 6091 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5) 6092 scale = 65; 6093 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25) 6094 scale = 6; 6095 else 6096 scale = 12; 6097 6098 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK; 6099 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT); 6100 tw32(GRC_MISC_CFG, val); 6101 } 6102 6103 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) | 6104 (6 << TX_LENGTHS_IPG_SHIFT); 6105 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 6106 tg3_asic_rev(tp) == ASIC_REV_5762) 6107 val |= tr32(MAC_TX_LENGTHS) & 6108 (TX_LENGTHS_JMB_FRM_LEN_MSK | 6109 TX_LENGTHS_CNT_DWN_VAL_MSK); 6110 6111 if (tp->link_config.active_speed == SPEED_1000 && 6112 tp->link_config.active_duplex == DUPLEX_HALF) 6113 tw32(MAC_TX_LENGTHS, val | 6114 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)); 6115 else 6116 tw32(MAC_TX_LENGTHS, val | 6117 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)); 6118 6119 if (!tg3_flag(tp, 5705_PLUS)) { 6120 if (tp->link_up) { 6121 tw32(HOSTCC_STAT_COAL_TICKS, 6122 tp->coal.stats_block_coalesce_usecs); 6123 } else { 6124 tw32(HOSTCC_STAT_COAL_TICKS, 0); 6125 } 6126 } 6127 6128 if (tg3_flag(tp, ASPM_WORKAROUND)) { 6129 val = tr32(PCIE_PWR_MGMT_THRESH); 6130 if (!tp->link_up) 6131 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) | 6132 tp->pwrmgmt_thresh; 6133 else 6134 val |= PCIE_PWR_MGMT_L1_THRESH_MSK; 6135 tw32(PCIE_PWR_MGMT_THRESH, val); 6136 } 6137 6138 return err; 6139 } 6140 6141 /* tp->lock must be held */ 6142 static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts) 6143 { 6144 u64 stamp; 6145 6146 ptp_read_system_prets(sts); 6147 stamp = tr32(TG3_EAV_REF_CLCK_LSB); 6148 ptp_read_system_postts(sts); 6149 stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32; 6150 6151 return stamp; 6152 } 6153 6154 /* tp->lock must be held */ 6155 static void tg3_refclk_write(struct tg3 *tp, u64 newval) 6156 { 6157 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL); 6158 6159 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP); 6160 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff); 6161 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32); 6162 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME); 6163 } 6164 6165 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync); 6166 static inline void 
tg3_full_unlock(struct tg3 *tp);
6167 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6168 {
6169 struct tg3 *tp = netdev_priv(dev);
6170
6171 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6172 SOF_TIMESTAMPING_RX_SOFTWARE |
6173 SOF_TIMESTAMPING_SOFTWARE;
6174
6175 if (tg3_flag(tp, PTP_CAPABLE)) {
6176 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6177 SOF_TIMESTAMPING_RX_HARDWARE |
6178 SOF_TIMESTAMPING_RAW_HARDWARE;
6179 }
6180
6181 if (tp->ptp_clock)
6182 info->phc_index = ptp_clock_index(tp->ptp_clock);
6183 else
6184 info->phc_index = -1;
6185
6186 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6187
6188 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6189 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6190 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6191 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6192 return 0;
6193 }
6194
6195 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6196 {
6197 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6198 bool neg_adj = false;
6199 u32 correction = 0;
6200
6201 if (ppb < 0) {
6202 neg_adj = true;
6203 ppb = -ppb;
6204 }
6205
6206 /* Frequency adjustment is performed using hardware with a 24 bit
6207 * accumulator and a programmable correction value. On each clock cycle, the
6208 * correction value gets added to the accumulator and when it
6209 * overflows, the time counter is incremented/decremented.
6210 *
6211 * So conversion from ppb to correction value is
6212 * ppb * (1 << 24) / 1000000000
*
* Worked example (editor's illustration): ppb = 125000000, i.e.
* +12.5%, gives correction = (1 << 24) / 8 = 0x200000, so the
* accumulator wraps once every 8 clock cycles and adds one extra
* count per 8 cycles: +12.5%. The granularity is 1 / (1 << 24),
* about 60 ppb per correction LSB.
6213 */
6214 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6215 TG3_EAV_REF_CLK_CORRECT_MASK;
6216
6217 tg3_full_lock(tp, 0);
6218
6219 if (correction)
6220 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6221 TG3_EAV_REF_CLK_CORRECT_EN |
6222 (neg_adj ?
TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction); 6223 else 6224 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0); 6225 6226 tg3_full_unlock(tp); 6227 6228 return 0; 6229 } 6230 6231 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) 6232 { 6233 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); 6234 6235 tg3_full_lock(tp, 0); 6236 tp->ptp_adjust += delta; 6237 tg3_full_unlock(tp); 6238 6239 return 0; 6240 } 6241 6242 static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts, 6243 struct ptp_system_timestamp *sts) 6244 { 6245 u64 ns; 6246 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); 6247 6248 tg3_full_lock(tp, 0); 6249 ns = tg3_refclk_read(tp, sts); 6250 ns += tp->ptp_adjust; 6251 tg3_full_unlock(tp); 6252 6253 *ts = ns_to_timespec64(ns); 6254 6255 return 0; 6256 } 6257 6258 static int tg3_ptp_settime(struct ptp_clock_info *ptp, 6259 const struct timespec64 *ts) 6260 { 6261 u64 ns; 6262 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); 6263 6264 ns = timespec64_to_ns(ts); 6265 6266 tg3_full_lock(tp, 0); 6267 tg3_refclk_write(tp, ns); 6268 tp->ptp_adjust = 0; 6269 tg3_full_unlock(tp); 6270 6271 return 0; 6272 } 6273 6274 static int tg3_ptp_enable(struct ptp_clock_info *ptp, 6275 struct ptp_clock_request *rq, int on) 6276 { 6277 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); 6278 u32 clock_ctl; 6279 int rval = 0; 6280 6281 switch (rq->type) { 6282 case PTP_CLK_REQ_PEROUT: 6283 /* Reject requests with unsupported flags */ 6284 if (rq->perout.flags) 6285 return -EOPNOTSUPP; 6286 6287 if (rq->perout.index != 0) 6288 return -EINVAL; 6289 6290 tg3_full_lock(tp, 0); 6291 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL); 6292 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK; 6293 6294 if (on) { 6295 u64 nsec; 6296 6297 nsec = rq->perout.start.sec * 1000000000ULL + 6298 rq->perout.start.nsec; 6299 6300 if (rq->perout.period.sec || rq->perout.period.nsec) { 6301 netdev_warn(tp->dev, 6302 "Device supports only a one-shot timesync output, period must be 0\n"); 6303 rval = -EINVAL; 6304 goto err_out; 6305 } 6306 6307 if (nsec & (1ULL << 63)) { 6308 netdev_warn(tp->dev, 6309 "Start value (nsec) is over limit. 
Maximum size of start is only 63 bits\n"); 6310 rval = -EINVAL; 6311 goto err_out; 6312 } 6313 6314 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff)); 6315 tw32(TG3_EAV_WATCHDOG0_MSB, 6316 TG3_EAV_WATCHDOG0_EN | 6317 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK)); 6318 6319 tw32(TG3_EAV_REF_CLCK_CTL, 6320 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0); 6321 } else { 6322 tw32(TG3_EAV_WATCHDOG0_MSB, 0); 6323 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl); 6324 } 6325 6326 err_out: 6327 tg3_full_unlock(tp); 6328 return rval; 6329 6330 default: 6331 break; 6332 } 6333 6334 return -EOPNOTSUPP; 6335 } 6336 6337 static const struct ptp_clock_info tg3_ptp_caps = { 6338 .owner = THIS_MODULE, 6339 .name = "tg3 clock", 6340 .max_adj = 250000000, 6341 .n_alarm = 0, 6342 .n_ext_ts = 0, 6343 .n_per_out = 1, 6344 .n_pins = 0, 6345 .pps = 0, 6346 .adjfreq = tg3_ptp_adjfreq, 6347 .adjtime = tg3_ptp_adjtime, 6348 .gettimex64 = tg3_ptp_gettimex, 6349 .settime64 = tg3_ptp_settime, 6350 .enable = tg3_ptp_enable, 6351 }; 6352 6353 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock, 6354 struct skb_shared_hwtstamps *timestamp) 6355 { 6356 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps)); 6357 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) + 6358 tp->ptp_adjust); 6359 } 6360 6361 /* tp->lock must be held */ 6362 static void tg3_ptp_init(struct tg3 *tp) 6363 { 6364 if (!tg3_flag(tp, PTP_CAPABLE)) 6365 return; 6366 6367 /* Initialize the hardware clock to the system time. */ 6368 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real())); 6369 tp->ptp_adjust = 0; 6370 tp->ptp_info = tg3_ptp_caps; 6371 } 6372 6373 /* tp->lock must be held */ 6374 static void tg3_ptp_resume(struct tg3 *tp) 6375 { 6376 if (!tg3_flag(tp, PTP_CAPABLE)) 6377 return; 6378 6379 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust); 6380 tp->ptp_adjust = 0; 6381 } 6382 6383 static void tg3_ptp_fini(struct tg3 *tp) 6384 { 6385 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock) 6386 return; 6387 6388 ptp_clock_unregister(tp->ptp_clock); 6389 tp->ptp_clock = NULL; 6390 tp->ptp_adjust = 0; 6391 } 6392 6393 static inline int tg3_irq_sync(struct tg3 *tp) 6394 { 6395 return tp->irq_sync; 6396 } 6397 6398 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len) 6399 { 6400 int i; 6401 6402 dst = (u32 *)((u8 *)dst + off); 6403 for (i = 0; i < len; i += sizeof(u32)) 6404 *dst++ = tr32(off + i); 6405 } 6406 6407 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs) 6408 { 6409 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0); 6410 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200); 6411 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0); 6412 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0); 6413 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04); 6414 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80); 6415 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48); 6416 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04); 6417 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20); 6418 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c); 6419 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c); 6420 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c); 6421 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44); 6422 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04); 6423 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20); 6424 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14); 6425 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08); 6426 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08); 6427 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100); 6428 6429 if (tg3_flag(tp, SUPPORT_MSIX)) 6430 tg3_rd32_loop(tp, regs, 
HOSTCC_RXCOL_TICKS_VEC1, 0x180); 6431 6432 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10); 6433 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58); 6434 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08); 6435 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08); 6436 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04); 6437 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04); 6438 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04); 6439 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04); 6440 6441 if (!tg3_flag(tp, 5705_PLUS)) { 6442 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04); 6443 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04); 6444 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04); 6445 } 6446 6447 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110); 6448 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120); 6449 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c); 6450 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04); 6451 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c); 6452 6453 if (tg3_flag(tp, NVRAM)) 6454 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24); 6455 } 6456 6457 static void tg3_dump_state(struct tg3 *tp) 6458 { 6459 int i; 6460 u32 *regs; 6461 6462 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC); 6463 if (!regs) 6464 return; 6465 6466 if (tg3_flag(tp, PCI_EXPRESS)) { 6467 /* Read up to but not including private PCI registers */ 6468 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32)) 6469 regs[i / sizeof(u32)] = tr32(i); 6470 } else 6471 tg3_dump_legacy_regs(tp, regs); 6472 6473 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) { 6474 if (!regs[i + 0] && !regs[i + 1] && 6475 !regs[i + 2] && !regs[i + 3]) 6476 continue; 6477 6478 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n", 6479 i * 4, 6480 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]); 6481 } 6482 6483 kfree(regs); 6484 6485 for (i = 0; i < tp->irq_cnt; i++) { 6486 struct tg3_napi *tnapi = &tp->napi[i]; 6487 6488 /* SW status block */ 6489 netdev_err(tp->dev, 6490 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n", 6491 i, 6492 tnapi->hw_status->status, 6493 tnapi->hw_status->status_tag, 6494 tnapi->hw_status->rx_jumbo_consumer, 6495 tnapi->hw_status->rx_consumer, 6496 tnapi->hw_status->rx_mini_consumer, 6497 tnapi->hw_status->idx[0].rx_producer, 6498 tnapi->hw_status->idx[0].tx_consumer); 6499 6500 netdev_err(tp->dev, 6501 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n", 6502 i, 6503 tnapi->last_tag, tnapi->last_irq_tag, 6504 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending, 6505 tnapi->rx_rcb_ptr, 6506 tnapi->prodring.rx_std_prod_idx, 6507 tnapi->prodring.rx_std_cons_idx, 6508 tnapi->prodring.rx_jmb_prod_idx, 6509 tnapi->prodring.rx_jmb_cons_idx); 6510 } 6511 } 6512 6513 /* This is called whenever we suspect that the system chipset is re- 6514 * ordering the sequence of MMIO to the tx send mailbox. The symptom 6515 * is bogus tx completions. We try to recover by setting the 6516 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later 6517 * in the workqueue. 6518 */ 6519 static void tg3_tx_recover(struct tg3 *tp) 6520 { 6521 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) || 6522 tp->write32_tx_mbox == tg3_write_indirect_mbox); 6523 6524 netdev_warn(tp->dev, 6525 "The system may be re-ordering memory-mapped I/O " 6526 "cycles to the network device, attempting to recover. 
" 6527 "Please report the problem to the driver maintainer " 6528 "and include system chipset information.\n"); 6529 6530 tg3_flag_set(tp, TX_RECOVERY_PENDING); 6531 } 6532 6533 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi) 6534 { 6535 /* Tell compiler to fetch tx indices from memory. */ 6536 barrier(); 6537 return tnapi->tx_pending - 6538 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1)); 6539 } 6540 6541 /* Tigon3 never reports partial packet sends. So we do not 6542 * need special logic to handle SKBs that have not had all 6543 * of their frags sent yet, like SunGEM does. 6544 */ 6545 static void tg3_tx(struct tg3_napi *tnapi) 6546 { 6547 struct tg3 *tp = tnapi->tp; 6548 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer; 6549 u32 sw_idx = tnapi->tx_cons; 6550 struct netdev_queue *txq; 6551 int index = tnapi - tp->napi; 6552 unsigned int pkts_compl = 0, bytes_compl = 0; 6553 6554 if (tg3_flag(tp, ENABLE_TSS)) 6555 index--; 6556 6557 txq = netdev_get_tx_queue(tp->dev, index); 6558 6559 while (sw_idx != hw_idx) { 6560 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx]; 6561 struct sk_buff *skb = ri->skb; 6562 int i, tx_bug = 0; 6563 6564 if (unlikely(skb == NULL)) { 6565 tg3_tx_recover(tp); 6566 return; 6567 } 6568 6569 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) { 6570 struct skb_shared_hwtstamps timestamp; 6571 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB); 6572 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32; 6573 6574 tg3_hwclock_to_timestamp(tp, hwclock, ×tamp); 6575 6576 skb_tstamp_tx(skb, ×tamp); 6577 } 6578 6579 pci_unmap_single(tp->pdev, 6580 dma_unmap_addr(ri, mapping), 6581 skb_headlen(skb), 6582 PCI_DMA_TODEVICE); 6583 6584 ri->skb = NULL; 6585 6586 while (ri->fragmented) { 6587 ri->fragmented = false; 6588 sw_idx = NEXT_TX(sw_idx); 6589 ri = &tnapi->tx_buffers[sw_idx]; 6590 } 6591 6592 sw_idx = NEXT_TX(sw_idx); 6593 6594 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 6595 ri = &tnapi->tx_buffers[sw_idx]; 6596 if (unlikely(ri->skb != NULL || sw_idx == hw_idx)) 6597 tx_bug = 1; 6598 6599 pci_unmap_page(tp->pdev, 6600 dma_unmap_addr(ri, mapping), 6601 skb_frag_size(&skb_shinfo(skb)->frags[i]), 6602 PCI_DMA_TODEVICE); 6603 6604 while (ri->fragmented) { 6605 ri->fragmented = false; 6606 sw_idx = NEXT_TX(sw_idx); 6607 ri = &tnapi->tx_buffers[sw_idx]; 6608 } 6609 6610 sw_idx = NEXT_TX(sw_idx); 6611 } 6612 6613 pkts_compl++; 6614 bytes_compl += skb->len; 6615 6616 dev_consume_skb_any(skb); 6617 6618 if (unlikely(tx_bug)) { 6619 tg3_tx_recover(tp); 6620 return; 6621 } 6622 } 6623 6624 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl); 6625 6626 tnapi->tx_cons = sw_idx; 6627 6628 /* Need to make the tx_cons update visible to tg3_start_xmit() 6629 * before checking for netif_queue_stopped(). Without the 6630 * memory barrier, there is a small possibility that tg3_start_xmit() 6631 * will miss it and cause the queue to be stopped forever. 
6632 */
6633 smp_mb();
6634
6635 if (unlikely(netif_tx_queue_stopped(txq) &&
6636 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6637 __netif_tx_lock(txq, smp_processor_id());
6638 if (netif_tx_queue_stopped(txq) &&
6639 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6640 netif_tx_wake_queue(txq);
6641 __netif_tx_unlock(txq);
6642 }
6643 }
6644
6645 static void tg3_frag_free(bool is_frag, void *data)
6646 {
6647 if (is_frag)
6648 skb_free_frag(data);
6649 else
6650 kfree(data);
6651 }
6652
6653 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6654 {
6655 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6656 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6657
6658 if (!ri->data)
6659 return;
6660
6661 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6662 map_sz, PCI_DMA_FROMDEVICE);
6663 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6664 ri->data = NULL;
6665 }
6666
6667
6668 /* Returns the size of the skb allocated, or < 0 on error.
6669 *
6670 * We only need to fill in the address because the other members
6671 * of the RX descriptor are invariant, see tg3_init_rings.
6672 *
6673 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6674 * posting buffers we only dirty the first cache line of the RX
6675 * descriptor (containing the address). Whereas for the RX status
6676 * buffers the cpu only reads the last cacheline of the RX descriptor
6677 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6678 */
6679 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6680 u32 opaque_key, u32 dest_idx_unmasked,
6681 unsigned int *frag_size)
6682 {
6683 struct tg3_rx_buffer_desc *desc;
6684 struct ring_info *map;
6685 u8 *data;
6686 dma_addr_t mapping;
6687 int skb_size, data_size, dest_idx;
6688
6689 switch (opaque_key) {
6690 case RXD_OPAQUE_RING_STD:
6691 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6692 desc = &tpr->rx_std[dest_idx];
6693 map = &tpr->rx_std_buffers[dest_idx];
6694 data_size = tp->rx_pkt_map_sz;
6695 break;
6696
6697 case RXD_OPAQUE_RING_JUMBO:
6698 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6699 desc = &tpr->rx_jmb[dest_idx].std;
6700 map = &tpr->rx_jmb_buffers[dest_idx];
6701 data_size = TG3_RX_JMB_MAP_SZ;
6702 break;
6703
6704 default:
6705 return -EINVAL;
6706 }
6707
6708 /* Do not overwrite any of the map or rp information
6709 * until we are sure we can commit to a new buffer.
6710 *
6711 * Callers depend upon this behavior and assume that
6712 * we leave everything unchanged if we fail.
6713 */
6714 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6715 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6716 if (skb_size <= PAGE_SIZE) {
6717 data = napi_alloc_frag(skb_size);
6718 *frag_size = skb_size;
6719 } else {
6720 data = kmalloc(skb_size, GFP_ATOMIC);
6721 *frag_size = 0;
6722 }
6723 if (!data)
6724 return -ENOMEM;
6725
6726 mapping = pci_map_single(tp->pdev,
6727 data + TG3_RX_OFFSET(tp),
6728 data_size,
6729 PCI_DMA_FROMDEVICE);
6730 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6731 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6732 return -EIO;
6733 }
6734
6735 map->data = data;
6736 dma_unmap_addr_set(map, mapping, mapping);
6737
6738 desc->addr_hi = ((u64)mapping >> 32);
6739 desc->addr_lo = ((u64)mapping & 0xffffffff);
6740
6741 return data_size;
6742 }
6743
6744 /* We only need to copy the address over because the other
6745 * members of the RX descriptor are invariant. 
See notes above
6746 * tg3_alloc_rx_data for full details.
6747 */
6748 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6749 struct tg3_rx_prodring_set *dpr,
6750 u32 opaque_key, int src_idx,
6751 u32 dest_idx_unmasked)
6752 {
6753 struct tg3 *tp = tnapi->tp;
6754 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6755 struct ring_info *src_map, *dest_map;
6756 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6757 int dest_idx;
6758
6759 switch (opaque_key) {
6760 case RXD_OPAQUE_RING_STD:
6761 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6762 dest_desc = &dpr->rx_std[dest_idx];
6763 dest_map = &dpr->rx_std_buffers[dest_idx];
6764 src_desc = &spr->rx_std[src_idx];
6765 src_map = &spr->rx_std_buffers[src_idx];
6766 break;
6767
6768 case RXD_OPAQUE_RING_JUMBO:
6769 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6770 dest_desc = &dpr->rx_jmb[dest_idx].std;
6771 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6772 src_desc = &spr->rx_jmb[src_idx].std;
6773 src_map = &spr->rx_jmb_buffers[src_idx];
6774 break;
6775
6776 default:
6777 return;
6778 }
6779
6780 dest_map->data = src_map->data;
6781 dma_unmap_addr_set(dest_map, mapping,
6782 dma_unmap_addr(src_map, mapping));
6783 dest_desc->addr_hi = src_desc->addr_hi;
6784 dest_desc->addr_lo = src_desc->addr_lo;
6785
6786 /* Ensure that the update to the skb happens after the physical
6787 * addresses have been transferred to the new BD location.
6788 */
6789 smp_wmb();
6790
6791 src_map->data = NULL;
6792 }
6793
6794 /* The RX ring scheme is composed of multiple rings which post fresh
6795 * buffers to the chip, and one special ring the chip uses to report
6796 * status back to the host.
6797 *
6798 * The special ring reports the status of received packets to the
6799 * host. The chip does not write into the original descriptor the
6800 * RX buffer was obtained from. The chip simply takes the original
6801 * descriptor as provided by the host, updates the status and length
6802 * field, then writes this into the next status ring entry.
6803 *
6804 * Each ring the host uses to post buffers to the chip is described
6805 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
6806 * it is first placed into the on-chip RAM. When the packet's length
6807 * is known, the chip walks down the TG3_BDINFO entries to select the ring.
6808 * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
6809 * whose MAXLEN covers the new packet's length is chosen.
6810 *
6811 * The "separate ring for rx status" scheme may seem odd, but it makes
6812 * sense from a cache coherency perspective. If only the host writes
6813 * to the buffer post rings, and only the chip writes to the rx status
6814 * rings, then cache lines never move beyond shared-modified state.
6815 * If both the host and chip were to write into the same ring, cache line
6816 * eviction could occur since both entities want it in an exclusive state.
6817 */
6818 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6819 {
6820 struct tg3 *tp = tnapi->tp;
6821 u32 work_mask, rx_std_posted = 0;
6822 u32 std_prod_idx, jmb_prod_idx;
6823 u32 sw_idx = tnapi->rx_rcb_ptr;
6824 u16 hw_idx;
6825 int received;
6826 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6827
6828 hw_idx = *(tnapi->rx_rcb_prod_idx);
6829 /*
6830 * We need to order the read of hw_idx and the read of
6831 * the opaque cookie. 
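*
* (Editor's note: the chip DMA-writes a status entry and only
* then advances the producer index, so the reader must load
* hw_idx first and the entry second; the rmb() below keeps
* those two loads in that order.)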
6832 */
6833 rmb();
6834 work_mask = 0;
6835 received = 0;
6836 std_prod_idx = tpr->rx_std_prod_idx;
6837 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6838 while (sw_idx != hw_idx && budget > 0) {
6839 struct ring_info *ri;
6840 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6841 unsigned int len;
6842 struct sk_buff *skb;
6843 dma_addr_t dma_addr;
6844 u32 opaque_key, desc_idx, *post_ptr;
6845 u8 *data;
6846 u64 tstamp = 0;
6847
6848 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6849 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6850 if (opaque_key == RXD_OPAQUE_RING_STD) {
6851 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6852 dma_addr = dma_unmap_addr(ri, mapping);
6853 data = ri->data;
6854 post_ptr = &std_prod_idx;
6855 rx_std_posted++;
6856 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6857 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6858 dma_addr = dma_unmap_addr(ri, mapping);
6859 data = ri->data;
6860 post_ptr = &jmb_prod_idx;
6861 } else
6862 goto next_pkt_nopost;
6863
6864 work_mask |= opaque_key;
6865
6866 if (desc->err_vlan & RXD_ERR_MASK) {
6867 drop_it:
6868 tg3_recycle_rx(tnapi, tpr, opaque_key,
6869 desc_idx, *post_ptr);
6870 drop_it_no_recycle:
6871 /* Other statistics are kept track of by the card. */
6872 tp->rx_dropped++;
6873 goto next_pkt;
6874 }
6875
6876 prefetch(data + TG3_RX_OFFSET(tp));
6877 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6878 ETH_FCS_LEN;
6879
6880 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6881 RXD_FLAG_PTPSTAT_PTPV1 ||
6882 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6883 RXD_FLAG_PTPSTAT_PTPV2) {
6884 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6885 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6886 }
6887
6888 if (len > TG3_RX_COPY_THRESH(tp)) {
6889 int skb_size;
6890 unsigned int frag_size;
6891
6892 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6893 *post_ptr, &frag_size);
6894 if (skb_size < 0)
6895 goto drop_it;
6896
6897 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6898 PCI_DMA_FROMDEVICE);
6899
6900 /* Ensure that the update to the data happens
6901 * after the usage of the old DMA mapping. 
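*
* (Editor's note, a hedged inference from the surrounding
* comments: the smp_wmb() below orders the unmap above against
* the ri->data = NULL that follows, so that any other CPU that
* observes the slot as empty, e.g. via the ri->data checks in
* tg3_rx_prodring_xfer(), can no longer observe the old mapping
* as live.)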
6902 */ 6903 smp_wmb(); 6904 6905 ri->data = NULL; 6906 6907 skb = build_skb(data, frag_size); 6908 if (!skb) { 6909 tg3_frag_free(frag_size != 0, data); 6910 goto drop_it_no_recycle; 6911 } 6912 skb_reserve(skb, TG3_RX_OFFSET(tp)); 6913 } else { 6914 tg3_recycle_rx(tnapi, tpr, opaque_key, 6915 desc_idx, *post_ptr); 6916 6917 skb = netdev_alloc_skb(tp->dev, 6918 len + TG3_RAW_IP_ALIGN); 6919 if (skb == NULL) 6920 goto drop_it_no_recycle; 6921 6922 skb_reserve(skb, TG3_RAW_IP_ALIGN); 6923 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); 6924 memcpy(skb->data, 6925 data + TG3_RX_OFFSET(tp), 6926 len); 6927 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); 6928 } 6929 6930 skb_put(skb, len); 6931 if (tstamp) 6932 tg3_hwclock_to_timestamp(tp, tstamp, 6933 skb_hwtstamps(skb)); 6934 6935 if ((tp->dev->features & NETIF_F_RXCSUM) && 6936 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && 6937 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK) 6938 >> RXD_TCPCSUM_SHIFT) == 0xffff)) 6939 skb->ip_summed = CHECKSUM_UNNECESSARY; 6940 else 6941 skb_checksum_none_assert(skb); 6942 6943 skb->protocol = eth_type_trans(skb, tp->dev); 6944 6945 if (len > (tp->dev->mtu + ETH_HLEN) && 6946 skb->protocol != htons(ETH_P_8021Q) && 6947 skb->protocol != htons(ETH_P_8021AD)) { 6948 dev_kfree_skb_any(skb); 6949 goto drop_it_no_recycle; 6950 } 6951 6952 if (desc->type_flags & RXD_FLAG_VLAN && 6953 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) 6954 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 6955 desc->err_vlan & RXD_VLAN_MASK); 6956 6957 napi_gro_receive(&tnapi->napi, skb); 6958 6959 received++; 6960 budget--; 6961 6962 next_pkt: 6963 (*post_ptr)++; 6964 6965 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) { 6966 tpr->rx_std_prod_idx = std_prod_idx & 6967 tp->rx_std_ring_mask; 6968 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, 6969 tpr->rx_std_prod_idx); 6970 work_mask &= ~RXD_OPAQUE_RING_STD; 6971 rx_std_posted = 0; 6972 } 6973 next_pkt_nopost: 6974 sw_idx++; 6975 sw_idx &= tp->rx_ret_ring_mask; 6976 6977 /* Refresh hw_idx to see if there is new work */ 6978 if (sw_idx == hw_idx) { 6979 hw_idx = *(tnapi->rx_rcb_prod_idx); 6980 rmb(); 6981 } 6982 } 6983 6984 /* ACK the status ring. */ 6985 tnapi->rx_rcb_ptr = sw_idx; 6986 tw32_rx_mbox(tnapi->consmbox, sw_idx); 6987 6988 /* Refill RX ring(s). */ 6989 if (!tg3_flag(tp, ENABLE_RSS)) { 6990 /* Sync BD data before updating mailbox */ 6991 wmb(); 6992 6993 if (work_mask & RXD_OPAQUE_RING_STD) { 6994 tpr->rx_std_prod_idx = std_prod_idx & 6995 tp->rx_std_ring_mask; 6996 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, 6997 tpr->rx_std_prod_idx); 6998 } 6999 if (work_mask & RXD_OPAQUE_RING_JUMBO) { 7000 tpr->rx_jmb_prod_idx = jmb_prod_idx & 7001 tp->rx_jmb_ring_mask; 7002 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, 7003 tpr->rx_jmb_prod_idx); 7004 } 7005 } else if (work_mask) { 7006 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be 7007 * updated before the producer indices can be updated. 
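*
* (Editor's note: the classic publish pattern. Writer: fill the
* entries, smp_wmb(), then advance the index. The reader in
* tg3_rx_prodring_xfer() does the mirror image: load the index,
* smp_rmb(), then load the entries. Either barrier alone is not
* enough.)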
7008 */ 7009 smp_wmb(); 7010 7011 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask; 7012 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask; 7013 7014 if (tnapi != &tp->napi[1]) { 7015 tp->rx_refill = true; 7016 napi_schedule(&tp->napi[1].napi); 7017 } 7018 } 7019 7020 return received; 7021 } 7022 7023 static void tg3_poll_link(struct tg3 *tp) 7024 { 7025 /* handle link change and other phy events */ 7026 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) { 7027 struct tg3_hw_status *sblk = tp->napi[0].hw_status; 7028 7029 if (sblk->status & SD_STATUS_LINK_CHG) { 7030 sblk->status = SD_STATUS_UPDATED | 7031 (sblk->status & ~SD_STATUS_LINK_CHG); 7032 spin_lock(&tp->lock); 7033 if (tg3_flag(tp, USE_PHYLIB)) { 7034 tw32_f(MAC_STATUS, 7035 (MAC_STATUS_SYNC_CHANGED | 7036 MAC_STATUS_CFG_CHANGED | 7037 MAC_STATUS_MI_COMPLETION | 7038 MAC_STATUS_LNKSTATE_CHANGED)); 7039 udelay(40); 7040 } else 7041 tg3_setup_phy(tp, false); 7042 spin_unlock(&tp->lock); 7043 } 7044 } 7045 } 7046 7047 static int tg3_rx_prodring_xfer(struct tg3 *tp, 7048 struct tg3_rx_prodring_set *dpr, 7049 struct tg3_rx_prodring_set *spr) 7050 { 7051 u32 si, di, cpycnt, src_prod_idx; 7052 int i, err = 0; 7053 7054 while (1) { 7055 src_prod_idx = spr->rx_std_prod_idx; 7056 7057 /* Make sure updates to the rx_std_buffers[] entries and the 7058 * standard producer index are seen in the correct order. 7059 */ 7060 smp_rmb(); 7061 7062 if (spr->rx_std_cons_idx == src_prod_idx) 7063 break; 7064 7065 if (spr->rx_std_cons_idx < src_prod_idx) 7066 cpycnt = src_prod_idx - spr->rx_std_cons_idx; 7067 else 7068 cpycnt = tp->rx_std_ring_mask + 1 - 7069 spr->rx_std_cons_idx; 7070 7071 cpycnt = min(cpycnt, 7072 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx); 7073 7074 si = spr->rx_std_cons_idx; 7075 di = dpr->rx_std_prod_idx; 7076 7077 for (i = di; i < di + cpycnt; i++) { 7078 if (dpr->rx_std_buffers[i].data) { 7079 cpycnt = i - di; 7080 err = -ENOSPC; 7081 break; 7082 } 7083 } 7084 7085 if (!cpycnt) 7086 break; 7087 7088 /* Ensure that updates to the rx_std_buffers ring and the 7089 * shadowed hardware producer ring from tg3_recycle_skb() are 7090 * ordered correctly WRT the skb check above. 7091 */ 7092 smp_rmb(); 7093 7094 memcpy(&dpr->rx_std_buffers[di], 7095 &spr->rx_std_buffers[si], 7096 cpycnt * sizeof(struct ring_info)); 7097 7098 for (i = 0; i < cpycnt; i++, di++, si++) { 7099 struct tg3_rx_buffer_desc *sbd, *dbd; 7100 sbd = &spr->rx_std[si]; 7101 dbd = &dpr->rx_std[di]; 7102 dbd->addr_hi = sbd->addr_hi; 7103 dbd->addr_lo = sbd->addr_lo; 7104 } 7105 7106 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) & 7107 tp->rx_std_ring_mask; 7108 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) & 7109 tp->rx_std_ring_mask; 7110 } 7111 7112 while (1) { 7113 src_prod_idx = spr->rx_jmb_prod_idx; 7114 7115 /* Make sure updates to the rx_jmb_buffers[] entries and 7116 * the jumbo producer index are seen in the correct order. 
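*
* (Reader side of the pairing noted in tg3_poll_work(): the
* producer index is loaded first, then the barrier, then the
* entries, so the entries read are always at least as new as
* the index says they should be.)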
7117 */ 7118 smp_rmb(); 7119 7120 if (spr->rx_jmb_cons_idx == src_prod_idx) 7121 break; 7122 7123 if (spr->rx_jmb_cons_idx < src_prod_idx) 7124 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx; 7125 else 7126 cpycnt = tp->rx_jmb_ring_mask + 1 - 7127 spr->rx_jmb_cons_idx; 7128 7129 cpycnt = min(cpycnt, 7130 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx); 7131 7132 si = spr->rx_jmb_cons_idx; 7133 di = dpr->rx_jmb_prod_idx; 7134 7135 for (i = di; i < di + cpycnt; i++) { 7136 if (dpr->rx_jmb_buffers[i].data) { 7137 cpycnt = i - di; 7138 err = -ENOSPC; 7139 break; 7140 } 7141 } 7142 7143 if (!cpycnt) 7144 break; 7145 7146 /* Ensure that updates to the rx_jmb_buffers ring and the 7147 * shadowed hardware producer ring from tg3_recycle_skb() are 7148 * ordered correctly WRT the skb check above. 7149 */ 7150 smp_rmb(); 7151 7152 memcpy(&dpr->rx_jmb_buffers[di], 7153 &spr->rx_jmb_buffers[si], 7154 cpycnt * sizeof(struct ring_info)); 7155 7156 for (i = 0; i < cpycnt; i++, di++, si++) { 7157 struct tg3_rx_buffer_desc *sbd, *dbd; 7158 sbd = &spr->rx_jmb[si].std; 7159 dbd = &dpr->rx_jmb[di].std; 7160 dbd->addr_hi = sbd->addr_hi; 7161 dbd->addr_lo = sbd->addr_lo; 7162 } 7163 7164 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) & 7165 tp->rx_jmb_ring_mask; 7166 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) & 7167 tp->rx_jmb_ring_mask; 7168 } 7169 7170 return err; 7171 } 7172 7173 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) 7174 { 7175 struct tg3 *tp = tnapi->tp; 7176 7177 /* run TX completion thread */ 7178 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) { 7179 tg3_tx(tnapi); 7180 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) 7181 return work_done; 7182 } 7183 7184 if (!tnapi->rx_rcb_prod_idx) 7185 return work_done; 7186 7187 /* run RX thread, within the bounds set by NAPI. 
7188 * All RX "locking" is done by ensuring outside 7189 * code synchronizes with tg3->napi.poll() 7190 */ 7191 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) 7192 work_done += tg3_rx(tnapi, budget - work_done); 7193 7194 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) { 7195 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring; 7196 int i, err = 0; 7197 u32 std_prod_idx = dpr->rx_std_prod_idx; 7198 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx; 7199 7200 tp->rx_refill = false; 7201 for (i = 1; i <= tp->rxq_cnt; i++) 7202 err |= tg3_rx_prodring_xfer(tp, dpr, 7203 &tp->napi[i].prodring); 7204 7205 wmb(); 7206 7207 if (std_prod_idx != dpr->rx_std_prod_idx) 7208 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, 7209 dpr->rx_std_prod_idx); 7210 7211 if (jmb_prod_idx != dpr->rx_jmb_prod_idx) 7212 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, 7213 dpr->rx_jmb_prod_idx); 7214 7215 if (err) 7216 tw32_f(HOSTCC_MODE, tp->coal_now); 7217 } 7218 7219 return work_done; 7220 } 7221 7222 static inline void tg3_reset_task_schedule(struct tg3 *tp) 7223 { 7224 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags)) 7225 schedule_work(&tp->reset_task); 7226 } 7227 7228 static inline void tg3_reset_task_cancel(struct tg3 *tp) 7229 { 7230 cancel_work_sync(&tp->reset_task); 7231 tg3_flag_clear(tp, RESET_TASK_PENDING); 7232 tg3_flag_clear(tp, TX_RECOVERY_PENDING); 7233 } 7234 7235 static int tg3_poll_msix(struct napi_struct *napi, int budget) 7236 { 7237 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); 7238 struct tg3 *tp = tnapi->tp; 7239 int work_done = 0; 7240 struct tg3_hw_status *sblk = tnapi->hw_status; 7241 7242 while (1) { 7243 work_done = tg3_poll_work(tnapi, work_done, budget); 7244 7245 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) 7246 goto tx_recovery; 7247 7248 if (unlikely(work_done >= budget)) 7249 break; 7250 7251 /* tp->last_tag is used in tg3_int_reenable() below 7252 * to tell the hw how much work has been processed, 7253 * so we must read it before checking for more work. 7254 */ 7255 tnapi->last_tag = sblk->status_tag; 7256 tnapi->last_irq_tag = tnapi->last_tag; 7257 rmb(); 7258 7259 /* check for RX/TX work to do */ 7260 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons && 7261 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) { 7262 7263 /* This test here is not race free, but will reduce 7264 * the number of interrupts by looping again. 7265 */ 7266 if (tnapi == &tp->napi[1] && tp->rx_refill) 7267 continue; 7268 7269 napi_complete_done(napi, work_done); 7270 /* Reenable interrupts. */ 7271 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); 7272 7273 /* This test here is synchronized by napi_schedule() 7274 * and napi_complete() to close the race condition. 7275 */ 7276 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) { 7277 tw32(HOSTCC_MODE, tp->coalesce_mode | 7278 HOSTCC_MODE_ENABLE | 7279 tnapi->coal_now); 7280 } 7281 break; 7282 } 7283 } 7284 7285 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1); 7286 return work_done; 7287 7288 tx_recovery: 7289 /* work_done is guaranteed to be less than budget. */ 7290 napi_complete(napi); 7291 tg3_reset_task_schedule(tp); 7292 return work_done; 7293 } 7294 7295 static void tg3_process_error(struct tg3 *tp) 7296 { 7297 u32 val; 7298 bool real_error = false; 7299 7300 if (tg3_flag(tp, ERROR_PROCESSED)) 7301 return; 7302 7303 /* Check Flow Attention register */ 7304 val = tr32(HOSTCC_FLOW_ATTN); 7305 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) { 7306 netdev_err(tp->dev, "FLOW Attention error. 
Resetting chip.\n"); 7307 real_error = true; 7308 } 7309 7310 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) { 7311 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n"); 7312 real_error = true; 7313 } 7314 7315 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) { 7316 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n"); 7317 real_error = true; 7318 } 7319 7320 if (!real_error) 7321 return; 7322 7323 tg3_dump_state(tp); 7324 7325 tg3_flag_set(tp, ERROR_PROCESSED); 7326 tg3_reset_task_schedule(tp); 7327 } 7328 7329 static int tg3_poll(struct napi_struct *napi, int budget) 7330 { 7331 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); 7332 struct tg3 *tp = tnapi->tp; 7333 int work_done = 0; 7334 struct tg3_hw_status *sblk = tnapi->hw_status; 7335 7336 while (1) { 7337 if (sblk->status & SD_STATUS_ERROR) 7338 tg3_process_error(tp); 7339 7340 tg3_poll_link(tp); 7341 7342 work_done = tg3_poll_work(tnapi, work_done, budget); 7343 7344 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) 7345 goto tx_recovery; 7346 7347 if (unlikely(work_done >= budget)) 7348 break; 7349 7350 if (tg3_flag(tp, TAGGED_STATUS)) { 7351 /* tp->last_tag is used in tg3_int_reenable() below 7352 * to tell the hw how much work has been processed, 7353 * so we must read it before checking for more work. 7354 */ 7355 tnapi->last_tag = sblk->status_tag; 7356 tnapi->last_irq_tag = tnapi->last_tag; 7357 rmb(); 7358 } else 7359 sblk->status &= ~SD_STATUS_UPDATED; 7360 7361 if (likely(!tg3_has_work(tnapi))) { 7362 napi_complete_done(napi, work_done); 7363 tg3_int_reenable(tnapi); 7364 break; 7365 } 7366 } 7367 7368 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1); 7369 return work_done; 7370 7371 tx_recovery: 7372 /* work_done is guaranteed to be less than budget. 
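*
* (The polling loop above breaks out once work_done >= budget
* before it can reach this label, and tg3_poll_work() returns
* without adding rx work when TX_RECOVERY_PENDING is set, so
* work_done < budget holds and napi_complete() is legal here.)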
*/ 7373 napi_complete(napi); 7374 tg3_reset_task_schedule(tp); 7375 return work_done; 7376 } 7377 7378 static void tg3_napi_disable(struct tg3 *tp) 7379 { 7380 int i; 7381 7382 for (i = tp->irq_cnt - 1; i >= 0; i--) 7383 napi_disable(&tp->napi[i].napi); 7384 } 7385 7386 static void tg3_napi_enable(struct tg3 *tp) 7387 { 7388 int i; 7389 7390 for (i = 0; i < tp->irq_cnt; i++) 7391 napi_enable(&tp->napi[i].napi); 7392 } 7393 7394 static void tg3_napi_init(struct tg3 *tp) 7395 { 7396 int i; 7397 7398 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64); 7399 for (i = 1; i < tp->irq_cnt; i++) 7400 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64); 7401 } 7402 7403 static void tg3_napi_fini(struct tg3 *tp) 7404 { 7405 int i; 7406 7407 for (i = 0; i < tp->irq_cnt; i++) 7408 netif_napi_del(&tp->napi[i].napi); 7409 } 7410 7411 static inline void tg3_netif_stop(struct tg3 *tp) 7412 { 7413 netif_trans_update(tp->dev); /* prevent tx timeout */ 7414 tg3_napi_disable(tp); 7415 netif_carrier_off(tp->dev); 7416 netif_tx_disable(tp->dev); 7417 } 7418 7419 /* tp->lock must be held */ 7420 static inline void tg3_netif_start(struct tg3 *tp) 7421 { 7422 tg3_ptp_resume(tp); 7423 7424 /* NOTE: unconditional netif_tx_wake_all_queues is only 7425 * appropriate so long as all callers are assured to 7426 * have free tx slots (such as after tg3_init_hw) 7427 */ 7428 netif_tx_wake_all_queues(tp->dev); 7429 7430 if (tp->link_up) 7431 netif_carrier_on(tp->dev); 7432 7433 tg3_napi_enable(tp); 7434 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED; 7435 tg3_enable_ints(tp); 7436 } 7437 7438 static void tg3_irq_quiesce(struct tg3 *tp) 7439 __releases(tp->lock) 7440 __acquires(tp->lock) 7441 { 7442 int i; 7443 7444 BUG_ON(tp->irq_sync); 7445 7446 tp->irq_sync = 1; 7447 smp_mb(); 7448 7449 spin_unlock_bh(&tp->lock); 7450 7451 for (i = 0; i < tp->irq_cnt; i++) 7452 synchronize_irq(tp->napi[i].irq_vec); 7453 7454 spin_lock_bh(&tp->lock); 7455 } 7456 7457 /* Fully shutdown all tg3 driver activity elsewhere in the system. 7458 * If irq_sync is non-zero, then the IRQ handler must be synchronized 7459 * with as well. Most of the time, this is not necessary except when 7460 * shutting down the device. 7461 */ 7462 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync) 7463 { 7464 spin_lock_bh(&tp->lock); 7465 if (irq_sync) 7466 tg3_irq_quiesce(tp); 7467 } 7468 7469 static inline void tg3_full_unlock(struct tg3 *tp) 7470 { 7471 spin_unlock_bh(&tp->lock); 7472 } 7473 7474 /* One-shot MSI handler - Chip automatically disables interrupt 7475 * after sending MSI so driver doesn't have to do it. 7476 */ 7477 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id) 7478 { 7479 struct tg3_napi *tnapi = dev_id; 7480 struct tg3 *tp = tnapi->tp; 7481 7482 prefetch(tnapi->hw_status); 7483 if (tnapi->rx_rcb) 7484 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); 7485 7486 if (likely(!tg3_irq_sync(tp))) 7487 napi_schedule(&tnapi->napi); 7488 7489 return IRQ_HANDLED; 7490 } 7491 7492 /* MSI ISR - No need to check for interrupt sharing and no need to 7493 * flush status block and interrupt mailbox. PCI ordering rules 7494 * guarantee that MSI will arrive after the status block. 
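*
* (Both the status block update and the MSI message are posted
* writes from the same device, and PCI ordering keeps posted
* writes from one requester in order; by the time the CPU takes
* the interrupt, the status block contents are visible.)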
7495 */
7496 static irqreturn_t tg3_msi(int irq, void *dev_id)
7497 {
7498 struct tg3_napi *tnapi = dev_id;
7499 struct tg3 *tp = tnapi->tp;
7500
7501 prefetch(tnapi->hw_status);
7502 if (tnapi->rx_rcb)
7503 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7504 /*
7505 * Writing any value to intr-mbox-0 clears PCI INTA# and
7506 * chip-internal interrupt pending events.
7507 * Writing non-zero to intr-mbox-0 additionally tells the
7508 * NIC to stop sending us irqs, engaging "in-intr-handler"
7509 * event coalescing.
7510 */
7511 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7512 if (likely(!tg3_irq_sync(tp)))
7513 napi_schedule(&tnapi->napi);
7514
7515 return IRQ_RETVAL(1);
7516 }
7517
7518 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7519 {
7520 struct tg3_napi *tnapi = dev_id;
7521 struct tg3 *tp = tnapi->tp;
7522 struct tg3_hw_status *sblk = tnapi->hw_status;
7523 unsigned int handled = 1;
7524
7525 /* In INTx mode, it is possible for the interrupt to arrive at
7526 * the CPU before the status block posted prior to the interrupt.
7527 * Reading the PCI State register will confirm whether the
7528 * interrupt is ours and will flush the status block.
7529 */
7530 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7531 if (tg3_flag(tp, CHIP_RESETTING) ||
7532 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7533 handled = 0;
7534 goto out;
7535 }
7536 }
7537
7538 /*
7539 * Writing any value to intr-mbox-0 clears PCI INTA# and
7540 * chip-internal interrupt pending events.
7541 * Writing non-zero to intr-mbox-0 additionally tells the
7542 * NIC to stop sending us irqs, engaging "in-intr-handler"
7543 * event coalescing.
7544 *
7545 * Flush the mailbox to de-assert the IRQ immediately to prevent
7546 * spurious interrupts. The flush impacts performance but
7547 * excessive spurious interrupts can be worse in some cases.
7548 */
7549 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7550 if (tg3_irq_sync(tp))
7551 goto out;
7552 sblk->status &= ~SD_STATUS_UPDATED;
7553 if (likely(tg3_has_work(tnapi))) {
7554 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7555 napi_schedule(&tnapi->napi);
7556 } else {
7557 /* No work, shared interrupt perhaps? Re-enable
7558 * interrupts, and flush that PCI write
7559 */
7560 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7561 0x00000000);
7562 }
7563 out:
7564 return IRQ_RETVAL(handled);
7565 }
7566
7567 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7568 {
7569 struct tg3_napi *tnapi = dev_id;
7570 struct tg3 *tp = tnapi->tp;
7571 struct tg3_hw_status *sblk = tnapi->hw_status;
7572 unsigned int handled = 1;
7573
7574 /* In INTx mode, it is possible for the interrupt to arrive at
7575 * the CPU before the status block posted prior to the interrupt.
7576 * Reading the PCI State register will confirm whether the
7577 * interrupt is ours and will flush the status block.
7578 */
7579 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7580 if (tg3_flag(tp, CHIP_RESETTING) ||
7581 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7582 handled = 0;
7583 goto out;
7584 }
7585 }
7586
7587 /*
7588 * Writing any value to intr-mbox-0 clears PCI INTA# and
7589 * chip-internal interrupt pending events.
7590 * Writing non-zero to intr-mbox-0 additionally tells the
7591 * NIC to stop sending us irqs, engaging "in-intr-handler"
7592 * event coalescing.
7593 *
7594 * Flush the mailbox to de-assert the IRQ immediately to prevent
7595 * spurious interrupts. 
The flush impacts performance but
7596 * excessive spurious interrupts can be worse in some cases.
7597 */
7598 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7599
7600 /*
7601 * In a shared interrupt configuration, sometimes other devices'
7602 * interrupts will scream. We record the current status tag here
7603 * so that the above check can report that the screaming interrupts
7604 * are unhandled. Eventually they will be silenced.
7605 */
7606 tnapi->last_irq_tag = sblk->status_tag;
7607
7608 if (tg3_irq_sync(tp))
7609 goto out;
7610
7611 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7612
7613 napi_schedule(&tnapi->napi);
7614
7615 out:
7616 return IRQ_RETVAL(handled);
7617 }
7618
7619 /* ISR for interrupt test */
7620 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7621 {
7622 struct tg3_napi *tnapi = dev_id;
7623 struct tg3 *tp = tnapi->tp;
7624 struct tg3_hw_status *sblk = tnapi->hw_status;
7625
7626 if ((sblk->status & SD_STATUS_UPDATED) ||
7627 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7628 tg3_disable_ints(tp);
7629 return IRQ_RETVAL(1);
7630 }
7631 return IRQ_RETVAL(0);
7632 }
7633
7634 #ifdef CONFIG_NET_POLL_CONTROLLER
7635 static void tg3_poll_controller(struct net_device *dev)
7636 {
7637 int i;
7638 struct tg3 *tp = netdev_priv(dev);
7639
7640 if (tg3_irq_sync(tp))
7641 return;
7642
7643 for (i = 0; i < tp->irq_cnt; i++)
7644 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7645 }
7646 #endif
7647
7648 static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue)
7649 {
7650 struct tg3 *tp = netdev_priv(dev);
7651
7652 if (netif_msg_tx_err(tp)) {
7653 netdev_err(dev, "transmit timed out, resetting\n");
7654 tg3_dump_state(tp);
7655 }
7656
7657 tg3_reset_task_schedule(tp);
7658 }
7659
7660 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc.
* The sum is done in 32-bit arithmetic, so a boundary crossing
* shows up as wraparound. Worked example (editor's illustration):
* mapping = 0xfffff000, len = 0x2000 gives base + len + 8 =
* 0x100001008, which truncates to 0x1008; 0x1008 < 0xfffff000,
* so the test fires. */
7661 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7662 {
7663 u32 base = (u32) mapping & 0xffffffff;
7664
7665 return base + len + 8 < base;
7666 }
7667
7668 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7669 * of any 4GB boundaries: 4G, 8G, etc
7670 */
7671 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7672 u32 len, u32 mss)
7673 {
7674 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7675 u32 base = (u32) mapping & 0xffffffff;
7676
7677 return ((base + len + (mss & 0x3fff)) < base);
7678 }
7679 return 0;
7680 }
7681
7682 /* Test for DMA addresses > 40-bit */
7683 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7684 int len)
7685 {
7686 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7687 if (tg3_flag(tp, 40BIT_DMA_BUG))
7688 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7689 return 0;
7690 #else
7691 return 0;
7692 #endif
7693 }
7694
7695 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7696 dma_addr_t mapping, u32 len, u32 flags,
7697 u32 mss, u32 vlan)
7698 {
7699 txbd->addr_hi = ((u64) mapping >> 32);
7700 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7701 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7702 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7703 }
7704
7705 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7706 dma_addr_t map, u32 len, u32 flags,
7707 u32 mss, u32 vlan)
7708 {
7709 struct tg3 *tp = tnapi->tp;
7710 bool hwbug = false;
7711
7712 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7713 hwbug = true;
7714
7715 if (tg3_4g_overflow_test(map, 
len))
7716 hwbug = true;
7717
7718 if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7719 hwbug = true;
7720
7721 if (tg3_40bit_overflow_test(tp, map, len))
7722 hwbug = true;
7723
7724 if (tp->dma_limit) {
7725 u32 prvidx = *entry;
7726 u32 tmp_flag = flags & ~TXD_FLAG_END;
7727 while (len > tp->dma_limit && *budget) {
7728 u32 frag_len = tp->dma_limit;
7729 len -= tp->dma_limit;
7730
7731 /* Avoid the 8-byte DMA problem: keep both pieces
* longer than 8 bytes */
7732 if (len <= 8) {
7733 len += tp->dma_limit / 2;
7734 frag_len = tp->dma_limit / 2;
7735 }
7736
7737 tnapi->tx_buffers[*entry].fragmented = true;
7738
7739 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7740 frag_len, tmp_flag, mss, vlan);
7741 *budget -= 1;
7742 prvidx = *entry;
7743 *entry = NEXT_TX(*entry);
7744
7745 map += frag_len;
7746 }
7747
7748 if (len) {
7749 if (*budget) {
7750 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7751 len, flags, mss, vlan);
7752 *budget -= 1;
7753 *entry = NEXT_TX(*entry);
7754 } else {
7755 hwbug = true;
7756 tnapi->tx_buffers[prvidx].fragmented = false;
7757 }
7758 }
7759 } else {
7760 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7761 len, flags, mss, vlan);
7762 *entry = NEXT_TX(*entry);
7763 }
7764
7765 return hwbug;
7766 }
7767
7768 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7769 {
7770 int i;
7771 struct sk_buff *skb;
7772 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7773
7774 skb = txb->skb;
7775 txb->skb = NULL;
7776
7777 pci_unmap_single(tnapi->tp->pdev,
7778 dma_unmap_addr(txb, mapping),
7779 skb_headlen(skb),
7780 PCI_DMA_TODEVICE);
7781
7782 while (txb->fragmented) {
7783 txb->fragmented = false;
7784 entry = NEXT_TX(entry);
7785 txb = &tnapi->tx_buffers[entry];
7786 }
7787
7788 for (i = 0; i <= last; i++) {
7789 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7790
7791 entry = NEXT_TX(entry);
7792 txb = &tnapi->tx_buffers[entry];
7793
7794 pci_unmap_page(tnapi->tp->pdev,
7795 dma_unmap_addr(txb, mapping),
7796 skb_frag_size(frag), PCI_DMA_TODEVICE);
7797
7798 while (txb->fragmented) {
7799 txb->fragmented = false;
7800 entry = NEXT_TX(entry);
7801 txb = &tnapi->tx_buffers[entry];
7802 }
7803 }
7804 }
7805
7806 /* Work around 4GB and 40-bit hardware DMA bugs. */
7807 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7808 struct sk_buff **pskb,
7809 u32 *entry, u32 *budget,
7810 u32 base_flags, u32 mss, u32 vlan)
7811 {
7812 struct tg3 *tp = tnapi->tp;
7813 struct sk_buff *new_skb, *skb = *pskb;
7814 dma_addr_t new_addr = 0;
7815 int ret = 0;
7816
7817 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7818 new_skb = skb_copy(skb, GFP_ATOMIC);
7819 else {
7820 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7821
7822 new_skb = skb_copy_expand(skb,
7823 skb_headroom(skb) + more_headroom,
7824 skb_tailroom(skb), GFP_ATOMIC);
7825 }
7826
7827 if (!new_skb) {
7828 ret = -1;
7829 } else {
7830 /* New SKB is guaranteed to be linear. 
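*
* (skb_copy() and skb_copy_expand() both return a copy whose
* data is entirely in the linear area, which is what allows the
* whole skb to be mapped with the single pci_map_single() call
* below.)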
*/
7831 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7832 PCI_DMA_TODEVICE);
7833 /* Make sure the mapping succeeded */
7834 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7835 dev_kfree_skb_any(new_skb);
7836 ret = -1;
7837 } else {
7838 u32 save_entry = *entry;
7839
7840 base_flags |= TXD_FLAG_END;
7841
7842 tnapi->tx_buffers[*entry].skb = new_skb;
7843 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7844 mapping, new_addr);
7845
7846 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7847 new_skb->len, base_flags,
7848 mss, vlan)) {
7849 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7850 dev_kfree_skb_any(new_skb);
7851 ret = -1;
7852 }
7853 }
7854 }
7855
7856 dev_consume_skb_any(skb);
7857 *pskb = new_skb;
7858 return ret;
7859 }
7860
7861 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7862 {
7863 /* Check if we will never have enough descriptors,
7864 * as gso_segs can be more than the current ring size
7865 */
7866 return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7867 }
7868
7869 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7870
7871 /* Use GSO to work around all TSO packets that meet the HW bug conditions
7872 * indicated in tg3_tx_frag_set()
7873 */
7874 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7875 struct netdev_queue *txq, struct sk_buff *skb)
7876 {
7877 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7878 struct sk_buff *segs, *seg, *next;
7879
7880 /* Estimate the number of fragments in the worst case */
7881 if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7882 netif_tx_stop_queue(txq);
7883
7884 /* netif_tx_stop_queue() must be done before checking
7885 * the tx index in tg3_tx_avail() below, because in
7886 * tg3_tx(), we update the tx index before checking for
7887 * netif_tx_queue_stopped().
7888 */
7889 smp_mb();
7890 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7891 return NETDEV_TX_BUSY;
7892
7893 netif_tx_wake_queue(txq);
7894 }
7895
7896 segs = skb_gso_segment(skb, tp->dev->features &
7897 ~(NETIF_F_TSO | NETIF_F_TSO6));
7898 if (IS_ERR(segs) || !segs)
7899 goto tg3_tso_bug_end;
7900
7901 skb_list_walk_safe(segs, seg, next) {
7902 skb_mark_not_on_list(seg);
7903 tg3_start_xmit(seg, tp->dev);
7904 }
7905
7906 tg3_tso_bug_end:
7907 dev_consume_skb_any(skb);
7908
7909 return NETDEV_TX_OK;
7910 }
7911
7912 /* hard_start_xmit for all devices */
7913 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7914 {
7915 struct tg3 *tp = netdev_priv(dev);
7916 u32 len, entry, base_flags, mss, vlan = 0;
7917 u32 budget;
7918 int i = -1, would_hit_hwbug;
7919 dma_addr_t mapping;
7920 struct tg3_napi *tnapi;
7921 struct netdev_queue *txq;
7922 unsigned int last;
7923 struct iphdr *iph = NULL;
7924 struct tcphdr *tcph = NULL;
7925 __sum16 tcp_csum = 0, ip_csum = 0;
7926 __be16 ip_tot_len = 0;
7927
7928 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7929 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7930 if (tg3_flag(tp, ENABLE_TSS))
7931 tnapi++;
7932
7933 budget = tg3_tx_avail(tnapi);
7934
7935 /* We are running in BH disabled context with netif_tx_lock
7936 * and TX reclaim runs via tp->napi.poll inside of a software
7937 * interrupt. Furthermore, IRQ processing runs lockless so we have
7938 * no IRQ context deadlocks to worry about either. Rejoice!
7939 */
7940 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7941 if (!netif_tx_queue_stopped(txq)) {
7942 netif_tx_stop_queue(txq);
7943
7944 /* This is a hard error, log it. 
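*
* (Reaching this branch means the stop/wake accounting failed:
* the queue was awake even though fewer than nr_frags + 1
* descriptors were free.)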
*/
7945 netdev_err(dev,
7946 "BUG! Tx Ring full when queue awake!\n");
7947 }
7948 return NETDEV_TX_BUSY;
7949 }
7950
7951 entry = tnapi->tx_prod;
7952 base_flags = 0;
7953
7954 mss = skb_shinfo(skb)->gso_size;
7955 if (mss) {
7956 u32 tcp_opt_len, hdr_len;
7957
7958 if (skb_cow_head(skb, 0))
7959 goto drop;
7960
7961 iph = ip_hdr(skb);
7962 tcp_opt_len = tcp_optlen(skb);
7963
7964 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7965
7966 /* HW/FW cannot correctly segment packets that have been
7967 * VLAN encapsulated.
7968 */
7969 if (skb->protocol == htons(ETH_P_8021Q) ||
7970 skb->protocol == htons(ETH_P_8021AD)) {
7971 if (tg3_tso_bug_gso_check(tnapi, skb))
7972 return tg3_tso_bug(tp, tnapi, txq, skb);
7973 goto drop;
7974 }
7975
7976 if (!skb_is_gso_v6(skb)) {
7977 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7978 tg3_flag(tp, TSO_BUG)) {
7979 if (tg3_tso_bug_gso_check(tnapi, skb))
7980 return tg3_tso_bug(tp, tnapi, txq, skb);
7981 goto drop;
7982 }
7983 ip_csum = iph->check;
7984 ip_tot_len = iph->tot_len;
7985 iph->check = 0;
7986 iph->tot_len = htons(mss + hdr_len);
7987 }
7988
7989 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7990 TXD_FLAG_CPU_POST_DMA);
7991
7992 tcph = tcp_hdr(skb);
7993 tcp_csum = tcph->check;
7994
7995 if (tg3_flag(tp, HW_TSO_1) ||
7996 tg3_flag(tp, HW_TSO_2) ||
7997 tg3_flag(tp, HW_TSO_3)) {
7998 tcph->check = 0;
7999 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
8000 } else {
8001 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
8002 0, IPPROTO_TCP, 0);
8003 }
8004
8005 if (tg3_flag(tp, HW_TSO_3)) {
8006 mss |= (hdr_len & 0xc) << 12;
8007 if (hdr_len & 0x10)
8008 base_flags |= 0x00000010;
8009 base_flags |= (hdr_len & 0x3e0) << 5;
8010 } else if (tg3_flag(tp, HW_TSO_2))
8011 mss |= hdr_len << 9;
8012 else if (tg3_flag(tp, HW_TSO_1) ||
8013 tg3_asic_rev(tp) == ASIC_REV_5705) {
8014 if (tcp_opt_len || iph->ihl > 5) {
8015 int tsflags;
8016
8017 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8018 mss |= (tsflags << 11);
8019 }
8020 } else {
8021 if (tcp_opt_len || iph->ihl > 5) {
8022 int tsflags;
8023
8024 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8025 base_flags |= tsflags << 12;
8026 }
8027 }
8028 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8029 /* HW/FW cannot correctly checksum packets that have been
8030 * VLAN encapsulated.
8031 */
8032 if (skb->protocol == htons(ETH_P_8021Q) ||
8033 skb->protocol == htons(ETH_P_8021AD)) {
8034 if (skb_checksum_help(skb))
8035 goto drop;
8036 } else {
8037 base_flags |= TXD_FLAG_TCPUDP_CSUM;
8038 }
8039 }
8040
8041 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8042 !mss && skb->len > VLAN_ETH_FRAME_LEN)
8043 base_flags |= TXD_FLAG_JMB_PKT;
8044
8045 if (skb_vlan_tag_present(skb)) {
8046 base_flags |= TXD_FLAG_VLAN;
8047 vlan = skb_vlan_tag_get(skb);
8048 }
8049
8050 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8051 tg3_flag(tp, TX_TSTAMP_EN)) {
8052 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8053 base_flags |= TXD_FLAG_HWTSTAMP;
8054 }
8055
8056 len = skb_headlen(skb);
8057
8058 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8059 if (pci_dma_mapping_error(tp->pdev, mapping))
8060 goto drop;
8061
8062
8063 tnapi->tx_buffers[entry].skb = skb;
8064 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8065
8066 would_hit_hwbug = 0;
8067
8068 if (tg3_flag(tp, 5701_DMA_BUG))
8069 would_hit_hwbug = 1;
8070
8071 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8072 ((skb_shinfo(skb)->nr_frags == 0) ? 
TXD_FLAG_END : 0),
8073 mss, vlan)) {
8074 would_hit_hwbug = 1;
8075 } else if (skb_shinfo(skb)->nr_frags > 0) {
8076 u32 tmp_mss = mss;
8077
8078 if (!tg3_flag(tp, HW_TSO_1) &&
8079 !tg3_flag(tp, HW_TSO_2) &&
8080 !tg3_flag(tp, HW_TSO_3))
8081 tmp_mss = 0;
8082
8083 /* Now loop through additional data
8084 * fragments, and queue them.
8085 */
8086 last = skb_shinfo(skb)->nr_frags - 1;
8087 for (i = 0; i <= last; i++) {
8088 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8089
8090 len = skb_frag_size(frag);
8091 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8092 len, DMA_TO_DEVICE);
8093
8094 tnapi->tx_buffers[entry].skb = NULL;
8095 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8096 mapping);
8097 if (dma_mapping_error(&tp->pdev->dev, mapping))
8098 goto dma_error;
8099
8100 if (!budget ||
8101 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8102 len, base_flags |
8103 ((i == last) ? TXD_FLAG_END : 0),
8104 tmp_mss, vlan)) {
8105 would_hit_hwbug = 1;
8106 break;
8107 }
8108 }
8109 }
8110
8111 if (would_hit_hwbug) {
8112 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8113
8114 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8115 /* If it's a TSO packet, do GSO instead of
8116 * allocating and copying to a large linear SKB
8117 */
8118 if (ip_tot_len) {
8119 iph->check = ip_csum;
8120 iph->tot_len = ip_tot_len;
8121 }
8122 tcph->check = tcp_csum;
8123 return tg3_tso_bug(tp, tnapi, txq, skb);
8124 }
8125
8126 /* If the workaround fails due to memory/mapping
8127 * failure, silently drop this packet.
8128 */
8129 entry = tnapi->tx_prod;
8130 budget = tg3_tx_avail(tnapi);
8131 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8132 base_flags, mss, vlan))
8133 goto drop_nofree;
8134 }
8135
8136 skb_tx_timestamp(skb);
8137 netdev_tx_sent_queue(txq, skb->len);
8138
8139 /* Sync BD data before updating mailbox */
8140 wmb();
8141
8142 tnapi->tx_prod = entry;
8143 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8144 netif_tx_stop_queue(txq);
8145
8146 /* netif_tx_stop_queue() must be done before checking
8147 * the tx index in tg3_tx_avail() below, because in
8148 * tg3_tx(), we update the tx index before checking for
8149 * netif_tx_queue_stopped().
8150 */
8151 smp_mb();
8152 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8153 netif_tx_wake_queue(txq);
8154 }
8155
8156 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
8157 /* Packets are ready, update Tx producer idx on card. 
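*
* (The doorbell write is deferred while netdev_xmit_more() says
* more packets are coming, so a burst costs a single MMIO write;
* the last packet of a burst, or a stopped queue, flushes it.)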
*/ 8158 tw32_tx_mbox(tnapi->prodmbox, entry); 8159 } 8160 8161 return NETDEV_TX_OK; 8162 8163 dma_error: 8164 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i); 8165 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL; 8166 drop: 8167 dev_kfree_skb_any(skb); 8168 drop_nofree: 8169 tp->tx_dropped++; 8170 return NETDEV_TX_OK; 8171 } 8172 8173 static void tg3_mac_loopback(struct tg3 *tp, bool enable) 8174 { 8175 if (enable) { 8176 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX | 8177 MAC_MODE_PORT_MODE_MASK); 8178 8179 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK; 8180 8181 if (!tg3_flag(tp, 5705_PLUS)) 8182 tp->mac_mode |= MAC_MODE_LINK_POLARITY; 8183 8184 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) 8185 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 8186 else 8187 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 8188 } else { 8189 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK; 8190 8191 if (tg3_flag(tp, 5705_PLUS) || 8192 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) || 8193 tg3_asic_rev(tp) == ASIC_REV_5700) 8194 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; 8195 } 8196 8197 tw32(MAC_MODE, tp->mac_mode); 8198 udelay(40); 8199 } 8200 8201 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk) 8202 { 8203 u32 val, bmcr, mac_mode, ptest = 0; 8204 8205 tg3_phy_toggle_apd(tp, false); 8206 tg3_phy_toggle_automdix(tp, false); 8207 8208 if (extlpbk && tg3_phy_set_extloopbk(tp)) 8209 return -EIO; 8210 8211 bmcr = BMCR_FULLDPLX; 8212 switch (speed) { 8213 case SPEED_10: 8214 break; 8215 case SPEED_100: 8216 bmcr |= BMCR_SPEED100; 8217 break; 8218 case SPEED_1000: 8219 default: 8220 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 8221 speed = SPEED_100; 8222 bmcr |= BMCR_SPEED100; 8223 } else { 8224 speed = SPEED_1000; 8225 bmcr |= BMCR_SPEED1000; 8226 } 8227 } 8228 8229 if (extlpbk) { 8230 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) { 8231 tg3_readphy(tp, MII_CTRL1000, &val); 8232 val |= CTL1000_AS_MASTER | 8233 CTL1000_ENABLE_MASTER; 8234 tg3_writephy(tp, MII_CTRL1000, val); 8235 } else { 8236 ptest = MII_TG3_FET_PTEST_TRIM_SEL | 8237 MII_TG3_FET_PTEST_TRIM_2; 8238 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest); 8239 } 8240 } else 8241 bmcr |= BMCR_LOOPBACK; 8242 8243 tg3_writephy(tp, MII_BMCR, bmcr); 8244 8245 /* The write needs to be flushed for the FETs */ 8246 if (tp->phy_flags & TG3_PHYFLG_IS_FET) 8247 tg3_readphy(tp, MII_BMCR, &bmcr); 8248 8249 udelay(40); 8250 8251 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) && 8252 tg3_asic_rev(tp) == ASIC_REV_5785) { 8253 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest | 8254 MII_TG3_FET_PTEST_FRC_TX_LINK | 8255 MII_TG3_FET_PTEST_FRC_TX_LOCK); 8256 8257 /* The write needs to be flushed for the AC131 */ 8258 tg3_readphy(tp, MII_TG3_FET_PTEST, &val); 8259 } 8260 8261 /* Reset to prevent losing 1st rx packet intermittently */ 8262 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && 8263 tg3_flag(tp, 5780_CLASS)) { 8264 tw32_f(MAC_RX_MODE, RX_MODE_RESET); 8265 udelay(10); 8266 tw32_f(MAC_RX_MODE, tp->rx_mode); 8267 } 8268 8269 mac_mode = tp->mac_mode & 8270 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); 8271 if (speed == SPEED_1000) 8272 mac_mode |= MAC_MODE_PORT_MODE_GMII; 8273 else 8274 mac_mode |= MAC_MODE_PORT_MODE_MII; 8275 8276 if (tg3_asic_rev(tp) == ASIC_REV_5700) { 8277 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK; 8278 8279 if (masked_phy_id == TG3_PHY_ID_BCM5401) 8280 mac_mode &= ~MAC_MODE_LINK_POLARITY; 8281 else if (masked_phy_id == TG3_PHY_ID_BCM5411) 8282 mac_mode |= MAC_MODE_LINK_POLARITY; 8283 8284 tg3_writephy(tp, MII_TG3_EXT_CTRL, 8285 MII_TG3_EXT_CTRL_LNK3_LED_MODE); 8286 } 8287 8288 tw32(MAC_MODE, 
mac_mode); 8289 udelay(40); 8290 8291 return 0; 8292 } 8293 8294 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features) 8295 { 8296 struct tg3 *tp = netdev_priv(dev); 8297 8298 if (features & NETIF_F_LOOPBACK) { 8299 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK) 8300 return; 8301 8302 spin_lock_bh(&tp->lock); 8303 tg3_mac_loopback(tp, true); 8304 netif_carrier_on(tp->dev); 8305 spin_unlock_bh(&tp->lock); 8306 netdev_info(dev, "Internal MAC loopback mode enabled.\n"); 8307 } else { 8308 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)) 8309 return; 8310 8311 spin_lock_bh(&tp->lock); 8312 tg3_mac_loopback(tp, false); 8313 /* Force link status check */ 8314 tg3_setup_phy(tp, true); 8315 spin_unlock_bh(&tp->lock); 8316 netdev_info(dev, "Internal MAC loopback mode disabled.\n"); 8317 } 8318 } 8319 8320 static netdev_features_t tg3_fix_features(struct net_device *dev, 8321 netdev_features_t features) 8322 { 8323 struct tg3 *tp = netdev_priv(dev); 8324 8325 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS)) 8326 features &= ~NETIF_F_ALL_TSO; 8327 8328 return features; 8329 } 8330 8331 static int tg3_set_features(struct net_device *dev, netdev_features_t features) 8332 { 8333 netdev_features_t changed = dev->features ^ features; 8334 8335 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev)) 8336 tg3_set_loopback(dev, features); 8337 8338 return 0; 8339 } 8340 8341 static void tg3_rx_prodring_free(struct tg3 *tp, 8342 struct tg3_rx_prodring_set *tpr) 8343 { 8344 int i; 8345 8346 if (tpr != &tp->napi[0].prodring) { 8347 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx; 8348 i = (i + 1) & tp->rx_std_ring_mask) 8349 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i], 8350 tp->rx_pkt_map_sz); 8351 8352 if (tg3_flag(tp, JUMBO_CAPABLE)) { 8353 for (i = tpr->rx_jmb_cons_idx; 8354 i != tpr->rx_jmb_prod_idx; 8355 i = (i + 1) & tp->rx_jmb_ring_mask) { 8356 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i], 8357 TG3_RX_JMB_MAP_SZ); 8358 } 8359 } 8360 8361 return; 8362 } 8363 8364 for (i = 0; i <= tp->rx_std_ring_mask; i++) 8365 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i], 8366 tp->rx_pkt_map_sz); 8367 8368 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) { 8369 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) 8370 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i], 8371 TG3_RX_JMB_MAP_SZ); 8372 } 8373 } 8374 8375 /* Initialize rx rings for packet processing. 8376 * 8377 * The chip has been shut down and the driver detached from 8378 * the networking, so no interrupts or new tx packets will 8379 * end up in the driver. tp->{tx,}lock are held and thus 8380 * we may not sleep. 8381 */ 8382 static int tg3_rx_prodring_alloc(struct tg3 *tp, 8383 struct tg3_rx_prodring_set *tpr) 8384 { 8385 u32 i, rx_pkt_dma_sz; 8386 8387 tpr->rx_std_cons_idx = 0; 8388 tpr->rx_std_prod_idx = 0; 8389 tpr->rx_jmb_cons_idx = 0; 8390 tpr->rx_jmb_prod_idx = 0; 8391 8392 if (tpr != &tp->napi[0].prodring) { 8393 memset(&tpr->rx_std_buffers[0], 0, 8394 TG3_RX_STD_BUFF_RING_SIZE(tp)); 8395 if (tpr->rx_jmb_buffers) 8396 memset(&tpr->rx_jmb_buffers[0], 0, 8397 TG3_RX_JMB_BUFF_RING_SIZE(tp)); 8398 goto done; 8399 } 8400 8401 /* Zero out all descriptors. */ 8402 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp)); 8403 8404 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ; 8405 if (tg3_flag(tp, 5780_CLASS) && 8406 tp->dev->mtu > ETH_DATA_LEN) 8407 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ; 8408 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz); 8409 8410 /* Initialize invariants of the rings, we only set this 8411 * stuff once. 
This works because the card does not 8412 * write into the rx buffer posting rings. 8413 */ 8414 for (i = 0; i <= tp->rx_std_ring_mask; i++) { 8415 struct tg3_rx_buffer_desc *rxd; 8416 8417 rxd = &tpr->rx_std[i]; 8418 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT; 8419 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT); 8420 rxd->opaque = (RXD_OPAQUE_RING_STD | 8421 (i << RXD_OPAQUE_INDEX_SHIFT)); 8422 } 8423 8424 /* Now allocate fresh SKBs for each rx ring. */ 8425 for (i = 0; i < tp->rx_pending; i++) { 8426 unsigned int frag_size; 8427 8428 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i, 8429 &frag_size) < 0) { 8430 netdev_warn(tp->dev, 8431 "Using a smaller RX standard ring. Only " 8432 "%d out of %d buffers were allocated " 8433 "successfully\n", i, tp->rx_pending); 8434 if (i == 0) 8435 goto initfail; 8436 tp->rx_pending = i; 8437 break; 8438 } 8439 } 8440 8441 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS)) 8442 goto done; 8443 8444 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp)); 8445 8446 if (!tg3_flag(tp, JUMBO_RING_ENABLE)) 8447 goto done; 8448 8449 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) { 8450 struct tg3_rx_buffer_desc *rxd; 8451 8452 rxd = &tpr->rx_jmb[i].std; 8453 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT; 8454 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) | 8455 RXD_FLAG_JUMBO; 8456 rxd->opaque = (RXD_OPAQUE_RING_JUMBO | 8457 (i << RXD_OPAQUE_INDEX_SHIFT)); 8458 } 8459 8460 for (i = 0; i < tp->rx_jumbo_pending; i++) { 8461 unsigned int frag_size; 8462 8463 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i, 8464 &frag_size) < 0) { 8465 netdev_warn(tp->dev, 8466 "Using a smaller RX jumbo ring. Only %d " 8467 "out of %d buffers were allocated " 8468 "successfully\n", i, tp->rx_jumbo_pending); 8469 if (i == 0) 8470 goto initfail; 8471 tp->rx_jumbo_pending = i; 8472 break; 8473 } 8474 } 8475 8476 done: 8477 return 0; 8478 8479 initfail: 8480 tg3_rx_prodring_free(tp, tpr); 8481 return -ENOMEM; 8482 } 8483 8484 static void tg3_rx_prodring_fini(struct tg3 *tp, 8485 struct tg3_rx_prodring_set *tpr) 8486 { 8487 kfree(tpr->rx_std_buffers); 8488 tpr->rx_std_buffers = NULL; 8489 kfree(tpr->rx_jmb_buffers); 8490 tpr->rx_jmb_buffers = NULL; 8491 if (tpr->rx_std) { 8492 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp), 8493 tpr->rx_std, tpr->rx_std_mapping); 8494 tpr->rx_std = NULL; 8495 } 8496 if (tpr->rx_jmb) { 8497 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp), 8498 tpr->rx_jmb, tpr->rx_jmb_mapping); 8499 tpr->rx_jmb = NULL; 8500 } 8501 } 8502 8503 static int tg3_rx_prodring_init(struct tg3 *tp, 8504 struct tg3_rx_prodring_set *tpr) 8505 { 8506 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp), 8507 GFP_KERNEL); 8508 if (!tpr->rx_std_buffers) 8509 return -ENOMEM; 8510 8511 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev, 8512 TG3_RX_STD_RING_BYTES(tp), 8513 &tpr->rx_std_mapping, 8514 GFP_KERNEL); 8515 if (!tpr->rx_std) 8516 goto err_out; 8517 8518 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) { 8519 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp), 8520 GFP_KERNEL); 8521 if (!tpr->rx_jmb_buffers) 8522 goto err_out; 8523 8524 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev, 8525 TG3_RX_JMB_RING_BYTES(tp), 8526 &tpr->rx_jmb_mapping, 8527 GFP_KERNEL); 8528 if (!tpr->rx_jmb) 8529 goto err_out; 8530 } 8531 8532 return 0; 8533 8534 err_out: 8535 tg3_rx_prodring_fini(tp, tpr); 8536 return -ENOMEM; 8537 } 8538 8539 /* Free up pending packets in all rx/tx rings. 
8540 * 8541 * The chip has been shut down and the driver detached from 8542 * the networking, so no interrupts or new tx packets will 8543 * end up in the driver. tp->{tx,}lock is not held and we are not 8544 * in an interrupt context and thus may sleep. 8545 */ 8546 static void tg3_free_rings(struct tg3 *tp) 8547 { 8548 int i, j; 8549 8550 for (j = 0; j < tp->irq_cnt; j++) { 8551 struct tg3_napi *tnapi = &tp->napi[j]; 8552 8553 tg3_rx_prodring_free(tp, &tnapi->prodring); 8554 8555 if (!tnapi->tx_buffers) 8556 continue; 8557 8558 for (i = 0; i < TG3_TX_RING_SIZE; i++) { 8559 struct sk_buff *skb = tnapi->tx_buffers[i].skb; 8560 8561 if (!skb) 8562 continue; 8563 8564 tg3_tx_skb_unmap(tnapi, i, 8565 skb_shinfo(skb)->nr_frags - 1); 8566 8567 dev_consume_skb_any(skb); 8568 } 8569 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j)); 8570 } 8571 } 8572 8573 /* Initialize tx/rx rings for packet processing. 8574 * 8575 * The chip has been shut down and the driver detached from 8576 * the networking, so no interrupts or new tx packets will 8577 * end up in the driver. tp->{tx,}lock are held and thus 8578 * we may not sleep. 8579 */ 8580 static int tg3_init_rings(struct tg3 *tp) 8581 { 8582 int i; 8583 8584 /* Free up all the SKBs. */ 8585 tg3_free_rings(tp); 8586 8587 for (i = 0; i < tp->irq_cnt; i++) { 8588 struct tg3_napi *tnapi = &tp->napi[i]; 8589 8590 tnapi->last_tag = 0; 8591 tnapi->last_irq_tag = 0; 8592 tnapi->hw_status->status = 0; 8593 tnapi->hw_status->status_tag = 0; 8594 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); 8595 8596 tnapi->tx_prod = 0; 8597 tnapi->tx_cons = 0; 8598 if (tnapi->tx_ring) 8599 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES); 8600 8601 tnapi->rx_rcb_ptr = 0; 8602 if (tnapi->rx_rcb) 8603 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); 8604 8605 if (tnapi->prodring.rx_std && 8606 tg3_rx_prodring_alloc(tp, &tnapi->prodring)) { 8607 tg3_free_rings(tp); 8608 return -ENOMEM; 8609 } 8610 } 8611 8612 return 0; 8613 } 8614 8615 static void tg3_mem_tx_release(struct tg3 *tp) 8616 { 8617 int i; 8618 8619 for (i = 0; i < tp->irq_max; i++) { 8620 struct tg3_napi *tnapi = &tp->napi[i]; 8621 8622 if (tnapi->tx_ring) { 8623 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES, 8624 tnapi->tx_ring, tnapi->tx_desc_mapping); 8625 tnapi->tx_ring = NULL; 8626 } 8627 8628 kfree(tnapi->tx_buffers); 8629 tnapi->tx_buffers = NULL; 8630 } 8631 } 8632 8633 static int tg3_mem_tx_acquire(struct tg3 *tp) 8634 { 8635 int i; 8636 struct tg3_napi *tnapi = &tp->napi[0]; 8637 8638 /* If multivector TSS is enabled, vector 0 does not handle 8639 * tx interrupts. Don't allocate any resources for it. 
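 *
 * (Editor's example, illustrative: with ENABLE_TSS and, say, four MSI-X
 * vectors, the tnapi++ below skips tp->napi[0], so the tp->txq_cnt tx
 * rings attach to vectors 1 onward while vector 0 services rx and link
 * events only.)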
8640 */
8641 if (tg3_flag(tp, ENABLE_TSS))
8642 tnapi++;
8643
8644 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8645 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8646 sizeof(struct tg3_tx_ring_info),
8647 GFP_KERNEL);
8648 if (!tnapi->tx_buffers)
8649 goto err_out;
8650
8651 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8652 TG3_TX_RING_BYTES,
8653 &tnapi->tx_desc_mapping,
8654 GFP_KERNEL);
8655 if (!tnapi->tx_ring)
8656 goto err_out;
8657 }
8658
8659 return 0;
8660
8661 err_out:
8662 tg3_mem_tx_release(tp);
8663 return -ENOMEM;
8664 }
8665
8666 static void tg3_mem_rx_release(struct tg3 *tp)
8667 {
8668 int i;
8669
8670 for (i = 0; i < tp->irq_max; i++) {
8671 struct tg3_napi *tnapi = &tp->napi[i];
8672
8673 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8674
8675 if (!tnapi->rx_rcb)
8676 continue;
8677
8678 dma_free_coherent(&tp->pdev->dev,
8679 TG3_RX_RCB_RING_BYTES(tp),
8680 tnapi->rx_rcb,
8681 tnapi->rx_rcb_mapping);
8682 tnapi->rx_rcb = NULL;
8683 }
8684 }
8685
8686 static int tg3_mem_rx_acquire(struct tg3 *tp)
8687 {
8688 unsigned int i, limit;
8689
8690 limit = tp->rxq_cnt;
8691
8692 /* If RSS is enabled, we need a (dummy) producer ring
8693 * set on vector zero. This is the true hw prodring.
8694 */
8695 if (tg3_flag(tp, ENABLE_RSS))
8696 limit++;
8697
8698 for (i = 0; i < limit; i++) {
8699 struct tg3_napi *tnapi = &tp->napi[i];
8700
8701 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8702 goto err_out;
8703
8704 /* If multivector RSS is enabled, vector 0
8705 * does not handle rx or tx interrupts.
8706 * Don't allocate any resources for it.
8707 */
8708 if (!i && tg3_flag(tp, ENABLE_RSS))
8709 continue;
8710
8711 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8712 TG3_RX_RCB_RING_BYTES(tp),
8713 &tnapi->rx_rcb_mapping,
8714 GFP_KERNEL);
8715 if (!tnapi->rx_rcb)
8716 goto err_out;
8717 }
8718
8719 return 0;
8720
8721 err_out:
8722 tg3_mem_rx_release(tp);
8723 return -ENOMEM;
8724 }
8725
8726 /*
8727 * Must not be invoked with interrupt sources disabled and
8728 * the hardware shut down.
8729 */
8730 static void tg3_free_consistent(struct tg3 *tp)
8731 {
8732 int i;
8733
8734 for (i = 0; i < tp->irq_cnt; i++) {
8735 struct tg3_napi *tnapi = &tp->napi[i];
8736
8737 if (tnapi->hw_status) {
8738 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8739 tnapi->hw_status,
8740 tnapi->status_mapping);
8741 tnapi->hw_status = NULL;
8742 }
8743 }
8744
8745 tg3_mem_rx_release(tp);
8746 tg3_mem_tx_release(tp);
8747
8748 /* tp->hw_stats can be referenced safely:
8749 * 1. under rtnl_lock
8750 * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8751 */
8752 if (tp->hw_stats) {
8753 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8754 tp->hw_stats, tp->stats_mapping);
8755 tp->hw_stats = NULL;
8756 }
8757 }
8758
8759 /*
8760 * Must not be invoked with interrupt sources disabled and
8761 * the hardware shut down. Can sleep.
8762 */ 8763 static int tg3_alloc_consistent(struct tg3 *tp) 8764 { 8765 int i; 8766 8767 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev, 8768 sizeof(struct tg3_hw_stats), 8769 &tp->stats_mapping, GFP_KERNEL); 8770 if (!tp->hw_stats) 8771 goto err_out; 8772 8773 for (i = 0; i < tp->irq_cnt; i++) { 8774 struct tg3_napi *tnapi = &tp->napi[i]; 8775 struct tg3_hw_status *sblk; 8776 8777 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev, 8778 TG3_HW_STATUS_SIZE, 8779 &tnapi->status_mapping, 8780 GFP_KERNEL); 8781 if (!tnapi->hw_status) 8782 goto err_out; 8783 8784 sblk = tnapi->hw_status; 8785 8786 if (tg3_flag(tp, ENABLE_RSS)) { 8787 u16 *prodptr = NULL; 8788 8789 /* 8790 * When RSS is enabled, the status block format changes 8791 * slightly. The "rx_jumbo_consumer", "reserved", 8792 * and "rx_mini_consumer" members get mapped to the 8793 * other three rx return ring producer indexes. 8794 */ 8795 switch (i) { 8796 case 1: 8797 prodptr = &sblk->idx[0].rx_producer; 8798 break; 8799 case 2: 8800 prodptr = &sblk->rx_jumbo_consumer; 8801 break; 8802 case 3: 8803 prodptr = &sblk->reserved; 8804 break; 8805 case 4: 8806 prodptr = &sblk->rx_mini_consumer; 8807 break; 8808 } 8809 tnapi->rx_rcb_prod_idx = prodptr; 8810 } else { 8811 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer; 8812 } 8813 } 8814 8815 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp)) 8816 goto err_out; 8817 8818 return 0; 8819 8820 err_out: 8821 tg3_free_consistent(tp); 8822 return -ENOMEM; 8823 } 8824 8825 #define MAX_WAIT_CNT 1000 8826 8827 /* To stop a block, clear the enable bit and poll till it 8828 * clears. tp->lock is held. 8829 */ 8830 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent) 8831 { 8832 unsigned int i; 8833 u32 val; 8834 8835 if (tg3_flag(tp, 5705_PLUS)) { 8836 switch (ofs) { 8837 case RCVLSC_MODE: 8838 case DMAC_MODE: 8839 case MBFREE_MODE: 8840 case BUFMGR_MODE: 8841 case MEMARB_MODE: 8842 /* We can't enable/disable these bits of the 8843 * 5705/5750, just say success. 8844 */ 8845 return 0; 8846 8847 default: 8848 break; 8849 } 8850 } 8851 8852 val = tr32(ofs); 8853 val &= ~enable_bit; 8854 tw32_f(ofs, val); 8855 8856 for (i = 0; i < MAX_WAIT_CNT; i++) { 8857 if (pci_channel_offline(tp->pdev)) { 8858 dev_err(&tp->pdev->dev, 8859 "tg3_stop_block device offline, " 8860 "ofs=%lx enable_bit=%x\n", 8861 ofs, enable_bit); 8862 return -ENODEV; 8863 } 8864 8865 udelay(100); 8866 val = tr32(ofs); 8867 if ((val & enable_bit) == 0) 8868 break; 8869 } 8870 8871 if (i == MAX_WAIT_CNT && !silent) { 8872 dev_err(&tp->pdev->dev, 8873 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n", 8874 ofs, enable_bit); 8875 return -ENODEV; 8876 } 8877 8878 return 0; 8879 } 8880 8881 /* tp->lock is held. 
*/ 8882 static int tg3_abort_hw(struct tg3 *tp, bool silent) 8883 { 8884 int i, err; 8885 8886 tg3_disable_ints(tp); 8887 8888 if (pci_channel_offline(tp->pdev)) { 8889 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE); 8890 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE; 8891 err = -ENODEV; 8892 goto err_no_dev; 8893 } 8894 8895 tp->rx_mode &= ~RX_MODE_ENABLE; 8896 tw32_f(MAC_RX_MODE, tp->rx_mode); 8897 udelay(10); 8898 8899 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent); 8900 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent); 8901 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent); 8902 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent); 8903 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent); 8904 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent); 8905 8906 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent); 8907 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent); 8908 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent); 8909 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent); 8910 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent); 8911 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent); 8912 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent); 8913 8914 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE; 8915 tw32_f(MAC_MODE, tp->mac_mode); 8916 udelay(40); 8917 8918 tp->tx_mode &= ~TX_MODE_ENABLE; 8919 tw32_f(MAC_TX_MODE, tp->tx_mode); 8920 8921 for (i = 0; i < MAX_WAIT_CNT; i++) { 8922 udelay(100); 8923 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE)) 8924 break; 8925 } 8926 if (i >= MAX_WAIT_CNT) { 8927 dev_err(&tp->pdev->dev, 8928 "%s timed out, TX_MODE_ENABLE will not clear " 8929 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE)); 8930 err |= -ENODEV; 8931 } 8932 8933 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent); 8934 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent); 8935 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent); 8936 8937 tw32(FTQ_RESET, 0xffffffff); 8938 tw32(FTQ_RESET, 0x00000000); 8939 8940 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent); 8941 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent); 8942 8943 err_no_dev: 8944 for (i = 0; i < tp->irq_cnt; i++) { 8945 struct tg3_napi *tnapi = &tp->napi[i]; 8946 if (tnapi->hw_status) 8947 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); 8948 } 8949 8950 return err; 8951 } 8952 8953 /* Save PCI command register before chip reset */ 8954 static void tg3_save_pci_state(struct tg3 *tp) 8955 { 8956 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd); 8957 } 8958 8959 /* Restore PCI state after chip reset */ 8960 static void tg3_restore_pci_state(struct tg3 *tp) 8961 { 8962 u32 val; 8963 8964 /* Re-enable indirect register accesses. */ 8965 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, 8966 tp->misc_host_ctrl); 8967 8968 /* Set MAX PCI retry to zero. */ 8969 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE); 8970 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 && 8971 tg3_flag(tp, PCIX_MODE)) 8972 val |= PCISTATE_RETRY_SAME_DMA; 8973 /* Allow reads and writes to the APE register and memory space. 
*/ 8974 if (tg3_flag(tp, ENABLE_APE)) 8975 val |= PCISTATE_ALLOW_APE_CTLSPC_WR | 8976 PCISTATE_ALLOW_APE_SHMEM_WR | 8977 PCISTATE_ALLOW_APE_PSPACE_WR; 8978 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val); 8979 8980 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd); 8981 8982 if (!tg3_flag(tp, PCI_EXPRESS)) { 8983 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, 8984 tp->pci_cacheline_sz); 8985 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, 8986 tp->pci_lat_timer); 8987 } 8988 8989 /* Make sure PCI-X relaxed ordering bit is clear. */ 8990 if (tg3_flag(tp, PCIX_MODE)) { 8991 u16 pcix_cmd; 8992 8993 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 8994 &pcix_cmd); 8995 pcix_cmd &= ~PCI_X_CMD_ERO; 8996 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 8997 pcix_cmd); 8998 } 8999 9000 if (tg3_flag(tp, 5780_CLASS)) { 9001 9002 /* Chip reset on 5780 will reset MSI enable bit, 9003 * so need to restore it. 9004 */ 9005 if (tg3_flag(tp, USING_MSI)) { 9006 u16 ctrl; 9007 9008 pci_read_config_word(tp->pdev, 9009 tp->msi_cap + PCI_MSI_FLAGS, 9010 &ctrl); 9011 pci_write_config_word(tp->pdev, 9012 tp->msi_cap + PCI_MSI_FLAGS, 9013 ctrl | PCI_MSI_FLAGS_ENABLE); 9014 val = tr32(MSGINT_MODE); 9015 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE); 9016 } 9017 } 9018 } 9019 9020 static void tg3_override_clk(struct tg3 *tp) 9021 { 9022 u32 val; 9023 9024 switch (tg3_asic_rev(tp)) { 9025 case ASIC_REV_5717: 9026 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE); 9027 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val | 9028 TG3_CPMU_MAC_ORIDE_ENABLE); 9029 break; 9030 9031 case ASIC_REV_5719: 9032 case ASIC_REV_5720: 9033 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN); 9034 break; 9035 9036 default: 9037 return; 9038 } 9039 } 9040 9041 static void tg3_restore_clk(struct tg3 *tp) 9042 { 9043 u32 val; 9044 9045 switch (tg3_asic_rev(tp)) { 9046 case ASIC_REV_5717: 9047 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE); 9048 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, 9049 val & ~TG3_CPMU_MAC_ORIDE_ENABLE); 9050 break; 9051 9052 case ASIC_REV_5719: 9053 case ASIC_REV_5720: 9054 val = tr32(TG3_CPMU_CLCK_ORIDE); 9055 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN); 9056 break; 9057 9058 default: 9059 return; 9060 } 9061 } 9062 9063 /* tp->lock is held. */ 9064 static int tg3_chip_reset(struct tg3 *tp) 9065 __releases(tp->lock) 9066 __acquires(tp->lock) 9067 { 9068 u32 val; 9069 void (*write_op)(struct tg3 *, u32, u32); 9070 int i, err; 9071 9072 if (!pci_device_is_present(tp->pdev)) 9073 return -ENODEV; 9074 9075 tg3_nvram_lock(tp); 9076 9077 tg3_ape_lock(tp, TG3_APE_LOCK_GRC); 9078 9079 /* No matching tg3_nvram_unlock() after this because 9080 * chip reset below will undo the nvram lock. 9081 */ 9082 tp->nvram_lock_cnt = 0; 9083 9084 /* GRC_MISC_CFG core clock reset will clear the memory 9085 * enable bit in PCI register 4 and the MSI enable bit 9086 * on some chips, so we save relevant registers here. 9087 */ 9088 tg3_save_pci_state(tp); 9089 9090 if (tg3_asic_rev(tp) == ASIC_REV_5752 || 9091 tg3_flag(tp, 5755_PLUS)) 9092 tw32(GRC_FASTBOOT_PC, 0); 9093 9094 /* 9095 * We must avoid the readl() that normally takes place. 9096 * It locks machines, causes machine checks, and other 9097 * fun things. So, temporarily disable the 5701 9098 * hardware workaround, while we do the reset. 
9099 */
9100 write_op = tp->write32;
9101 if (write_op == tg3_write_flush_reg32)
9102 tp->write32 = tg3_write32;
9103
9104 /* Prevent the irq handler from reading or writing PCI registers
9105 * during chip reset when the memory enable bit in the PCI command
9106 * register may be cleared. The chip does not generate interrupts
9107 * at this time, but the irq handler may still be called due to irq
9108 * sharing or irqpoll.
9109 */
9110 tg3_flag_set(tp, CHIP_RESETTING);
9111 for (i = 0; i < tp->irq_cnt; i++) {
9112 struct tg3_napi *tnapi = &tp->napi[i];
9113 if (tnapi->hw_status) {
9114 tnapi->hw_status->status = 0;
9115 tnapi->hw_status->status_tag = 0;
9116 }
9117 tnapi->last_tag = 0;
9118 tnapi->last_irq_tag = 0;
9119 }
9120 smp_mb();
9121
9122 tg3_full_unlock(tp);
9123
9124 for (i = 0; i < tp->irq_cnt; i++)
9125 synchronize_irq(tp->napi[i].irq_vec);
9126
9127 tg3_full_lock(tp, 0);
9128
9129 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9130 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9131 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9132 }
9133
9134 /* do the reset */
9135 val = GRC_MISC_CFG_CORECLK_RESET;
9136
9137 if (tg3_flag(tp, PCI_EXPRESS)) {
9138 /* Force PCIe 1.0a mode */
9139 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9140 !tg3_flag(tp, 57765_PLUS) &&
9141 tr32(TG3_PCIE_PHY_TSTCTL) ==
9142 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9143 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9144
9145 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9146 tw32(GRC_MISC_CFG, (1 << 29));
9147 val |= (1 << 29);
9148 }
9149 }
9150
9151 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9152 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9153 tw32(GRC_VCPU_EXT_CTRL,
9154 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9155 }
9156
9157 /* Set the clock to the highest frequency to avoid timeouts. With link
9158 * aware mode, the clock speed could be slow and bootcode does not
9159 * complete within the expected time. Override the clock to allow the
9160 * bootcode to finish sooner and then restore it.
9161 */
9162 tg3_override_clk(tp);
9163
9164 /* Manage gphy power for all CPMU absent PCIe devices. */
9165 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9166 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9167
9168 tw32(GRC_MISC_CFG, val);
9169
9170 /* restore 5701 hardware bug workaround write method */
9171 tp->write32 = write_op;
9172
9173 /* Unfortunately, we have to delay before the PCI read back.
9174 * Some 575X chips will not even respond to a PCI cfg access
9175 * when the reset command is given to the chip.
9176 *
9177 * How do these hardware designers expect things to work
9178 * properly if the PCI write is posted for a long period
9179 * of time? It is always necessary to have some method by
9180 * which a register read back can occur to push the write
9181 * out which does the reset.
9182 *
9183 * For most tg3 variants the trick below was working.
9184 * Ho hum...
9185 */
9186 udelay(120);
9187
9188 /* Flush PCI posted writes. The normal MMIO registers
9189 * are inaccessible at this time so this is the only
9190 * way to do this reliably (actually, this is no longer
9191 * the case, see above). I tried to use indirect
9192 * register read/write but this upset some 5701 variants.
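 *
 * (Editor's note, illustrative: the config-space read below doubles as the
 * flushing read. The same write-then-read-back idiom appears elsewhere in
 * the driver as tw32_f(), which reads the register back immediately after
 * writing it to push the posted write out.)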
9193 */ 9194 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val); 9195 9196 udelay(120); 9197 9198 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) { 9199 u16 val16; 9200 9201 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) { 9202 int j; 9203 u32 cfg_val; 9204 9205 /* Wait for link training to complete. */ 9206 for (j = 0; j < 5000; j++) 9207 udelay(100); 9208 9209 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val); 9210 pci_write_config_dword(tp->pdev, 0xc4, 9211 cfg_val | (1 << 15)); 9212 } 9213 9214 /* Clear the "no snoop" and "relaxed ordering" bits. */ 9215 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN; 9216 /* 9217 * Older PCIe devices only support the 128 byte 9218 * MPS setting. Enforce the restriction. 9219 */ 9220 if (!tg3_flag(tp, CPMU_PRESENT)) 9221 val16 |= PCI_EXP_DEVCTL_PAYLOAD; 9222 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16); 9223 9224 /* Clear error status */ 9225 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA, 9226 PCI_EXP_DEVSTA_CED | 9227 PCI_EXP_DEVSTA_NFED | 9228 PCI_EXP_DEVSTA_FED | 9229 PCI_EXP_DEVSTA_URD); 9230 } 9231 9232 tg3_restore_pci_state(tp); 9233 9234 tg3_flag_clear(tp, CHIP_RESETTING); 9235 tg3_flag_clear(tp, ERROR_PROCESSED); 9236 9237 val = 0; 9238 if (tg3_flag(tp, 5780_CLASS)) 9239 val = tr32(MEMARB_MODE); 9240 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); 9241 9242 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) { 9243 tg3_stop_fw(tp); 9244 tw32(0x5000, 0x400); 9245 } 9246 9247 if (tg3_flag(tp, IS_SSB_CORE)) { 9248 /* 9249 * BCM4785: In order to avoid repercussions from using 9250 * potentially defective internal ROM, stop the Rx RISC CPU, 9251 * which is not required. 9252 */ 9253 tg3_stop_fw(tp); 9254 tg3_halt_cpu(tp, RX_CPU_BASE); 9255 } 9256 9257 err = tg3_poll_fw(tp); 9258 if (err) 9259 return err; 9260 9261 tw32(GRC_MODE, tp->grc_mode); 9262 9263 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) { 9264 val = tr32(0xc4); 9265 9266 tw32(0xc4, val | (1 << 15)); 9267 } 9268 9269 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 && 9270 tg3_asic_rev(tp) == ASIC_REV_5705) { 9271 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE; 9272 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) 9273 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN; 9274 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); 9275 } 9276 9277 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 9278 tp->mac_mode = MAC_MODE_PORT_MODE_TBI; 9279 val = tp->mac_mode; 9280 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { 9281 tp->mac_mode = MAC_MODE_PORT_MODE_GMII; 9282 val = tp->mac_mode; 9283 } else 9284 val = 0; 9285 9286 tw32_f(MAC_MODE, val); 9287 udelay(40); 9288 9289 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC); 9290 9291 tg3_mdio_start(tp); 9292 9293 if (tg3_flag(tp, PCI_EXPRESS) && 9294 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 && 9295 tg3_asic_rev(tp) != ASIC_REV_5785 && 9296 !tg3_flag(tp, 57765_PLUS)) { 9297 val = tr32(0x7c00); 9298 9299 tw32(0x7c00, val | (1 << 25)); 9300 } 9301 9302 tg3_restore_clk(tp); 9303 9304 /* Increase the core clock speed to fix tx timeout issue for 5762 9305 * with 100Mbps link speed. 9306 */ 9307 if (tg3_asic_rev(tp) == ASIC_REV_5762) { 9308 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE); 9309 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val | 9310 TG3_CPMU_MAC_ORIDE_ENABLE); 9311 } 9312 9313 /* Reprobe ASF enable state. 
*/ 9314 tg3_flag_clear(tp, ENABLE_ASF); 9315 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK | 9316 TG3_PHYFLG_KEEP_LINK_ON_PWRDN); 9317 9318 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE); 9319 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); 9320 if (val == NIC_SRAM_DATA_SIG_MAGIC) { 9321 u32 nic_cfg; 9322 9323 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); 9324 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { 9325 tg3_flag_set(tp, ENABLE_ASF); 9326 tp->last_event_jiffies = jiffies; 9327 if (tg3_flag(tp, 5750_PLUS)) 9328 tg3_flag_set(tp, ASF_NEW_HANDSHAKE); 9329 9330 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg); 9331 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK) 9332 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK; 9333 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID) 9334 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN; 9335 } 9336 } 9337 9338 return 0; 9339 } 9340 9341 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *); 9342 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *); 9343 static void __tg3_set_rx_mode(struct net_device *); 9344 9345 /* tp->lock is held. */ 9346 static int tg3_halt(struct tg3 *tp, int kind, bool silent) 9347 { 9348 int err; 9349 9350 tg3_stop_fw(tp); 9351 9352 tg3_write_sig_pre_reset(tp, kind); 9353 9354 tg3_abort_hw(tp, silent); 9355 err = tg3_chip_reset(tp); 9356 9357 __tg3_set_mac_addr(tp, false); 9358 9359 tg3_write_sig_legacy(tp, kind); 9360 tg3_write_sig_post_reset(tp, kind); 9361 9362 if (tp->hw_stats) { 9363 /* Save the stats across chip resets... */ 9364 tg3_get_nstats(tp, &tp->net_stats_prev); 9365 tg3_get_estats(tp, &tp->estats_prev); 9366 9367 /* And make sure the next sample is new data */ 9368 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); 9369 } 9370 9371 return err; 9372 } 9373 9374 static int tg3_set_mac_addr(struct net_device *dev, void *p) 9375 { 9376 struct tg3 *tp = netdev_priv(dev); 9377 struct sockaddr *addr = p; 9378 int err = 0; 9379 bool skip_mac_1 = false; 9380 9381 if (!is_valid_ether_addr(addr->sa_data)) 9382 return -EADDRNOTAVAIL; 9383 9384 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 9385 9386 if (!netif_running(dev)) 9387 return 0; 9388 9389 if (tg3_flag(tp, ENABLE_ASF)) { 9390 u32 addr0_high, addr0_low, addr1_high, addr1_low; 9391 9392 addr0_high = tr32(MAC_ADDR_0_HIGH); 9393 addr0_low = tr32(MAC_ADDR_0_LOW); 9394 addr1_high = tr32(MAC_ADDR_1_HIGH); 9395 addr1_low = tr32(MAC_ADDR_1_LOW); 9396 9397 /* Skip MAC addr 1 if ASF is using it. */ 9398 if ((addr0_high != addr1_high || addr0_low != addr1_low) && 9399 !(addr1_high == 0 && addr1_low == 0)) 9400 skip_mac_1 = true; 9401 } 9402 spin_lock_bh(&tp->lock); 9403 __tg3_set_mac_addr(tp, skip_mac_1); 9404 __tg3_set_rx_mode(dev); 9405 spin_unlock_bh(&tp->lock); 9406 9407 return err; 9408 } 9409 9410 /* tp->lock is held. 
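*/

/* Editor's sketch -- illustrative only; split_dma_addr() is a made-up name:
 * tg3_set_bdinfo() below programs a 64-bit host DMA address into a pair of
 * 32-bit NIC SRAM words, high word first:
 *
 *	static inline void split_dma_addr(u64 mapping, u32 *hi, u32 *lo)
 *	{
 *		*hi = (u32)(mapping >> 32);
 *		*lo = (u32)(mapping & 0xffffffff);
 *	}
 *
 * e.g. mapping 0x123456780 splits into hi = 0x1, lo = 0x23456780.
 */

/* tp->lock is held.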
*/ 9411 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr, 9412 dma_addr_t mapping, u32 maxlen_flags, 9413 u32 nic_addr) 9414 { 9415 tg3_write_mem(tp, 9416 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH), 9417 ((u64) mapping >> 32)); 9418 tg3_write_mem(tp, 9419 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW), 9420 ((u64) mapping & 0xffffffff)); 9421 tg3_write_mem(tp, 9422 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS), 9423 maxlen_flags); 9424 9425 if (!tg3_flag(tp, 5705_PLUS)) 9426 tg3_write_mem(tp, 9427 (bdinfo_addr + TG3_BDINFO_NIC_ADDR), 9428 nic_addr); 9429 } 9430 9431 9432 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec) 9433 { 9434 int i = 0; 9435 9436 if (!tg3_flag(tp, ENABLE_TSS)) { 9437 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs); 9438 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames); 9439 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq); 9440 } else { 9441 tw32(HOSTCC_TXCOL_TICKS, 0); 9442 tw32(HOSTCC_TXMAX_FRAMES, 0); 9443 tw32(HOSTCC_TXCOAL_MAXF_INT, 0); 9444 9445 for (; i < tp->txq_cnt; i++) { 9446 u32 reg; 9447 9448 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18; 9449 tw32(reg, ec->tx_coalesce_usecs); 9450 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18; 9451 tw32(reg, ec->tx_max_coalesced_frames); 9452 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18; 9453 tw32(reg, ec->tx_max_coalesced_frames_irq); 9454 } 9455 } 9456 9457 for (; i < tp->irq_max - 1; i++) { 9458 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0); 9459 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0); 9460 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); 9461 } 9462 } 9463 9464 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec) 9465 { 9466 int i = 0; 9467 u32 limit = tp->rxq_cnt; 9468 9469 if (!tg3_flag(tp, ENABLE_RSS)) { 9470 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs); 9471 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames); 9472 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq); 9473 limit--; 9474 } else { 9475 tw32(HOSTCC_RXCOL_TICKS, 0); 9476 tw32(HOSTCC_RXMAX_FRAMES, 0); 9477 tw32(HOSTCC_RXCOAL_MAXF_INT, 0); 9478 } 9479 9480 for (; i < limit; i++) { 9481 u32 reg; 9482 9483 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18; 9484 tw32(reg, ec->rx_coalesce_usecs); 9485 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18; 9486 tw32(reg, ec->rx_max_coalesced_frames); 9487 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18; 9488 tw32(reg, ec->rx_max_coalesced_frames_irq); 9489 } 9490 9491 for (; i < tp->irq_max - 1; i++) { 9492 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0); 9493 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0); 9494 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); 9495 } 9496 } 9497 9498 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) 9499 { 9500 tg3_coal_tx_init(tp, ec); 9501 tg3_coal_rx_init(tp, ec); 9502 9503 if (!tg3_flag(tp, 5705_PLUS)) { 9504 u32 val = ec->stats_block_coalesce_usecs; 9505 9506 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq); 9507 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq); 9508 9509 if (!tp->link_up) 9510 val = 0; 9511 9512 tw32(HOSTCC_STAT_COAL_TICKS, val); 9513 } 9514 } 9515 9516 /* tp->lock is held. */ 9517 static void tg3_tx_rcbs_disable(struct tg3 *tp) 9518 { 9519 u32 txrcb, limit; 9520 9521 /* Disable all transmit rings but the first. 
*/ 9522 if (!tg3_flag(tp, 5705_PLUS)) 9523 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16; 9524 else if (tg3_flag(tp, 5717_PLUS)) 9525 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4; 9526 else if (tg3_flag(tp, 57765_CLASS) || 9527 tg3_asic_rev(tp) == ASIC_REV_5762) 9528 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2; 9529 else 9530 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; 9531 9532 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; 9533 txrcb < limit; txrcb += TG3_BDINFO_SIZE) 9534 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS, 9535 BDINFO_FLAGS_DISABLED); 9536 } 9537 9538 /* tp->lock is held. */ 9539 static void tg3_tx_rcbs_init(struct tg3 *tp) 9540 { 9541 int i = 0; 9542 u32 txrcb = NIC_SRAM_SEND_RCB; 9543 9544 if (tg3_flag(tp, ENABLE_TSS)) 9545 i++; 9546 9547 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) { 9548 struct tg3_napi *tnapi = &tp->napi[i]; 9549 9550 if (!tnapi->tx_ring) 9551 continue; 9552 9553 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, 9554 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT), 9555 NIC_SRAM_TX_BUFFER_DESC); 9556 } 9557 } 9558 9559 /* tp->lock is held. */ 9560 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp) 9561 { 9562 u32 rxrcb, limit; 9563 9564 /* Disable all receive return rings but the first. */ 9565 if (tg3_flag(tp, 5717_PLUS)) 9566 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17; 9567 else if (!tg3_flag(tp, 5705_PLUS)) 9568 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16; 9569 else if (tg3_asic_rev(tp) == ASIC_REV_5755 || 9570 tg3_asic_rev(tp) == ASIC_REV_5762 || 9571 tg3_flag(tp, 57765_CLASS)) 9572 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4; 9573 else 9574 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; 9575 9576 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; 9577 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE) 9578 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS, 9579 BDINFO_FLAGS_DISABLED); 9580 } 9581 9582 /* tp->lock is held. */ 9583 static void tg3_rx_ret_rcbs_init(struct tg3 *tp) 9584 { 9585 int i = 0; 9586 u32 rxrcb = NIC_SRAM_RCV_RET_RCB; 9587 9588 if (tg3_flag(tp, ENABLE_RSS)) 9589 i++; 9590 9591 for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) { 9592 struct tg3_napi *tnapi = &tp->napi[i]; 9593 9594 if (!tnapi->rx_rcb) 9595 continue; 9596 9597 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, 9598 (tp->rx_ret_ring_mask + 1) << 9599 BDINFO_FLAGS_MAXLEN_SHIFT, 0); 9600 } 9601 } 9602 9603 /* tp->lock is held. */ 9604 static void tg3_rings_reset(struct tg3 *tp) 9605 { 9606 int i; 9607 u32 stblk; 9608 struct tg3_napi *tnapi = &tp->napi[0]; 9609 9610 tg3_tx_rcbs_disable(tp); 9611 9612 tg3_rx_ret_rcbs_disable(tp); 9613 9614 /* Disable interrupts */ 9615 tw32_mailbox_f(tp->napi[0].int_mbox, 1); 9616 tp->napi[0].chk_msi_cnt = 0; 9617 tp->napi[0].last_rx_cons = 0; 9618 tp->napi[0].last_tx_cons = 0; 9619 9620 /* Zero mailbox registers. 
*/ 9621 if (tg3_flag(tp, SUPPORT_MSIX)) { 9622 for (i = 1; i < tp->irq_max; i++) { 9623 tp->napi[i].tx_prod = 0; 9624 tp->napi[i].tx_cons = 0; 9625 if (tg3_flag(tp, ENABLE_TSS)) 9626 tw32_mailbox(tp->napi[i].prodmbox, 0); 9627 tw32_rx_mbox(tp->napi[i].consmbox, 0); 9628 tw32_mailbox_f(tp->napi[i].int_mbox, 1); 9629 tp->napi[i].chk_msi_cnt = 0; 9630 tp->napi[i].last_rx_cons = 0; 9631 tp->napi[i].last_tx_cons = 0; 9632 } 9633 if (!tg3_flag(tp, ENABLE_TSS)) 9634 tw32_mailbox(tp->napi[0].prodmbox, 0); 9635 } else { 9636 tp->napi[0].tx_prod = 0; 9637 tp->napi[0].tx_cons = 0; 9638 tw32_mailbox(tp->napi[0].prodmbox, 0); 9639 tw32_rx_mbox(tp->napi[0].consmbox, 0); 9640 } 9641 9642 /* Make sure the NIC-based send BD rings are disabled. */ 9643 if (!tg3_flag(tp, 5705_PLUS)) { 9644 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW; 9645 for (i = 0; i < 16; i++) 9646 tw32_tx_mbox(mbox + i * 8, 0); 9647 } 9648 9649 /* Clear status block in ram. */ 9650 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); 9651 9652 /* Set status block DMA address */ 9653 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, 9654 ((u64) tnapi->status_mapping >> 32)); 9655 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, 9656 ((u64) tnapi->status_mapping & 0xffffffff)); 9657 9658 stblk = HOSTCC_STATBLCK_RING1; 9659 9660 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) { 9661 u64 mapping = (u64)tnapi->status_mapping; 9662 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32); 9663 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff); 9664 stblk += 8; 9665 9666 /* Clear status block in ram. */ 9667 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); 9668 } 9669 9670 tg3_tx_rcbs_init(tp); 9671 tg3_rx_ret_rcbs_init(tp); 9672 } 9673 9674 static void tg3_setup_rxbd_thresholds(struct tg3 *tp) 9675 { 9676 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh; 9677 9678 if (!tg3_flag(tp, 5750_PLUS) || 9679 tg3_flag(tp, 5780_CLASS) || 9680 tg3_asic_rev(tp) == ASIC_REV_5750 || 9681 tg3_asic_rev(tp) == ASIC_REV_5752 || 9682 tg3_flag(tp, 57765_PLUS)) 9683 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700; 9684 else if (tg3_asic_rev(tp) == ASIC_REV_5755 || 9685 tg3_asic_rev(tp) == ASIC_REV_5787) 9686 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755; 9687 else 9688 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906; 9689 9690 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post); 9691 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1); 9692 9693 val = min(nic_rep_thresh, host_rep_thresh); 9694 tw32(RCVBDI_STD_THRESH, val); 9695 9696 if (tg3_flag(tp, 57765_PLUS)) 9697 tw32(STD_REPLENISH_LWM, bdcache_maxcnt); 9698 9699 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS)) 9700 return; 9701 9702 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700; 9703 9704 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1); 9705 9706 val = min(bdcache_maxcnt / 2, host_rep_thresh); 9707 tw32(RCVBDI_JUMBO_THRESH, val); 9708 9709 if (tg3_flag(tp, 57765_PLUS)) 9710 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt); 9711 } 9712 9713 static inline u32 calc_crc(unsigned char *buf, int len) 9714 { 9715 u32 reg; 9716 u32 tmp; 9717 int j, k; 9718 9719 reg = 0xffffffff; 9720 9721 for (j = 0; j < len; j++) { 9722 reg ^= buf[j]; 9723 9724 for (k = 0; k < 8; k++) { 9725 tmp = reg & 0x01; 9726 9727 reg >>= 1; 9728 9729 if (tmp) 9730 reg ^= CRC32_POLY_LE; 9731 } 9732 } 9733 9734 return ~reg; 9735 } 9736 9737 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all) 9738 { 9739 /* accept or reject all multicast frames */ 9740 
tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9741 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9742 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9743 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9744 }
9745
9746 static void __tg3_set_rx_mode(struct net_device *dev)
9747 {
9748 struct tg3 *tp = netdev_priv(dev);
9749 u32 rx_mode;
9750
9751 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9752 RX_MODE_KEEP_VLAN_TAG);
9753
9754 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9755 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9756 * flag clear.
9757 */
9758 if (!tg3_flag(tp, ENABLE_ASF))
9759 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9760 #endif
9761
9762 if (dev->flags & IFF_PROMISC) {
9763 /* Promiscuous mode. */
9764 rx_mode |= RX_MODE_PROMISC;
9765 } else if (dev->flags & IFF_ALLMULTI) {
9766 /* Accept all multicast. */
9767 tg3_set_multi(tp, 1);
9768 } else if (netdev_mc_empty(dev)) {
9769 /* Reject all multicast. */
9770 tg3_set_multi(tp, 0);
9771 } else {
9772 /* Accept one or more multicast(s). */
9773 struct netdev_hw_addr *ha;
9774 u32 mc_filter[4] = { 0, };
9775 u32 regidx;
9776 u32 bit;
9777 u32 crc;
9778
9779 netdev_for_each_mc_addr(ha, dev) {
9780 crc = calc_crc(ha->addr, ETH_ALEN);
9781 bit = ~crc & 0x7f;
9782 regidx = (bit & 0x60) >> 5;
9783 bit &= 0x1f;
9784 mc_filter[regidx] |= (1 << bit);
9785 }
9786
9787 tw32(MAC_HASH_REG_0, mc_filter[0]);
9788 tw32(MAC_HASH_REG_1, mc_filter[1]);
9789 tw32(MAC_HASH_REG_2, mc_filter[2]);
9790 tw32(MAC_HASH_REG_3, mc_filter[3]);
9791 }
9792
9793 if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9794 rx_mode |= RX_MODE_PROMISC;
9795 } else if (!(dev->flags & IFF_PROMISC)) {
9796 /* Add all entries into the mac addr filter list */
9797 int i = 0;
9798 struct netdev_hw_addr *ha;
9799
9800 netdev_for_each_uc_addr(ha, dev) {
9801 __tg3_set_one_mac_addr(tp, ha->addr,
9802 i + TG3_UCAST_ADDR_IDX(tp));
9803 i++;
9804 }
9805 }
9806
9807 if (rx_mode != tp->rx_mode) {
9808 tp->rx_mode = rx_mode;
9809 tw32_f(MAC_RX_MODE, rx_mode);
9810 udelay(10);
9811 }
9812 }
9813
9814 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9815 {
9816 int i;
9817
9818 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9819 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9820 }
9821
9822 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9823 {
9824 int i;
9825
9826 if (!tg3_flag(tp, SUPPORT_MSIX))
9827 return;
9828
9829 if (tp->rxq_cnt == 1) {
9830 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9831 return;
9832 }
9833
9834 /* Validate table against current IRQ count */
9835 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9836 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9837 break;
9838 }
9839
9840 if (i != TG3_RSS_INDIR_TBL_SIZE)
9841 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9842 }
9843
9844 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9845 {
9846 int i = 0;
9847 u32 reg = MAC_RSS_INDIR_TBL_0;
9848
9849 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9850 u32 val = tp->rss_ind_tbl[i];
9851 i++;
9852 for (; i % 8; i++) {
9853 val <<= 4;
9854 val |= tp->rss_ind_tbl[i];
9855 }
9856 tw32(reg, val);
9857 reg += 4;
9858 }
9859 }
9860
9861 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9862 {
9863 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9864 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9865 else
9866 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9867 }
9868
9869 /* tp->lock is held.
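*/

/* Editor's example -- an illustrative sketch, not part of the driver: how
 * __tg3_set_rx_mode() above folds one multicast address into the four MAC
 * hash registers. The helper name tg3_mc_hash_example() is made up here;
 * calc_crc() and ETH_ALEN are the real symbols used above.
 */
static inline void tg3_mc_hash_example(const u8 *addr, u32 mc_filter[4])
{
	u32 crc = calc_crc((unsigned char *)addr, ETH_ALEN);
	u32 bit = ~crc & 0x7f;		/* low 7 bits of the inverted CRC */
	u32 regidx = (bit & 0x60) >> 5;	/* bits 6:5 select one of 4 registers */

	bit &= 0x1f;			/* bits 4:0 select the bit within it */
	mc_filter[regidx] |= 1u << bit;	/* written to MAC_HASH_REG_0..3 */
}

/* tp->lock is held.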
*/ 9870 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy) 9871 { 9872 u32 val, rdmac_mode; 9873 int i, err, limit; 9874 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring; 9875 9876 tg3_disable_ints(tp); 9877 9878 tg3_stop_fw(tp); 9879 9880 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT); 9881 9882 if (tg3_flag(tp, INIT_COMPLETE)) 9883 tg3_abort_hw(tp, 1); 9884 9885 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && 9886 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) { 9887 tg3_phy_pull_config(tp); 9888 tg3_eee_pull_config(tp, NULL); 9889 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; 9890 } 9891 9892 /* Enable MAC control of LPI */ 9893 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) 9894 tg3_setup_eee(tp); 9895 9896 if (reset_phy) 9897 tg3_phy_reset(tp); 9898 9899 err = tg3_chip_reset(tp); 9900 if (err) 9901 return err; 9902 9903 tg3_write_sig_legacy(tp, RESET_KIND_INIT); 9904 9905 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) { 9906 val = tr32(TG3_CPMU_CTRL); 9907 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE); 9908 tw32(TG3_CPMU_CTRL, val); 9909 9910 val = tr32(TG3_CPMU_LSPD_10MB_CLK); 9911 val &= ~CPMU_LSPD_10MB_MACCLK_MASK; 9912 val |= CPMU_LSPD_10MB_MACCLK_6_25; 9913 tw32(TG3_CPMU_LSPD_10MB_CLK, val); 9914 9915 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD); 9916 val &= ~CPMU_LNK_AWARE_MACCLK_MASK; 9917 val |= CPMU_LNK_AWARE_MACCLK_6_25; 9918 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val); 9919 9920 val = tr32(TG3_CPMU_HST_ACC); 9921 val &= ~CPMU_HST_ACC_MACCLK_MASK; 9922 val |= CPMU_HST_ACC_MACCLK_6_25; 9923 tw32(TG3_CPMU_HST_ACC, val); 9924 } 9925 9926 if (tg3_asic_rev(tp) == ASIC_REV_57780) { 9927 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK; 9928 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN | 9929 PCIE_PWR_MGMT_L1_THRESH_4MS; 9930 tw32(PCIE_PWR_MGMT_THRESH, val); 9931 9932 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK; 9933 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS); 9934 9935 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR); 9936 9937 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; 9938 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); 9939 } 9940 9941 if (tg3_flag(tp, L1PLLPD_EN)) { 9942 u32 grc_mode = tr32(GRC_MODE); 9943 9944 /* Access the lower 1K of PL PCIE block registers. */ 9945 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; 9946 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL); 9947 9948 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1); 9949 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1, 9950 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN); 9951 9952 tw32(GRC_MODE, grc_mode); 9953 } 9954 9955 if (tg3_flag(tp, 57765_CLASS)) { 9956 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) { 9957 u32 grc_mode = tr32(GRC_MODE); 9958 9959 /* Access the lower 1K of PL PCIE block registers. */ 9960 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; 9961 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL); 9962 9963 val = tr32(TG3_PCIE_TLDLPL_PORT + 9964 TG3_PCIE_PL_LO_PHYCTL5); 9965 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5, 9966 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ); 9967 9968 tw32(GRC_MODE, grc_mode); 9969 } 9970 9971 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) { 9972 u32 grc_mode; 9973 9974 /* Fix transmit hangs */ 9975 val = tr32(TG3_CPMU_PADRNG_CTL); 9976 val |= TG3_CPMU_PADRNG_CTL_RDIV2; 9977 tw32(TG3_CPMU_PADRNG_CTL, val); 9978 9979 grc_mode = tr32(GRC_MODE); 9980 9981 /* Access the lower 1K of DL PCIE block registers. 
*/
9982 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9983 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9984
9985 val = tr32(TG3_PCIE_TLDLPL_PORT +
9986 TG3_PCIE_DL_LO_FTSMAX);
9987 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9988 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9989 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9990
9991 tw32(GRC_MODE, grc_mode);
9992 }
9993
9994 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9995 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9996 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9997 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9998 }
9999
10000 /* This works around an issue with Athlon chipsets on
10001 * B3 tigon3 silicon. This bit has no effect on any
10002 * other revision. But do not set this on PCI Express
10003 * chips and don't even touch the clocks if the CPMU is present.
10004 */
10005 if (!tg3_flag(tp, CPMU_PRESENT)) {
10006 if (!tg3_flag(tp, PCI_EXPRESS))
10007 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
10008 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
10009 }
10010
10011 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
10012 tg3_flag(tp, PCIX_MODE)) {
10013 val = tr32(TG3PCI_PCISTATE);
10014 val |= PCISTATE_RETRY_SAME_DMA;
10015 tw32(TG3PCI_PCISTATE, val);
10016 }
10017
10018 if (tg3_flag(tp, ENABLE_APE)) {
10019 /* Allow reads and writes to the
10020 * APE register and memory space.
10021 */
10022 val = tr32(TG3PCI_PCISTATE);
10023 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10024 PCISTATE_ALLOW_APE_SHMEM_WR |
10025 PCISTATE_ALLOW_APE_PSPACE_WR;
10026 tw32(TG3PCI_PCISTATE, val);
10027 }
10028
10029 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10030 /* Enable some hw fixes. */
10031 val = tr32(TG3PCI_MSI_DATA);
10032 val |= (1 << 26) | (1 << 28) | (1 << 29);
10033 tw32(TG3PCI_MSI_DATA, val);
10034 }
10035
10036 /* Descriptor ring init may make accesses to the
10037 * NIC SRAM area to set up the TX descriptors, so we
10038 * can only do this after the hardware has been
10039 * successfully reset.
10040 */
10041 err = tg3_init_rings(tp);
10042 if (err)
10043 return err;
10044
10045 if (tg3_flag(tp, 57765_PLUS)) {
10046 val = tr32(TG3PCI_DMA_RW_CTRL) &
10047 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10048 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10049 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10050 if (!tg3_flag(tp, 57765_CLASS) &&
10051 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10052 tg3_asic_rev(tp) != ASIC_REV_5762)
10053 val |= DMA_RWCTRL_TAGGED_STAT_WA;
10054 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10055 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10056 tg3_asic_rev(tp) != ASIC_REV_5761) {
10057 /* This value is determined during the probe time DMA
10058 * engine test, tg3_test_dma.
10059 */
10060 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10061 }
10062
10063 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10064 GRC_MODE_4X_NIC_SEND_RINGS |
10065 GRC_MODE_NO_TX_PHDR_CSUM |
10066 GRC_MODE_NO_RX_PHDR_CSUM);
10067 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10068
10069 /* Pseudo-header checksum is done by hardware logic and not
10070 * the offload processors, so make the chip do the pseudo-
10071 * header checksums on receive. For transmit it is more
10072 * convenient to do the pseudo-header checksum in software
10073 * as Linux does that on transmit for us in all cases.
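 *
 * (Editor's note: for TCP/IPv4 the pseudo-header covers the source and
 * destination addresses, the protocol number and the TCP length -- this is
 * what csum_tcpudp_magic() computes in the TSO path of tg3_start_xmit()
 * above.)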
10074 */
10075 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10076
10077 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10078 if (tp->rxptpctl)
10079 tw32(TG3_RX_PTP_CTL,
10080 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10081
10082 if (tg3_flag(tp, PTP_CAPABLE))
10083 val |= GRC_MODE_TIME_SYNC_ENABLE;
10084
10085 tw32(GRC_MODE, tp->grc_mode | val);
10086
10087 /* On some AMD platforms, MRRS is restricted to 4000 because of
10088 * a south bridge limitation. As a workaround, the driver sets
10089 * MRRS to 2048 instead of the default 4096.
10090 */
10091 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10092 tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10093 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10094 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10095 }
10096
10097 /* Set up the timer prescaler register. The clock is always 66 MHz. */
10098 val = tr32(GRC_MISC_CFG);
10099 val &= ~0xff;
10100 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10101 tw32(GRC_MISC_CFG, val);
10102
10103 /* Initialize MBUF/DESC pool. */
10104 if (tg3_flag(tp, 5750_PLUS)) {
10105 /* Do nothing. */
10106 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10107 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10108 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10109 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10110 else
10111 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10112 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10113 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10114 } else if (tg3_flag(tp, TSO_CAPABLE)) {
10115 int fw_len;
10116
10117 fw_len = tp->fw_len;
10118 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10119 tw32(BUFMGR_MB_POOL_ADDR,
10120 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10121 tw32(BUFMGR_MB_POOL_SIZE,
10122 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10123 }
10124
10125 if (tp->dev->mtu <= ETH_DATA_LEN) {
10126 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10127 tp->bufmgr_config.mbuf_read_dma_low_water);
10128 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10129 tp->bufmgr_config.mbuf_mac_rx_low_water);
10130 tw32(BUFMGR_MB_HIGH_WATER,
10131 tp->bufmgr_config.mbuf_high_water);
10132 } else {
10133 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10134 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10135 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10136 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10137 tw32(BUFMGR_MB_HIGH_WATER,
10138 tp->bufmgr_config.mbuf_high_water_jumbo);
10139 }
10140 tw32(BUFMGR_DMA_LOW_WATER,
10141 tp->bufmgr_config.dma_low_water);
10142 tw32(BUFMGR_DMA_HIGH_WATER,
10143 tp->bufmgr_config.dma_high_water);
10144
10145 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10146 if (tg3_asic_rev(tp) == ASIC_REV_5719)
10147 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10148 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10149 tg3_asic_rev(tp) == ASIC_REV_5762 ||
10150 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10151 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10152 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10153 tw32(BUFMGR_MODE, val);
10154 for (i = 0; i < 2000; i++) {
10155 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10156 break;
10157 udelay(10);
10158 }
10159 if (i >= 2000) {
10160 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10161 return -ENODEV;
10162 }
10163
10164 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10165 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10166
10167 tg3_setup_rxbd_thresholds(tp);
10168
10169 /* Initialize TG3_BDINFO's at:
10170 * RCVDBDI_STD_BD: standard eth size rx ring
10171 *
RCVDBDI_JUMBO_BD: jumbo frame rx ring 10172 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work) 10173 * 10174 * like so: 10175 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring 10176 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) | 10177 * ring attribute flags 10178 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM 10179 * 10180 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries. 10181 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries. 10182 * 10183 * The size of each ring is fixed in the firmware, but the location is 10184 * configurable. 10185 */ 10186 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, 10187 ((u64) tpr->rx_std_mapping >> 32)); 10188 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, 10189 ((u64) tpr->rx_std_mapping & 0xffffffff)); 10190 if (!tg3_flag(tp, 5717_PLUS)) 10191 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, 10192 NIC_SRAM_RX_BUFFER_DESC); 10193 10194 /* Disable the mini ring */ 10195 if (!tg3_flag(tp, 5705_PLUS)) 10196 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS, 10197 BDINFO_FLAGS_DISABLED); 10198 10199 /* Program the jumbo buffer descriptor ring control 10200 * blocks on those devices that have them. 10201 */ 10202 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || 10203 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) { 10204 10205 if (tg3_flag(tp, JUMBO_RING_ENABLE)) { 10206 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, 10207 ((u64) tpr->rx_jmb_mapping >> 32)); 10208 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, 10209 ((u64) tpr->rx_jmb_mapping & 0xffffffff)); 10210 val = TG3_RX_JMB_RING_SIZE(tp) << 10211 BDINFO_FLAGS_MAXLEN_SHIFT; 10212 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, 10213 val | BDINFO_FLAGS_USE_EXT_RECV); 10214 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) || 10215 tg3_flag(tp, 57765_CLASS) || 10216 tg3_asic_rev(tp) == ASIC_REV_5762) 10217 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, 10218 NIC_SRAM_RX_JUMBO_BUFFER_DESC); 10219 } else { 10220 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, 10221 BDINFO_FLAGS_DISABLED); 10222 } 10223 10224 if (tg3_flag(tp, 57765_PLUS)) { 10225 val = TG3_RX_STD_RING_SIZE(tp); 10226 val <<= BDINFO_FLAGS_MAXLEN_SHIFT; 10227 val |= (TG3_RX_STD_DMA_SZ << 2); 10228 } else 10229 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT; 10230 } else 10231 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT; 10232 10233 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val); 10234 10235 tpr->rx_std_prod_idx = tp->rx_pending; 10236 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx); 10237 10238 tpr->rx_jmb_prod_idx = 10239 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0; 10240 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx); 10241 10242 tg3_rings_reset(tp); 10243 10244 /* Initialize MAC address and backoff seed. */ 10245 __tg3_set_mac_addr(tp, false); 10246 10247 /* MTU + ethernet header + FCS + optional VLAN tag */ 10248 tw32(MAC_RX_MTU_SIZE, 10249 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); 10250 10251 /* The slot time is changed by tg3_setup_phy if we 10252 * run at gigabit with half duplex. 
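 *
 * As a worked example of the encoding programmed below (field names
 * as used by this driver):
 *
 *	(2  << TX_LENGTHS_IPG_CRS_SHIFT)	carrier-sense IPG
 *	(6  << TX_LENGTHS_IPG_SHIFT)		back-to-back IPG
 *	(32 << TX_LENGTHS_SLOT_TIME_SHIFT)	nominal 512-bit slot time
 *
 * 802.3 half-duplex gigabit uses a 4096-bit slot time instead, which
 * is why tg3_setup_phy() rewrites the slot-time field in that mode.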
10253 */ 10254 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) | 10255 (6 << TX_LENGTHS_IPG_SHIFT) | 10256 (32 << TX_LENGTHS_SLOT_TIME_SHIFT); 10257 10258 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 10259 tg3_asic_rev(tp) == ASIC_REV_5762) 10260 val |= tr32(MAC_TX_LENGTHS) & 10261 (TX_LENGTHS_JMB_FRM_LEN_MSK | 10262 TX_LENGTHS_CNT_DWN_VAL_MSK); 10263 10264 tw32(MAC_TX_LENGTHS, val); 10265 10266 /* Receive rules. */ 10267 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS); 10268 tw32(RCVLPC_CONFIG, 0x0181); 10269 10270 /* Calculate RDMAC_MODE setting early, we need it to determine 10271 * the RCVLPC_STATE_ENABLE mask. 10272 */ 10273 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB | 10274 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB | 10275 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB | 10276 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB | 10277 RDMAC_MODE_LNGREAD_ENAB); 10278 10279 if (tg3_asic_rev(tp) == ASIC_REV_5717) 10280 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS; 10281 10282 if (tg3_asic_rev(tp) == ASIC_REV_5784 || 10283 tg3_asic_rev(tp) == ASIC_REV_5785 || 10284 tg3_asic_rev(tp) == ASIC_REV_57780) 10285 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB | 10286 RDMAC_MODE_MBUF_RBD_CRPT_ENAB | 10287 RDMAC_MODE_MBUF_SBD_CRPT_ENAB; 10288 10289 if (tg3_asic_rev(tp) == ASIC_REV_5705 && 10290 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) { 10291 if (tg3_flag(tp, TSO_CAPABLE) && 10292 tg3_asic_rev(tp) == ASIC_REV_5705) { 10293 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128; 10294 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && 10295 !tg3_flag(tp, IS_5788)) { 10296 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; 10297 } 10298 } 10299 10300 if (tg3_flag(tp, PCI_EXPRESS)) 10301 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; 10302 10303 if (tg3_asic_rev(tp) == ASIC_REV_57766) { 10304 tp->dma_limit = 0; 10305 if (tp->dev->mtu <= ETH_DATA_LEN) { 10306 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR; 10307 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K; 10308 } 10309 } 10310 10311 if (tg3_flag(tp, HW_TSO_1) || 10312 tg3_flag(tp, HW_TSO_2) || 10313 tg3_flag(tp, HW_TSO_3)) 10314 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN; 10315 10316 if (tg3_flag(tp, 57765_PLUS) || 10317 tg3_asic_rev(tp) == ASIC_REV_5785 || 10318 tg3_asic_rev(tp) == ASIC_REV_57780) 10319 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN; 10320 10321 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 10322 tg3_asic_rev(tp) == ASIC_REV_5762) 10323 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET; 10324 10325 if (tg3_asic_rev(tp) == ASIC_REV_5761 || 10326 tg3_asic_rev(tp) == ASIC_REV_5784 || 10327 tg3_asic_rev(tp) == ASIC_REV_5785 || 10328 tg3_asic_rev(tp) == ASIC_REV_57780 || 10329 tg3_flag(tp, 57765_PLUS)) { 10330 u32 tgtreg; 10331 10332 if (tg3_asic_rev(tp) == ASIC_REV_5762) 10333 tgtreg = TG3_RDMA_RSRVCTRL_REG2; 10334 else 10335 tgtreg = TG3_RDMA_RSRVCTRL_REG; 10336 10337 val = tr32(tgtreg); 10338 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || 10339 tg3_asic_rev(tp) == ASIC_REV_5762) { 10340 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK | 10341 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK | 10342 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK); 10343 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B | 10344 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K | 10345 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K; 10346 } 10347 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX); 10348 } 10349 10350 if (tg3_asic_rev(tp) == ASIC_REV_5719 || 10351 tg3_asic_rev(tp) == ASIC_REV_5720 || 10352 tg3_asic_rev(tp) == ASIC_REV_5762) { 10353 u32 tgtreg; 10354 10355 if (tg3_asic_rev(tp) == ASIC_REV_5762) 10356 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2; 10357 else 
10358 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL; 10359 10360 val = tr32(tgtreg); 10361 tw32(tgtreg, val | 10362 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K | 10363 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K); 10364 } 10365 10366 /* Receive/send statistics. */ 10367 if (tg3_flag(tp, 5750_PLUS)) { 10368 val = tr32(RCVLPC_STATS_ENABLE); 10369 val &= ~RCVLPC_STATSENAB_DACK_FIX; 10370 tw32(RCVLPC_STATS_ENABLE, val); 10371 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) && 10372 tg3_flag(tp, TSO_CAPABLE)) { 10373 val = tr32(RCVLPC_STATS_ENABLE); 10374 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX; 10375 tw32(RCVLPC_STATS_ENABLE, val); 10376 } else { 10377 tw32(RCVLPC_STATS_ENABLE, 0xffffff); 10378 } 10379 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE); 10380 tw32(SNDDATAI_STATSENAB, 0xffffff); 10381 tw32(SNDDATAI_STATSCTRL, 10382 (SNDDATAI_SCTRL_ENABLE | 10383 SNDDATAI_SCTRL_FASTUPD)); 10384 10385 /* Setup host coalescing engine. */ 10386 tw32(HOSTCC_MODE, 0); 10387 for (i = 0; i < 2000; i++) { 10388 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE)) 10389 break; 10390 udelay(10); 10391 } 10392 10393 __tg3_set_coalesce(tp, &tp->coal); 10394 10395 if (!tg3_flag(tp, 5705_PLUS)) { 10396 /* Status/statistics block address. See tg3_timer, 10397 * the tg3_periodic_fetch_stats call there, and 10398 * tg3_get_stats to see how this works for 5705/5750 chips. 10399 */ 10400 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, 10401 ((u64) tp->stats_mapping >> 32)); 10402 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, 10403 ((u64) tp->stats_mapping & 0xffffffff)); 10404 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK); 10405 10406 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK); 10407 10408 /* Clear statistics and status block memory areas */ 10409 for (i = NIC_SRAM_STATS_BLK; 10410 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE; 10411 i += sizeof(u32)) { 10412 tg3_write_mem(tp, i, 0); 10413 udelay(40); 10414 } 10415 } 10416 10417 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode); 10418 10419 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE); 10420 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE); 10421 if (!tg3_flag(tp, 5705_PLUS)) 10422 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE); 10423 10424 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { 10425 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 10426 /* reset to prevent losing 1st rx packet intermittently */ 10427 tw32_f(MAC_RX_MODE, RX_MODE_RESET); 10428 udelay(10); 10429 } 10430 10431 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | 10432 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | 10433 MAC_MODE_FHDE_ENABLE; 10434 if (tg3_flag(tp, ENABLE_APE)) 10435 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; 10436 if (!tg3_flag(tp, 5705_PLUS) && 10437 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 10438 tg3_asic_rev(tp) != ASIC_REV_5700) 10439 tp->mac_mode |= MAC_MODE_LINK_POLARITY; 10440 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR); 10441 udelay(40); 10442 10443 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants(). 10444 * If TG3_FLAG_IS_NIC is zero, we should read the 10445 * register to preserve the GPIO settings for LOMs. The GPIOs, 10446 * whether used as inputs or outputs, are set by boot code after 10447 * reset. 
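 *
 * The statements below are a plain masked read-modify-write, i.e.
 * (sketch):
 *
 *	cached = (cached & ~gpio_mask) |
 *		 (tr32(GRC_LOCAL_CTRL) & gpio_mask);
 *
 * so GPIO state owned by the boot code survives, while all other
 * bits come from the driver's cached grc_local_ctrl value.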
10448 */ 10449 if (!tg3_flag(tp, IS_NIC)) { 10450 u32 gpio_mask; 10451 10452 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 | 10453 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 | 10454 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2; 10455 10456 if (tg3_asic_rev(tp) == ASIC_REV_5752) 10457 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 | 10458 GRC_LCLCTRL_GPIO_OUTPUT3; 10459 10460 if (tg3_asic_rev(tp) == ASIC_REV_5755) 10461 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL; 10462 10463 tp->grc_local_ctrl &= ~gpio_mask; 10464 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask; 10465 10466 /* GPIO1 must be driven high for eeprom write protect */ 10467 if (tg3_flag(tp, EEPROM_WRITE_PROT)) 10468 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | 10469 GRC_LCLCTRL_GPIO_OUTPUT1); 10470 } 10471 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 10472 udelay(100); 10473 10474 if (tg3_flag(tp, USING_MSIX)) { 10475 val = tr32(MSGINT_MODE); 10476 val |= MSGINT_MODE_ENABLE; 10477 if (tp->irq_cnt > 1) 10478 val |= MSGINT_MODE_MULTIVEC_EN; 10479 if (!tg3_flag(tp, 1SHOT_MSI)) 10480 val |= MSGINT_MODE_ONE_SHOT_DISABLE; 10481 tw32(MSGINT_MODE, val); 10482 } 10483 10484 if (!tg3_flag(tp, 5705_PLUS)) { 10485 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE); 10486 udelay(40); 10487 } 10488 10489 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB | 10490 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB | 10491 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB | 10492 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB | 10493 WDMAC_MODE_LNGREAD_ENAB); 10494 10495 if (tg3_asic_rev(tp) == ASIC_REV_5705 && 10496 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) { 10497 if (tg3_flag(tp, TSO_CAPABLE) && 10498 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 || 10499 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) { 10500 /* nothing */ 10501 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && 10502 !tg3_flag(tp, IS_5788)) { 10503 val |= WDMAC_MODE_RX_ACCEL; 10504 } 10505 } 10506 10507 /* Enable host coalescing bug fix */ 10508 if (tg3_flag(tp, 5755_PLUS)) 10509 val |= WDMAC_MODE_STATUS_TAG_FIX; 10510 10511 if (tg3_asic_rev(tp) == ASIC_REV_5785) 10512 val |= WDMAC_MODE_BURST_ALL_DATA; 10513 10514 tw32_f(WDMAC_MODE, val); 10515 udelay(40); 10516 10517 if (tg3_flag(tp, PCIX_MODE)) { 10518 u16 pcix_cmd; 10519 10520 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 10521 &pcix_cmd); 10522 if (tg3_asic_rev(tp) == ASIC_REV_5703) { 10523 pcix_cmd &= ~PCI_X_CMD_MAX_READ; 10524 pcix_cmd |= PCI_X_CMD_READ_2K; 10525 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) { 10526 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ); 10527 pcix_cmd |= PCI_X_CMD_READ_2K; 10528 } 10529 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 10530 pcix_cmd); 10531 } 10532 10533 tw32_f(RDMAC_MODE, rdmac_mode); 10534 udelay(40); 10535 10536 if (tg3_asic_rev(tp) == ASIC_REV_5719 || 10537 tg3_asic_rev(tp) == ASIC_REV_5720) { 10538 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) { 10539 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp)) 10540 break; 10541 } 10542 if (i < TG3_NUM_RDMA_CHANNELS) { 10543 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); 10544 val |= tg3_lso_rd_dma_workaround_bit(tp); 10545 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val); 10546 tg3_flag_set(tp, 5719_5720_RDMA_BUG); 10547 } 10548 } 10549 10550 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE); 10551 if (!tg3_flag(tp, 5705_PLUS)) 10552 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE); 10553 10554 if (tg3_asic_rev(tp) == ASIC_REV_5761) 10555 tw32(SNDDATAC_MODE, 10556 SNDDATAC_MODE_ENABLE | 
SNDDATAC_MODE_CDELAY); 10557 else 10558 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE); 10559 10560 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE); 10561 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB); 10562 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ; 10563 if (tg3_flag(tp, LRG_PROD_RING_CAP)) 10564 val |= RCVDBDI_MODE_LRG_RING_SZ; 10565 tw32(RCVDBDI_MODE, val); 10566 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE); 10567 if (tg3_flag(tp, HW_TSO_1) || 10568 tg3_flag(tp, HW_TSO_2) || 10569 tg3_flag(tp, HW_TSO_3)) 10570 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8); 10571 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE; 10572 if (tg3_flag(tp, ENABLE_TSS)) 10573 val |= SNDBDI_MODE_MULTI_TXQ_EN; 10574 tw32(SNDBDI_MODE, val); 10575 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE); 10576 10577 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) { 10578 err = tg3_load_5701_a0_firmware_fix(tp); 10579 if (err) 10580 return err; 10581 } 10582 10583 if (tg3_asic_rev(tp) == ASIC_REV_57766) { 10584 /* Ignore any errors for the firmware download. If download 10585 * fails, the device will operate with EEE disabled 10586 */ 10587 tg3_load_57766_firmware(tp); 10588 } 10589 10590 if (tg3_flag(tp, TSO_CAPABLE)) { 10591 err = tg3_load_tso_firmware(tp); 10592 if (err) 10593 return err; 10594 } 10595 10596 tp->tx_mode = TX_MODE_ENABLE; 10597 10598 if (tg3_flag(tp, 5755_PLUS) || 10599 tg3_asic_rev(tp) == ASIC_REV_5906) 10600 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX; 10601 10602 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 10603 tg3_asic_rev(tp) == ASIC_REV_5762) { 10604 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE; 10605 tp->tx_mode &= ~val; 10606 tp->tx_mode |= tr32(MAC_TX_MODE) & val; 10607 } 10608 10609 tw32_f(MAC_TX_MODE, tp->tx_mode); 10610 udelay(100); 10611 10612 if (tg3_flag(tp, ENABLE_RSS)) { 10613 u32 rss_key[10]; 10614 10615 tg3_rss_write_indir_tbl(tp); 10616 10617 netdev_rss_key_fill(rss_key, 10 * sizeof(u32)); 10618 10619 for (i = 0; i < 10 ; i++) 10620 tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]); 10621 } 10622 10623 tp->rx_mode = RX_MODE_ENABLE; 10624 if (tg3_flag(tp, 5755_PLUS)) 10625 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE; 10626 10627 if (tg3_asic_rev(tp) == ASIC_REV_5762) 10628 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX; 10629 10630 if (tg3_flag(tp, ENABLE_RSS)) 10631 tp->rx_mode |= RX_MODE_RSS_ENABLE | 10632 RX_MODE_RSS_ITBL_HASH_BITS_7 | 10633 RX_MODE_RSS_IPV6_HASH_EN | 10634 RX_MODE_RSS_TCP_IPV6_HASH_EN | 10635 RX_MODE_RSS_IPV4_HASH_EN | 10636 RX_MODE_RSS_TCP_IPV4_HASH_EN; 10637 10638 tw32_f(MAC_RX_MODE, tp->rx_mode); 10639 udelay(10); 10640 10641 tw32(MAC_LED_CTRL, tp->led_ctrl); 10642 10643 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); 10644 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 10645 tw32_f(MAC_RX_MODE, RX_MODE_RESET); 10646 udelay(10); 10647 } 10648 tw32_f(MAC_RX_MODE, tp->rx_mode); 10649 udelay(10); 10650 10651 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 10652 if ((tg3_asic_rev(tp) == ASIC_REV_5704) && 10653 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) { 10654 /* Set drive transmission level to 1.2V */ 10655 /* only if the signal pre-emphasis bit is not set */ 10656 val = tr32(MAC_SERDES_CFG); 10657 val &= 0xfffff000; 10658 val |= 0x880; 10659 tw32(MAC_SERDES_CFG, val); 10660 } 10661 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) 10662 tw32(MAC_SERDES_CFG, 0x616000); 10663 } 10664 10665 /* Prevent chip from dropping frames when flow control 10666 * is enabled. 
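 *
 * A rough reading of the value written below: it is a frame count
 * for the mbuf low-watermark logic, bounding how many more max-sized
 * frames the MAC accepts once the watermark has been crossed; 1 on
 * the 57765 class, 2 on everything else, per the code that follows.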
10667 */ 10668 if (tg3_flag(tp, 57765_CLASS)) 10669 val = 1; 10670 else 10671 val = 2; 10672 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val); 10673 10674 if (tg3_asic_rev(tp) == ASIC_REV_5704 && 10675 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { 10676 /* Use hardware link auto-negotiation */ 10677 tg3_flag_set(tp, HW_AUTONEG); 10678 } 10679 10680 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && 10681 tg3_asic_rev(tp) == ASIC_REV_5714) { 10682 u32 tmp; 10683 10684 tmp = tr32(SERDES_RX_CTRL); 10685 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT); 10686 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT; 10687 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT; 10688 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 10689 } 10690 10691 if (!tg3_flag(tp, USE_PHYLIB)) { 10692 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 10693 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; 10694 10695 err = tg3_setup_phy(tp, false); 10696 if (err) 10697 return err; 10698 10699 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 10700 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) { 10701 u32 tmp; 10702 10703 /* Clear CRC stats. */ 10704 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) { 10705 tg3_writephy(tp, MII_TG3_TEST1, 10706 tmp | MII_TG3_TEST1_CRC_EN); 10707 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp); 10708 } 10709 } 10710 } 10711 10712 __tg3_set_rx_mode(tp->dev); 10713 10714 /* Initialize receive rules. */ 10715 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK); 10716 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK); 10717 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK); 10718 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK); 10719 10720 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) 10721 limit = 8; 10722 else 10723 limit = 16; 10724 if (tg3_flag(tp, ENABLE_ASF)) 10725 limit -= 4; 10726 switch (limit) { 10727 case 16: 10728 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0); 10729 /* fall through */ 10730 case 15: 10731 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0); 10732 /* fall through */ 10733 case 14: 10734 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0); 10735 /* fall through */ 10736 case 13: 10737 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0); 10738 /* fall through */ 10739 case 12: 10740 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0); 10741 /* fall through */ 10742 case 11: 10743 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0); 10744 /* fall through */ 10745 case 10: 10746 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0); 10747 /* fall through */ 10748 case 9: 10749 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0); 10750 /* fall through */ 10751 case 8: 10752 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0); 10753 /* fall through */ 10754 case 7: 10755 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0); 10756 /* fall through */ 10757 case 6: 10758 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0); 10759 /* fall through */ 10760 case 5: 10761 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0); 10762 /* fall through */ 10763 case 4: 10764 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */ 10765 case 3: 10766 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */ 10767 case 2: 10768 case 1: 10769 10770 default: 10771 break; 10772 } 10773 10774 if (tg3_flag(tp, ENABLE_APE)) 10775 /* Write our heartbeat update interval to APE. 
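	 *
	 * The interval is programmed in milliseconds (note the _INT_MS
	 * register name), and tg3_timer() below keeps it from lapsing
	 * by calling tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL),
	 * per the "every 5 seconds" note in the timer path.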
*/ 10776 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS, 10777 APE_HOST_HEARTBEAT_INT_5SEC); 10778 10779 tg3_write_sig_post_reset(tp, RESET_KIND_INIT); 10780 10781 return 0; 10782 } 10783 10784 /* Called at device open time to get the chip ready for 10785 * packet processing. Invoked with tp->lock held. 10786 */ 10787 static int tg3_init_hw(struct tg3 *tp, bool reset_phy) 10788 { 10789 /* Chip may have been just powered on. If so, the boot code may still 10790 * be running initialization. Wait for it to finish to avoid races in 10791 * accessing the hardware. 10792 */ 10793 tg3_enable_register_access(tp); 10794 tg3_poll_fw(tp); 10795 10796 tg3_switch_clocks(tp); 10797 10798 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); 10799 10800 return tg3_reset_hw(tp, reset_phy); 10801 } 10802 10803 #ifdef CONFIG_TIGON3_HWMON 10804 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir) 10805 { 10806 int i; 10807 10808 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) { 10809 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN; 10810 10811 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len); 10812 off += len; 10813 10814 if (ocir->signature != TG3_OCIR_SIG_MAGIC || 10815 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE)) 10816 memset(ocir, 0, TG3_OCIR_LEN); 10817 } 10818 } 10819 10820 /* sysfs attributes for hwmon */ 10821 static ssize_t tg3_show_temp(struct device *dev, 10822 struct device_attribute *devattr, char *buf) 10823 { 10824 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 10825 struct tg3 *tp = dev_get_drvdata(dev); 10826 u32 temperature; 10827 10828 spin_lock_bh(&tp->lock); 10829 tg3_ape_scratchpad_read(tp, &temperature, attr->index, 10830 sizeof(temperature)); 10831 spin_unlock_bh(&tp->lock); 10832 return sprintf(buf, "%u\n", temperature * 1000); 10833 } 10834 10835 10836 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL, 10837 TG3_TEMP_SENSOR_OFFSET); 10838 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL, 10839 TG3_TEMP_CAUTION_OFFSET); 10840 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL, 10841 TG3_TEMP_MAX_OFFSET); 10842 10843 static struct attribute *tg3_attrs[] = { 10844 &sensor_dev_attr_temp1_input.dev_attr.attr, 10845 &sensor_dev_attr_temp1_crit.dev_attr.attr, 10846 &sensor_dev_attr_temp1_max.dev_attr.attr, 10847 NULL 10848 }; 10849 ATTRIBUTE_GROUPS(tg3); 10850 10851 static void tg3_hwmon_close(struct tg3 *tp) 10852 { 10853 if (tp->hwmon_dev) { 10854 hwmon_device_unregister(tp->hwmon_dev); 10855 tp->hwmon_dev = NULL; 10856 } 10857 } 10858 10859 static void tg3_hwmon_open(struct tg3 *tp) 10860 { 10861 int i; 10862 u32 size = 0; 10863 struct pci_dev *pdev = tp->pdev; 10864 struct tg3_ocir ocirs[TG3_SD_NUM_RECS]; 10865 10866 tg3_sd_scan_scratchpad(tp, ocirs); 10867 10868 for (i = 0; i < TG3_SD_NUM_RECS; i++) { 10869 if (!ocirs[i].src_data_length) 10870 continue; 10871 10872 size += ocirs[i].src_hdr_length; 10873 size += ocirs[i].src_data_length; 10874 } 10875 10876 if (!size) 10877 return; 10878 10879 tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3", 10880 tp, tg3_groups); 10881 if (IS_ERR(tp->hwmon_dev)) { 10882 tp->hwmon_dev = NULL; 10883 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n"); 10884 } 10885 } 10886 #else 10887 static inline void tg3_hwmon_close(struct tg3 *tp) { } 10888 static inline void tg3_hwmon_open(struct tg3 *tp) { } 10889 #endif /* CONFIG_TIGON3_HWMON */ 10890 10891 10892 #define TG3_STAT_ADD32(PSTAT, REG) \ 10893 do { u32 __val = tr32(REG); \ 10894 (PSTAT)->low += __val; 
\ 10895 if ((PSTAT)->low < __val) \ 10896 (PSTAT)->high += 1; \ 10897 } while (0) 10898 10899 static void tg3_periodic_fetch_stats(struct tg3 *tp) 10900 { 10901 struct tg3_hw_stats *sp = tp->hw_stats; 10902 10903 if (!tp->link_up) 10904 return; 10905 10906 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS); 10907 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS); 10908 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT); 10909 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT); 10910 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS); 10911 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS); 10912 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS); 10913 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED); 10914 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL); 10915 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL); 10916 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST); 10917 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST); 10918 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST); 10919 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) && 10920 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low + 10921 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) { 10922 u32 val; 10923 10924 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); 10925 val &= ~tg3_lso_rd_dma_workaround_bit(tp); 10926 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val); 10927 tg3_flag_clear(tp, 5719_5720_RDMA_BUG); 10928 } 10929 10930 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS); 10931 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS); 10932 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST); 10933 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST); 10934 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST); 10935 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS); 10936 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS); 10937 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD); 10938 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD); 10939 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD); 10940 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED); 10941 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG); 10942 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS); 10943 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE); 10944 10945 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT); 10946 if (tg3_asic_rev(tp) != ASIC_REV_5717 && 10947 tg3_asic_rev(tp) != ASIC_REV_5762 && 10948 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 && 10949 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) { 10950 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT); 10951 } else { 10952 u32 val = tr32(HOSTCC_FLOW_ATTN); 10953 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 
1 : 0; 10954 if (val) { 10955 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM); 10956 sp->rx_discards.low += val; 10957 if (sp->rx_discards.low < val) 10958 sp->rx_discards.high += 1; 10959 } 10960 sp->mbuf_lwm_thresh_hit = sp->rx_discards; 10961 } 10962 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT); 10963 } 10964 10965 static void tg3_chk_missed_msi(struct tg3 *tp) 10966 { 10967 u32 i; 10968 10969 for (i = 0; i < tp->irq_cnt; i++) { 10970 struct tg3_napi *tnapi = &tp->napi[i]; 10971 10972 if (tg3_has_work(tnapi)) { 10973 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr && 10974 tnapi->last_tx_cons == tnapi->tx_cons) { 10975 if (tnapi->chk_msi_cnt < 1) { 10976 tnapi->chk_msi_cnt++; 10977 return; 10978 } 10979 tg3_msi(0, tnapi); 10980 } 10981 } 10982 tnapi->chk_msi_cnt = 0; 10983 tnapi->last_rx_cons = tnapi->rx_rcb_ptr; 10984 tnapi->last_tx_cons = tnapi->tx_cons; 10985 } 10986 } 10987 10988 static void tg3_timer(struct timer_list *t) 10989 { 10990 struct tg3 *tp = from_timer(tp, t, timer); 10991 10992 spin_lock(&tp->lock); 10993 10994 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) { 10995 spin_unlock(&tp->lock); 10996 goto restart_timer; 10997 } 10998 10999 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 11000 tg3_flag(tp, 57765_CLASS)) 11001 tg3_chk_missed_msi(tp); 11002 11003 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) { 11004 /* BCM4785: Flush posted writes from GbE to host memory. */ 11005 tr32(HOSTCC_MODE); 11006 } 11007 11008 if (!tg3_flag(tp, TAGGED_STATUS)) { 11009 /* All of this garbage is because when using non-tagged 11010 * IRQ status the mailbox/status_block protocol the chip 11011 * uses with the cpu is race prone. 11012 */ 11013 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) { 11014 tw32(GRC_LOCAL_CTRL, 11015 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); 11016 } else { 11017 tw32(HOSTCC_MODE, tp->coalesce_mode | 11018 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW); 11019 } 11020 11021 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { 11022 spin_unlock(&tp->lock); 11023 tg3_reset_task_schedule(tp); 11024 goto restart_timer; 11025 } 11026 } 11027 11028 /* This part only runs once per second. 
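	 *
	 * Worked example, using the values from tg3_timer_init() below:
	 * with timer_offset = HZ / 10 the timer fires every 100 ms and
	 * timer_multiplier = HZ / timer_offset = 10, so timer_counter
	 * reaches zero on every 10th run, i.e. once per second.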
	 */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, false);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (tp->link_up &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!tp->link_up &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					       (tp->mac_mode &
						~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, false);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		} else if (tg3_flag(tp, POLL_CPMU_LINK)) {
			u32 cpmu = tr32(TG3_CPMU_STATUS);
			bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
					 TG3_CPMU_STATUS_LINK_MASK);

			if (link_up != tp->link_up)
				tg3_setup_phy(tp, false);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also
	 * have the same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
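	 *
	 * The 2 second cadence comes from the same divider scheme as
	 * above (see tg3_timer_init(): asf_multiplier =
	 * (HZ / timer_offset) * TG3_FW_UPDATE_FREQ_SEC), e.g. 10 timer
	 * runs per second * 2 seconds = 20 runs between heartbeats,
	 * assuming the frequency constant is 2.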
11106 */ 11107 if (!--tp->asf_counter) { 11108 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) { 11109 tg3_wait_for_event_ack(tp); 11110 11111 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, 11112 FWCMD_NICDRV_ALIVE3); 11113 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4); 11114 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 11115 TG3_FW_UPDATE_TIMEOUT_SEC); 11116 11117 tg3_generate_fw_event(tp); 11118 } 11119 tp->asf_counter = tp->asf_multiplier; 11120 } 11121 11122 /* Update the APE heartbeat every 5 seconds.*/ 11123 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL); 11124 11125 spin_unlock(&tp->lock); 11126 11127 restart_timer: 11128 tp->timer.expires = jiffies + tp->timer_offset; 11129 add_timer(&tp->timer); 11130 } 11131 11132 static void tg3_timer_init(struct tg3 *tp) 11133 { 11134 if (tg3_flag(tp, TAGGED_STATUS) && 11135 tg3_asic_rev(tp) != ASIC_REV_5717 && 11136 !tg3_flag(tp, 57765_CLASS)) 11137 tp->timer_offset = HZ; 11138 else 11139 tp->timer_offset = HZ / 10; 11140 11141 BUG_ON(tp->timer_offset > HZ); 11142 11143 tp->timer_multiplier = (HZ / tp->timer_offset); 11144 tp->asf_multiplier = (HZ / tp->timer_offset) * 11145 TG3_FW_UPDATE_FREQ_SEC; 11146 11147 timer_setup(&tp->timer, tg3_timer, 0); 11148 } 11149 11150 static void tg3_timer_start(struct tg3 *tp) 11151 { 11152 tp->asf_counter = tp->asf_multiplier; 11153 tp->timer_counter = tp->timer_multiplier; 11154 11155 tp->timer.expires = jiffies + tp->timer_offset; 11156 add_timer(&tp->timer); 11157 } 11158 11159 static void tg3_timer_stop(struct tg3 *tp) 11160 { 11161 del_timer_sync(&tp->timer); 11162 } 11163 11164 /* Restart hardware after configuration changes, self-test, etc. 11165 * Invoked with tp->lock held. 11166 */ 11167 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy) 11168 __releases(tp->lock) 11169 __acquires(tp->lock) 11170 { 11171 int err; 11172 11173 err = tg3_init_hw(tp, reset_phy); 11174 if (err) { 11175 netdev_err(tp->dev, 11176 "Failed to re-initialize device, aborting\n"); 11177 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11178 tg3_full_unlock(tp); 11179 tg3_timer_stop(tp); 11180 tp->irq_sync = 0; 11181 tg3_napi_enable(tp); 11182 dev_close(tp->dev); 11183 tg3_full_lock(tp, 0); 11184 } 11185 return err; 11186 } 11187 11188 static void tg3_reset_task(struct work_struct *work) 11189 { 11190 struct tg3 *tp = container_of(work, struct tg3, reset_task); 11191 int err; 11192 11193 rtnl_lock(); 11194 tg3_full_lock(tp, 0); 11195 11196 if (!netif_running(tp->dev)) { 11197 tg3_flag_clear(tp, RESET_TASK_PENDING); 11198 tg3_full_unlock(tp); 11199 rtnl_unlock(); 11200 return; 11201 } 11202 11203 tg3_full_unlock(tp); 11204 11205 tg3_phy_stop(tp); 11206 11207 tg3_netif_stop(tp); 11208 11209 tg3_full_lock(tp, 1); 11210 11211 if (tg3_flag(tp, TX_RECOVERY_PENDING)) { 11212 tp->write32_tx_mbox = tg3_write32_tx_mbox; 11213 tp->write32_rx_mbox = tg3_write_flush_reg32; 11214 tg3_flag_set(tp, MBOX_WRITE_REORDER); 11215 tg3_flag_clear(tp, TX_RECOVERY_PENDING); 11216 } 11217 11218 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); 11219 err = tg3_init_hw(tp, true); 11220 if (err) 11221 goto out; 11222 11223 tg3_netif_start(tp); 11224 11225 out: 11226 tg3_full_unlock(tp); 11227 11228 if (!err) 11229 tg3_phy_start(tp); 11230 11231 tg3_flag_clear(tp, RESET_TASK_PENDING); 11232 rtnl_unlock(); 11233 } 11234 11235 static int tg3_request_irq(struct tg3 *tp, int irq_num) 11236 { 11237 irq_handler_t fn; 11238 unsigned long flags; 11239 char *name; 11240 struct tg3_napi *tnapi = &tp->napi[irq_num]; 11241 11242 if (tp->irq_cnt == 1) 11243 name = tp->dev->name; 
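	/* For illustration, assuming a device named "eth0", the labels
	 * built below come out as:
	 *
	 *	"eth0-txrx-1"	combined tx+rx vector
	 *	"eth0-rx-1"	rx-only vector
	 *	"eth0-tx-1"	tx-only vector
	 *	"eth0-1"	vector with neither ring attached
	 *
	 * while the single-vector case above just reuses the netdev name.
	 */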
11244 else { 11245 name = &tnapi->irq_lbl[0]; 11246 if (tnapi->tx_buffers && tnapi->rx_rcb) 11247 snprintf(name, IFNAMSIZ, 11248 "%s-txrx-%d", tp->dev->name, irq_num); 11249 else if (tnapi->tx_buffers) 11250 snprintf(name, IFNAMSIZ, 11251 "%s-tx-%d", tp->dev->name, irq_num); 11252 else if (tnapi->rx_rcb) 11253 snprintf(name, IFNAMSIZ, 11254 "%s-rx-%d", tp->dev->name, irq_num); 11255 else 11256 snprintf(name, IFNAMSIZ, 11257 "%s-%d", tp->dev->name, irq_num); 11258 name[IFNAMSIZ-1] = 0; 11259 } 11260 11261 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) { 11262 fn = tg3_msi; 11263 if (tg3_flag(tp, 1SHOT_MSI)) 11264 fn = tg3_msi_1shot; 11265 flags = 0; 11266 } else { 11267 fn = tg3_interrupt; 11268 if (tg3_flag(tp, TAGGED_STATUS)) 11269 fn = tg3_interrupt_tagged; 11270 flags = IRQF_SHARED; 11271 } 11272 11273 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi); 11274 } 11275 11276 static int tg3_test_interrupt(struct tg3 *tp) 11277 { 11278 struct tg3_napi *tnapi = &tp->napi[0]; 11279 struct net_device *dev = tp->dev; 11280 int err, i, intr_ok = 0; 11281 u32 val; 11282 11283 if (!netif_running(dev)) 11284 return -ENODEV; 11285 11286 tg3_disable_ints(tp); 11287 11288 free_irq(tnapi->irq_vec, tnapi); 11289 11290 /* 11291 * Turn off MSI one shot mode. Otherwise this test has no 11292 * observable way to know whether the interrupt was delivered. 11293 */ 11294 if (tg3_flag(tp, 57765_PLUS)) { 11295 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE; 11296 tw32(MSGINT_MODE, val); 11297 } 11298 11299 err = request_irq(tnapi->irq_vec, tg3_test_isr, 11300 IRQF_SHARED, dev->name, tnapi); 11301 if (err) 11302 return err; 11303 11304 tnapi->hw_status->status &= ~SD_STATUS_UPDATED; 11305 tg3_enable_ints(tp); 11306 11307 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 11308 tnapi->coal_now); 11309 11310 for (i = 0; i < 5; i++) { 11311 u32 int_mbox, misc_host_ctrl; 11312 11313 int_mbox = tr32_mailbox(tnapi->int_mbox); 11314 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); 11315 11316 if ((int_mbox != 0) || 11317 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) { 11318 intr_ok = 1; 11319 break; 11320 } 11321 11322 if (tg3_flag(tp, 57765_PLUS) && 11323 tnapi->hw_status->status_tag != tnapi->last_tag) 11324 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); 11325 11326 msleep(10); 11327 } 11328 11329 tg3_disable_ints(tp); 11330 11331 free_irq(tnapi->irq_vec, tnapi); 11332 11333 err = tg3_request_irq(tp, 0); 11334 11335 if (err) 11336 return err; 11337 11338 if (intr_ok) { 11339 /* Reenable MSI one shot mode. */ 11340 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) { 11341 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE; 11342 tw32(MSGINT_MODE, val); 11343 } 11344 return 0; 11345 } 11346 11347 return -EIO; 11348 } 11349 11350 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is 11351 * successfully restored 11352 */ 11353 static int tg3_test_msi(struct tg3 *tp) 11354 { 11355 int err; 11356 u16 pci_cmd; 11357 11358 if (!tg3_flag(tp, USING_MSI)) 11359 return 0; 11360 11361 /* Turn off SERR reporting in case MSI terminates with Master 11362 * Abort. 
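	 *
	 * On platforms that escalate SERR# to an NMI, a Master Abort on
	 * the MSI message write would otherwise be fatal.  The sequence
	 * below is the usual save/mask/restore of PCI_COMMAND:
	 *
	 *	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
	 *	pci_write_config_word(pdev, PCI_COMMAND,
	 *			      pci_cmd & ~PCI_COMMAND_SERR);
	 *	err = tg3_test_interrupt(tp);
	 *	pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
	 *
	 * restoring the original value whether or not the test passed.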
11363 */ 11364 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); 11365 pci_write_config_word(tp->pdev, PCI_COMMAND, 11366 pci_cmd & ~PCI_COMMAND_SERR); 11367 11368 err = tg3_test_interrupt(tp); 11369 11370 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); 11371 11372 if (!err) 11373 return 0; 11374 11375 /* other failures */ 11376 if (err != -EIO) 11377 return err; 11378 11379 /* MSI test failed, go back to INTx mode */ 11380 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching " 11381 "to INTx mode. Please report this failure to the PCI " 11382 "maintainer and include system chipset information\n"); 11383 11384 free_irq(tp->napi[0].irq_vec, &tp->napi[0]); 11385 11386 pci_disable_msi(tp->pdev); 11387 11388 tg3_flag_clear(tp, USING_MSI); 11389 tp->napi[0].irq_vec = tp->pdev->irq; 11390 11391 err = tg3_request_irq(tp, 0); 11392 if (err) 11393 return err; 11394 11395 /* Need to reset the chip because the MSI cycle may have terminated 11396 * with Master Abort. 11397 */ 11398 tg3_full_lock(tp, 1); 11399 11400 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11401 err = tg3_init_hw(tp, true); 11402 11403 tg3_full_unlock(tp); 11404 11405 if (err) 11406 free_irq(tp->napi[0].irq_vec, &tp->napi[0]); 11407 11408 return err; 11409 } 11410 11411 static int tg3_request_firmware(struct tg3 *tp) 11412 { 11413 const struct tg3_firmware_hdr *fw_hdr; 11414 11415 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) { 11416 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n", 11417 tp->fw_needed); 11418 return -ENOENT; 11419 } 11420 11421 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; 11422 11423 /* Firmware blob starts with version numbers, followed by 11424 * start address and _full_ length including BSS sections 11425 * (which must be longer than the actual data, of course 11426 */ 11427 11428 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */ 11429 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) { 11430 netdev_err(tp->dev, "bogus length %d in \"%s\"\n", 11431 tp->fw_len, tp->fw_needed); 11432 release_firmware(tp->fw); 11433 tp->fw = NULL; 11434 return -EINVAL; 11435 } 11436 11437 /* We no longer need firmware; we have it. */ 11438 tp->fw_needed = NULL; 11439 return 0; 11440 } 11441 11442 static u32 tg3_irq_count(struct tg3 *tp) 11443 { 11444 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt); 11445 11446 if (irq_cnt > 1) { 11447 /* We want as many rx rings enabled as there are cpus. 11448 * In multiqueue MSI-X mode, the first MSI-X vector 11449 * only deals with link interrupts, etc, so we add 11450 * one to the number of vectors we are requesting. 11451 */ 11452 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max); 11453 } 11454 11455 return irq_cnt; 11456 } 11457 11458 static bool tg3_enable_msix(struct tg3 *tp) 11459 { 11460 int i, rc; 11461 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS]; 11462 11463 tp->txq_cnt = tp->txq_req; 11464 tp->rxq_cnt = tp->rxq_req; 11465 if (!tp->rxq_cnt) 11466 tp->rxq_cnt = netif_get_num_default_rss_queues(); 11467 if (tp->rxq_cnt > tp->rxq_max) 11468 tp->rxq_cnt = tp->rxq_max; 11469 11470 /* Disable multiple TX rings by default. Simple round-robin hardware 11471 * scheduling of the TX rings can cause starvation of rings with 11472 * small packets when other rings have TSO or jumbo packets. 
11473 */ 11474 if (!tp->txq_req) 11475 tp->txq_cnt = 1; 11476 11477 tp->irq_cnt = tg3_irq_count(tp); 11478 11479 for (i = 0; i < tp->irq_max; i++) { 11480 msix_ent[i].entry = i; 11481 msix_ent[i].vector = 0; 11482 } 11483 11484 rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt); 11485 if (rc < 0) { 11486 return false; 11487 } else if (rc < tp->irq_cnt) { 11488 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n", 11489 tp->irq_cnt, rc); 11490 tp->irq_cnt = rc; 11491 tp->rxq_cnt = max(rc - 1, 1); 11492 if (tp->txq_cnt) 11493 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max); 11494 } 11495 11496 for (i = 0; i < tp->irq_max; i++) 11497 tp->napi[i].irq_vec = msix_ent[i].vector; 11498 11499 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) { 11500 pci_disable_msix(tp->pdev); 11501 return false; 11502 } 11503 11504 if (tp->irq_cnt == 1) 11505 return true; 11506 11507 tg3_flag_set(tp, ENABLE_RSS); 11508 11509 if (tp->txq_cnt > 1) 11510 tg3_flag_set(tp, ENABLE_TSS); 11511 11512 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt); 11513 11514 return true; 11515 } 11516 11517 static void tg3_ints_init(struct tg3 *tp) 11518 { 11519 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) && 11520 !tg3_flag(tp, TAGGED_STATUS)) { 11521 /* All MSI supporting chips should support tagged 11522 * status. Assert that this is the case. 11523 */ 11524 netdev_warn(tp->dev, 11525 "MSI without TAGGED_STATUS? Not using MSI\n"); 11526 goto defcfg; 11527 } 11528 11529 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp)) 11530 tg3_flag_set(tp, USING_MSIX); 11531 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0) 11532 tg3_flag_set(tp, USING_MSI); 11533 11534 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) { 11535 u32 msi_mode = tr32(MSGINT_MODE); 11536 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) 11537 msi_mode |= MSGINT_MODE_MULTIVEC_EN; 11538 if (!tg3_flag(tp, 1SHOT_MSI)) 11539 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE; 11540 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE); 11541 } 11542 defcfg: 11543 if (!tg3_flag(tp, USING_MSIX)) { 11544 tp->irq_cnt = 1; 11545 tp->napi[0].irq_vec = tp->pdev->irq; 11546 } 11547 11548 if (tp->irq_cnt == 1) { 11549 tp->txq_cnt = 1; 11550 tp->rxq_cnt = 1; 11551 netif_set_real_num_tx_queues(tp->dev, 1); 11552 netif_set_real_num_rx_queues(tp->dev, 1); 11553 } 11554 } 11555 11556 static void tg3_ints_fini(struct tg3 *tp) 11557 { 11558 if (tg3_flag(tp, USING_MSIX)) 11559 pci_disable_msix(tp->pdev); 11560 else if (tg3_flag(tp, USING_MSI)) 11561 pci_disable_msi(tp->pdev); 11562 tg3_flag_clear(tp, USING_MSI); 11563 tg3_flag_clear(tp, USING_MSIX); 11564 tg3_flag_clear(tp, ENABLE_RSS); 11565 tg3_flag_clear(tp, ENABLE_TSS); 11566 } 11567 11568 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq, 11569 bool init) 11570 { 11571 struct net_device *dev = tp->dev; 11572 int i, err; 11573 11574 /* 11575 * Setup interrupts first so we know how 11576 * many NAPI resources to allocate 11577 */ 11578 tg3_ints_init(tp); 11579 11580 tg3_rss_check_indir_tbl(tp); 11581 11582 /* The placement of this call is tied 11583 * to the setup and use of Host TX descriptors. 
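	 *
	 * Overall bring-up order in this function, for orientation:
	 * tg3_ints_init() sizes the vector set, tg3_alloc_consistent()
	 * then allocates rings and status blocks to match, the NAPI
	 * contexts are initialized and enabled, IRQs are requested, and
	 * only then does tg3_init_hw() program the chip; the error
	 * paths below unwind in exactly the reverse order.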
11584 */ 11585 err = tg3_alloc_consistent(tp); 11586 if (err) 11587 goto out_ints_fini; 11588 11589 tg3_napi_init(tp); 11590 11591 tg3_napi_enable(tp); 11592 11593 for (i = 0; i < tp->irq_cnt; i++) { 11594 err = tg3_request_irq(tp, i); 11595 if (err) { 11596 for (i--; i >= 0; i--) { 11597 struct tg3_napi *tnapi = &tp->napi[i]; 11598 11599 free_irq(tnapi->irq_vec, tnapi); 11600 } 11601 goto out_napi_fini; 11602 } 11603 } 11604 11605 tg3_full_lock(tp, 0); 11606 11607 if (init) 11608 tg3_ape_driver_state_change(tp, RESET_KIND_INIT); 11609 11610 err = tg3_init_hw(tp, reset_phy); 11611 if (err) { 11612 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11613 tg3_free_rings(tp); 11614 } 11615 11616 tg3_full_unlock(tp); 11617 11618 if (err) 11619 goto out_free_irq; 11620 11621 if (test_irq && tg3_flag(tp, USING_MSI)) { 11622 err = tg3_test_msi(tp); 11623 11624 if (err) { 11625 tg3_full_lock(tp, 0); 11626 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11627 tg3_free_rings(tp); 11628 tg3_full_unlock(tp); 11629 11630 goto out_napi_fini; 11631 } 11632 11633 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) { 11634 u32 val = tr32(PCIE_TRANSACTION_CFG); 11635 11636 tw32(PCIE_TRANSACTION_CFG, 11637 val | PCIE_TRANS_CFG_1SHOT_MSI); 11638 } 11639 } 11640 11641 tg3_phy_start(tp); 11642 11643 tg3_hwmon_open(tp); 11644 11645 tg3_full_lock(tp, 0); 11646 11647 tg3_timer_start(tp); 11648 tg3_flag_set(tp, INIT_COMPLETE); 11649 tg3_enable_ints(tp); 11650 11651 tg3_ptp_resume(tp); 11652 11653 tg3_full_unlock(tp); 11654 11655 netif_tx_start_all_queues(dev); 11656 11657 /* 11658 * Reset loopback feature if it was turned on while the device was down 11659 * make sure that it's installed properly now. 11660 */ 11661 if (dev->features & NETIF_F_LOOPBACK) 11662 tg3_set_loopback(dev, dev->features); 11663 11664 return 0; 11665 11666 out_free_irq: 11667 for (i = tp->irq_cnt - 1; i >= 0; i--) { 11668 struct tg3_napi *tnapi = &tp->napi[i]; 11669 free_irq(tnapi->irq_vec, tnapi); 11670 } 11671 11672 out_napi_fini: 11673 tg3_napi_disable(tp); 11674 tg3_napi_fini(tp); 11675 tg3_free_consistent(tp); 11676 11677 out_ints_fini: 11678 tg3_ints_fini(tp); 11679 11680 return err; 11681 } 11682 11683 static void tg3_stop(struct tg3 *tp) 11684 { 11685 int i; 11686 11687 tg3_reset_task_cancel(tp); 11688 tg3_netif_stop(tp); 11689 11690 tg3_timer_stop(tp); 11691 11692 tg3_hwmon_close(tp); 11693 11694 tg3_phy_stop(tp); 11695 11696 tg3_full_lock(tp, 1); 11697 11698 tg3_disable_ints(tp); 11699 11700 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11701 tg3_free_rings(tp); 11702 tg3_flag_clear(tp, INIT_COMPLETE); 11703 11704 tg3_full_unlock(tp); 11705 11706 for (i = tp->irq_cnt - 1; i >= 0; i--) { 11707 struct tg3_napi *tnapi = &tp->napi[i]; 11708 free_irq(tnapi->irq_vec, tnapi); 11709 } 11710 11711 tg3_ints_fini(tp); 11712 11713 tg3_napi_fini(tp); 11714 11715 tg3_free_consistent(tp); 11716 } 11717 11718 static int tg3_open(struct net_device *dev) 11719 { 11720 struct tg3 *tp = netdev_priv(dev); 11721 int err; 11722 11723 if (tp->pcierr_recovery) { 11724 netdev_err(dev, "Failed to open device. 
PCI error recovery " 11725 "in progress\n"); 11726 return -EAGAIN; 11727 } 11728 11729 if (tp->fw_needed) { 11730 err = tg3_request_firmware(tp); 11731 if (tg3_asic_rev(tp) == ASIC_REV_57766) { 11732 if (err) { 11733 netdev_warn(tp->dev, "EEE capability disabled\n"); 11734 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP; 11735 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { 11736 netdev_warn(tp->dev, "EEE capability restored\n"); 11737 tp->phy_flags |= TG3_PHYFLG_EEE_CAP; 11738 } 11739 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) { 11740 if (err) 11741 return err; 11742 } else if (err) { 11743 netdev_warn(tp->dev, "TSO capability disabled\n"); 11744 tg3_flag_clear(tp, TSO_CAPABLE); 11745 } else if (!tg3_flag(tp, TSO_CAPABLE)) { 11746 netdev_notice(tp->dev, "TSO capability restored\n"); 11747 tg3_flag_set(tp, TSO_CAPABLE); 11748 } 11749 } 11750 11751 tg3_carrier_off(tp); 11752 11753 err = tg3_power_up(tp); 11754 if (err) 11755 return err; 11756 11757 tg3_full_lock(tp, 0); 11758 11759 tg3_disable_ints(tp); 11760 tg3_flag_clear(tp, INIT_COMPLETE); 11761 11762 tg3_full_unlock(tp); 11763 11764 err = tg3_start(tp, 11765 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN), 11766 true, true); 11767 if (err) { 11768 tg3_frob_aux_power(tp, false); 11769 pci_set_power_state(tp->pdev, PCI_D3hot); 11770 } 11771 11772 return err; 11773 } 11774 11775 static int tg3_close(struct net_device *dev) 11776 { 11777 struct tg3 *tp = netdev_priv(dev); 11778 11779 if (tp->pcierr_recovery) { 11780 netdev_err(dev, "Failed to close device. PCI error recovery " 11781 "in progress\n"); 11782 return -EAGAIN; 11783 } 11784 11785 tg3_stop(tp); 11786 11787 if (pci_device_is_present(tp->pdev)) { 11788 tg3_power_down_prepare(tp); 11789 11790 tg3_carrier_off(tp); 11791 } 11792 return 0; 11793 } 11794 11795 static inline u64 get_stat64(tg3_stat64_t *val) 11796 { 11797 return ((u64)val->high << 32) | ((u64)val->low); 11798 } 11799 11800 static u64 tg3_calc_crc_errors(struct tg3 *tp) 11801 { 11802 struct tg3_hw_stats *hw_stats = tp->hw_stats; 11803 11804 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 11805 (tg3_asic_rev(tp) == ASIC_REV_5700 || 11806 tg3_asic_rev(tp) == ASIC_REV_5701)) { 11807 u32 val; 11808 11809 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) { 11810 tg3_writephy(tp, MII_TG3_TEST1, 11811 val | MII_TG3_TEST1_CRC_EN); 11812 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val); 11813 } else 11814 val = 0; 11815 11816 tp->phy_crc_errors += val; 11817 11818 return tp->phy_crc_errors; 11819 } 11820 11821 return get_stat64(&hw_stats->rx_fcs_errors); 11822 } 11823 11824 #define ESTAT_ADD(member) \ 11825 estats->member = old_estats->member + \ 11826 get_stat64(&hw_stats->member) 11827 11828 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats) 11829 { 11830 struct tg3_ethtool_stats *old_estats = &tp->estats_prev; 11831 struct tg3_hw_stats *hw_stats = tp->hw_stats; 11832 11833 ESTAT_ADD(rx_octets); 11834 ESTAT_ADD(rx_fragments); 11835 ESTAT_ADD(rx_ucast_packets); 11836 ESTAT_ADD(rx_mcast_packets); 11837 ESTAT_ADD(rx_bcast_packets); 11838 ESTAT_ADD(rx_fcs_errors); 11839 ESTAT_ADD(rx_align_errors); 11840 ESTAT_ADD(rx_xon_pause_rcvd); 11841 ESTAT_ADD(rx_xoff_pause_rcvd); 11842 ESTAT_ADD(rx_mac_ctrl_rcvd); 11843 ESTAT_ADD(rx_xoff_entered); 11844 ESTAT_ADD(rx_frame_too_long_errors); 11845 ESTAT_ADD(rx_jabbers); 11846 ESTAT_ADD(rx_undersize_packets); 11847 ESTAT_ADD(rx_in_length_errors); 11848 ESTAT_ADD(rx_out_length_errors); 11849 ESTAT_ADD(rx_64_or_less_octet_packets); 11850 ESTAT_ADD(rx_65_to_127_octet_packets); 
11851 ESTAT_ADD(rx_128_to_255_octet_packets); 11852 ESTAT_ADD(rx_256_to_511_octet_packets); 11853 ESTAT_ADD(rx_512_to_1023_octet_packets); 11854 ESTAT_ADD(rx_1024_to_1522_octet_packets); 11855 ESTAT_ADD(rx_1523_to_2047_octet_packets); 11856 ESTAT_ADD(rx_2048_to_4095_octet_packets); 11857 ESTAT_ADD(rx_4096_to_8191_octet_packets); 11858 ESTAT_ADD(rx_8192_to_9022_octet_packets); 11859 11860 ESTAT_ADD(tx_octets); 11861 ESTAT_ADD(tx_collisions); 11862 ESTAT_ADD(tx_xon_sent); 11863 ESTAT_ADD(tx_xoff_sent); 11864 ESTAT_ADD(tx_flow_control); 11865 ESTAT_ADD(tx_mac_errors); 11866 ESTAT_ADD(tx_single_collisions); 11867 ESTAT_ADD(tx_mult_collisions); 11868 ESTAT_ADD(tx_deferred); 11869 ESTAT_ADD(tx_excessive_collisions); 11870 ESTAT_ADD(tx_late_collisions); 11871 ESTAT_ADD(tx_collide_2times); 11872 ESTAT_ADD(tx_collide_3times); 11873 ESTAT_ADD(tx_collide_4times); 11874 ESTAT_ADD(tx_collide_5times); 11875 ESTAT_ADD(tx_collide_6times); 11876 ESTAT_ADD(tx_collide_7times); 11877 ESTAT_ADD(tx_collide_8times); 11878 ESTAT_ADD(tx_collide_9times); 11879 ESTAT_ADD(tx_collide_10times); 11880 ESTAT_ADD(tx_collide_11times); 11881 ESTAT_ADD(tx_collide_12times); 11882 ESTAT_ADD(tx_collide_13times); 11883 ESTAT_ADD(tx_collide_14times); 11884 ESTAT_ADD(tx_collide_15times); 11885 ESTAT_ADD(tx_ucast_packets); 11886 ESTAT_ADD(tx_mcast_packets); 11887 ESTAT_ADD(tx_bcast_packets); 11888 ESTAT_ADD(tx_carrier_sense_errors); 11889 ESTAT_ADD(tx_discards); 11890 ESTAT_ADD(tx_errors); 11891 11892 ESTAT_ADD(dma_writeq_full); 11893 ESTAT_ADD(dma_write_prioq_full); 11894 ESTAT_ADD(rxbds_empty); 11895 ESTAT_ADD(rx_discards); 11896 ESTAT_ADD(rx_errors); 11897 ESTAT_ADD(rx_threshold_hit); 11898 11899 ESTAT_ADD(dma_readq_full); 11900 ESTAT_ADD(dma_read_prioq_full); 11901 ESTAT_ADD(tx_comp_queue_full); 11902 11903 ESTAT_ADD(ring_set_send_prod_index); 11904 ESTAT_ADD(ring_status_update); 11905 ESTAT_ADD(nic_irqs); 11906 ESTAT_ADD(nic_avoided_irqs); 11907 ESTAT_ADD(nic_tx_threshold_hit); 11908 11909 ESTAT_ADD(mbuf_lwm_thresh_hit); 11910 } 11911 11912 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats) 11913 { 11914 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev; 11915 struct tg3_hw_stats *hw_stats = tp->hw_stats; 11916 11917 stats->rx_packets = old_stats->rx_packets + 11918 get_stat64(&hw_stats->rx_ucast_packets) + 11919 get_stat64(&hw_stats->rx_mcast_packets) + 11920 get_stat64(&hw_stats->rx_bcast_packets); 11921 11922 stats->tx_packets = old_stats->tx_packets + 11923 get_stat64(&hw_stats->tx_ucast_packets) + 11924 get_stat64(&hw_stats->tx_mcast_packets) + 11925 get_stat64(&hw_stats->tx_bcast_packets); 11926 11927 stats->rx_bytes = old_stats->rx_bytes + 11928 get_stat64(&hw_stats->rx_octets); 11929 stats->tx_bytes = old_stats->tx_bytes + 11930 get_stat64(&hw_stats->tx_octets); 11931 11932 stats->rx_errors = old_stats->rx_errors + 11933 get_stat64(&hw_stats->rx_errors); 11934 stats->tx_errors = old_stats->tx_errors + 11935 get_stat64(&hw_stats->tx_errors) + 11936 get_stat64(&hw_stats->tx_mac_errors) + 11937 get_stat64(&hw_stats->tx_carrier_sense_errors) + 11938 get_stat64(&hw_stats->tx_discards); 11939 11940 stats->multicast = old_stats->multicast + 11941 get_stat64(&hw_stats->rx_mcast_packets); 11942 stats->collisions = old_stats->collisions + 11943 get_stat64(&hw_stats->tx_collisions); 11944 11945 stats->rx_length_errors = old_stats->rx_length_errors + 11946 get_stat64(&hw_stats->rx_frame_too_long_errors) + 11947 get_stat64(&hw_stats->rx_undersize_packets); 11948 11949 stats->rx_frame_errors = 
old_stats->rx_frame_errors + 11950 get_stat64(&hw_stats->rx_align_errors); 11951 stats->tx_aborted_errors = old_stats->tx_aborted_errors + 11952 get_stat64(&hw_stats->tx_discards); 11953 stats->tx_carrier_errors = old_stats->tx_carrier_errors + 11954 get_stat64(&hw_stats->tx_carrier_sense_errors); 11955 11956 stats->rx_crc_errors = old_stats->rx_crc_errors + 11957 tg3_calc_crc_errors(tp); 11958 11959 stats->rx_missed_errors = old_stats->rx_missed_errors + 11960 get_stat64(&hw_stats->rx_discards); 11961 11962 stats->rx_dropped = tp->rx_dropped; 11963 stats->tx_dropped = tp->tx_dropped; 11964 } 11965 11966 static int tg3_get_regs_len(struct net_device *dev) 11967 { 11968 return TG3_REG_BLK_SIZE; 11969 } 11970 11971 static void tg3_get_regs(struct net_device *dev, 11972 struct ethtool_regs *regs, void *_p) 11973 { 11974 struct tg3 *tp = netdev_priv(dev); 11975 11976 regs->version = 0; 11977 11978 memset(_p, 0, TG3_REG_BLK_SIZE); 11979 11980 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 11981 return; 11982 11983 tg3_full_lock(tp, 0); 11984 11985 tg3_dump_legacy_regs(tp, (u32 *)_p); 11986 11987 tg3_full_unlock(tp); 11988 } 11989 11990 static int tg3_get_eeprom_len(struct net_device *dev) 11991 { 11992 struct tg3 *tp = netdev_priv(dev); 11993 11994 return tp->nvram_size; 11995 } 11996 11997 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) 11998 { 11999 struct tg3 *tp = netdev_priv(dev); 12000 int ret, cpmu_restore = 0; 12001 u8 *pd; 12002 u32 i, offset, len, b_offset, b_count, cpmu_val = 0; 12003 __be32 val; 12004 12005 if (tg3_flag(tp, NO_NVRAM)) 12006 return -EINVAL; 12007 12008 offset = eeprom->offset; 12009 len = eeprom->len; 12010 eeprom->len = 0; 12011 12012 eeprom->magic = TG3_EEPROM_MAGIC; 12013 12014 /* Override clock, link aware and link idle modes */ 12015 if (tg3_flag(tp, CPMU_PRESENT)) { 12016 cpmu_val = tr32(TG3_CPMU_CTRL); 12017 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE | 12018 CPMU_CTRL_LINK_IDLE_MODE)) { 12019 tw32(TG3_CPMU_CTRL, cpmu_val & 12020 ~(CPMU_CTRL_LINK_AWARE_MODE | 12021 CPMU_CTRL_LINK_IDLE_MODE)); 12022 cpmu_restore = 1; 12023 } 12024 } 12025 tg3_override_clk(tp); 12026 12027 if (offset & 3) { 12028 /* adjustments to start on required 4 byte boundary */ 12029 b_offset = offset & 3; 12030 b_count = 4 - b_offset; 12031 if (b_count > len) { 12032 /* i.e. 
offset=1 len=2 */ 12033 b_count = len; 12034 } 12035 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val); 12036 if (ret) 12037 goto eeprom_done; 12038 memcpy(data, ((char *)&val) + b_offset, b_count); 12039 len -= b_count; 12040 offset += b_count; 12041 eeprom->len += b_count; 12042 } 12043 12044 /* read bytes up to the last 4 byte boundary */ 12045 pd = &data[eeprom->len]; 12046 for (i = 0; i < (len - (len & 3)); i += 4) { 12047 ret = tg3_nvram_read_be32(tp, offset + i, &val); 12048 if (ret) { 12049 if (i) 12050 i -= 4; 12051 eeprom->len += i; 12052 goto eeprom_done; 12053 } 12054 memcpy(pd + i, &val, 4); 12055 if (need_resched()) { 12056 if (signal_pending(current)) { 12057 eeprom->len += i; 12058 ret = -EINTR; 12059 goto eeprom_done; 12060 } 12061 cond_resched(); 12062 } 12063 } 12064 eeprom->len += i; 12065 12066 if (len & 3) { 12067 /* read last bytes not ending on 4 byte boundary */ 12068 pd = &data[eeprom->len]; 12069 b_count = len & 3; 12070 b_offset = offset + len - b_count; 12071 ret = tg3_nvram_read_be32(tp, b_offset, &val); 12072 if (ret) 12073 goto eeprom_done; 12074 memcpy(pd, &val, b_count); 12075 eeprom->len += b_count; 12076 } 12077 ret = 0; 12078 12079 eeprom_done: 12080 /* Restore clock, link aware and link idle modes */ 12081 tg3_restore_clk(tp); 12082 if (cpmu_restore) 12083 tw32(TG3_CPMU_CTRL, cpmu_val); 12084 12085 return ret; 12086 } 12087 12088 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) 12089 { 12090 struct tg3 *tp = netdev_priv(dev); 12091 int ret; 12092 u32 offset, len, b_offset, odd_len; 12093 u8 *buf; 12094 __be32 start = 0, end; 12095 12096 if (tg3_flag(tp, NO_NVRAM) || 12097 eeprom->magic != TG3_EEPROM_MAGIC) 12098 return -EINVAL; 12099 12100 offset = eeprom->offset; 12101 len = eeprom->len; 12102 12103 if ((b_offset = (offset & 3))) { 12104 /* adjustments to start on required 4 byte boundary */ 12105 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start); 12106 if (ret) 12107 return ret; 12108 len += b_offset; 12109 offset &= ~3; 12110 if (len < 4) 12111 len = 4; 12112 } 12113 12114 odd_len = 0; 12115 if (len & 3) { 12116 /* adjustments to end on required 4 byte boundary */ 12117 odd_len = 1; 12118 len = (len + 3) & ~3; 12119 ret = tg3_nvram_read_be32(tp, offset+len-4, &end); 12120 if (ret) 12121 return ret; 12122 } 12123 12124 buf = data; 12125 if (b_offset || odd_len) { 12126 buf = kmalloc(len, GFP_KERNEL); 12127 if (!buf) 12128 return -ENOMEM; 12129 if (b_offset) 12130 memcpy(buf, &start, 4); 12131 if (odd_len) 12132 memcpy(buf+len-4, &end, 4); 12133 memcpy(buf + b_offset, data, eeprom->len); 12134 } 12135 12136 ret = tg3_nvram_write_block(tp, offset, len, buf); 12137 12138 if (buf != data) 12139 kfree(buf); 12140 12141 return ret; 12142 } 12143 12144 static int tg3_get_link_ksettings(struct net_device *dev, 12145 struct ethtool_link_ksettings *cmd) 12146 { 12147 struct tg3 *tp = netdev_priv(dev); 12148 u32 supported, advertising; 12149 12150 if (tg3_flag(tp, USE_PHYLIB)) { 12151 struct phy_device *phydev; 12152 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 12153 return -EAGAIN; 12154 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 12155 phy_ethtool_ksettings_get(phydev, cmd); 12156 12157 return 0; 12158 } 12159 12160 supported = (SUPPORTED_Autoneg); 12161 12162 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) 12163 supported |= (SUPPORTED_1000baseT_Half | 12164 SUPPORTED_1000baseT_Full); 12165 12166 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { 12167 supported |= (SUPPORTED_100baseT_Half | 12168 
SUPPORTED_100baseT_Full | 12169 SUPPORTED_10baseT_Half | 12170 SUPPORTED_10baseT_Full | 12171 SUPPORTED_TP); 12172 cmd->base.port = PORT_TP; 12173 } else { 12174 supported |= SUPPORTED_FIBRE; 12175 cmd->base.port = PORT_FIBRE; 12176 } 12177 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, 12178 supported); 12179 12180 advertising = tp->link_config.advertising; 12181 if (tg3_flag(tp, PAUSE_AUTONEG)) { 12182 if (tp->link_config.flowctrl & FLOW_CTRL_RX) { 12183 if (tp->link_config.flowctrl & FLOW_CTRL_TX) { 12184 advertising |= ADVERTISED_Pause; 12185 } else { 12186 advertising |= ADVERTISED_Pause | 12187 ADVERTISED_Asym_Pause; 12188 } 12189 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) { 12190 advertising |= ADVERTISED_Asym_Pause; 12191 } 12192 } 12193 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, 12194 advertising); 12195 12196 if (netif_running(dev) && tp->link_up) { 12197 cmd->base.speed = tp->link_config.active_speed; 12198 cmd->base.duplex = tp->link_config.active_duplex; 12199 ethtool_convert_legacy_u32_to_link_mode( 12200 cmd->link_modes.lp_advertising, 12201 tp->link_config.rmt_adv); 12202 12203 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { 12204 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE) 12205 cmd->base.eth_tp_mdix = ETH_TP_MDI_X; 12206 else 12207 cmd->base.eth_tp_mdix = ETH_TP_MDI; 12208 } 12209 } else { 12210 cmd->base.speed = SPEED_UNKNOWN; 12211 cmd->base.duplex = DUPLEX_UNKNOWN; 12212 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; 12213 } 12214 cmd->base.phy_address = tp->phy_addr; 12215 cmd->base.autoneg = tp->link_config.autoneg; 12216 return 0; 12217 } 12218 12219 static int tg3_set_link_ksettings(struct net_device *dev, 12220 const struct ethtool_link_ksettings *cmd) 12221 { 12222 struct tg3 *tp = netdev_priv(dev); 12223 u32 speed = cmd->base.speed; 12224 u32 advertising; 12225 12226 if (tg3_flag(tp, USE_PHYLIB)) { 12227 struct phy_device *phydev; 12228 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 12229 return -EAGAIN; 12230 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 12231 return phy_ethtool_ksettings_set(phydev, cmd); 12232 } 12233 12234 if (cmd->base.autoneg != AUTONEG_ENABLE && 12235 cmd->base.autoneg != AUTONEG_DISABLE) 12236 return -EINVAL; 12237 12238 if (cmd->base.autoneg == AUTONEG_DISABLE && 12239 cmd->base.duplex != DUPLEX_FULL && 12240 cmd->base.duplex != DUPLEX_HALF) 12241 return -EINVAL; 12242 12243 ethtool_convert_link_mode_to_legacy_u32(&advertising, 12244 cmd->link_modes.advertising); 12245 12246 if (cmd->base.autoneg == AUTONEG_ENABLE) { 12247 u32 mask = ADVERTISED_Autoneg | 12248 ADVERTISED_Pause | 12249 ADVERTISED_Asym_Pause; 12250 12251 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) 12252 mask |= ADVERTISED_1000baseT_Half | 12253 ADVERTISED_1000baseT_Full; 12254 12255 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 12256 mask |= ADVERTISED_100baseT_Half | 12257 ADVERTISED_100baseT_Full | 12258 ADVERTISED_10baseT_Half | 12259 ADVERTISED_10baseT_Full | 12260 ADVERTISED_TP; 12261 else 12262 mask |= ADVERTISED_FIBRE; 12263 12264 if (advertising & ~mask) 12265 return -EINVAL; 12266 12267 mask &= (ADVERTISED_1000baseT_Half | 12268 ADVERTISED_1000baseT_Full | 12269 ADVERTISED_100baseT_Half | 12270 ADVERTISED_100baseT_Full | 12271 ADVERTISED_10baseT_Half | 12272 ADVERTISED_10baseT_Full); 12273 12274 advertising &= mask; 12275 } else { 12276 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) { 12277 if (speed != SPEED_1000) 12278 return -EINVAL; 12279 12280 if (cmd->base.duplex != DUPLEX_FULL) 12281 return 
-EINVAL; 12282 } else { 12283 if (speed != SPEED_100 && 12284 speed != SPEED_10) 12285 return -EINVAL; 12286 } 12287 } 12288 12289 tg3_full_lock(tp, 0); 12290 12291 tp->link_config.autoneg = cmd->base.autoneg; 12292 if (cmd->base.autoneg == AUTONEG_ENABLE) { 12293 tp->link_config.advertising = (advertising | 12294 ADVERTISED_Autoneg); 12295 tp->link_config.speed = SPEED_UNKNOWN; 12296 tp->link_config.duplex = DUPLEX_UNKNOWN; 12297 } else { 12298 tp->link_config.advertising = 0; 12299 tp->link_config.speed = speed; 12300 tp->link_config.duplex = cmd->base.duplex; 12301 } 12302 12303 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; 12304 12305 tg3_warn_mgmt_link_flap(tp); 12306 12307 if (netif_running(dev)) 12308 tg3_setup_phy(tp, true); 12309 12310 tg3_full_unlock(tp); 12311 12312 return 0; 12313 } 12314 12315 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 12316 { 12317 struct tg3 *tp = netdev_priv(dev); 12318 12319 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); 12320 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); 12321 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version)); 12322 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info)); 12323 } 12324 12325 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 12326 { 12327 struct tg3 *tp = netdev_priv(dev); 12328 12329 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev)) 12330 wol->supported = WAKE_MAGIC; 12331 else 12332 wol->supported = 0; 12333 wol->wolopts = 0; 12334 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev)) 12335 wol->wolopts = WAKE_MAGIC; 12336 memset(&wol->sopass, 0, sizeof(wol->sopass)); 12337 } 12338 12339 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 12340 { 12341 struct tg3 *tp = netdev_priv(dev); 12342 struct device *dp = &tp->pdev->dev; 12343 12344 if (wol->wolopts & ~WAKE_MAGIC) 12345 return -EINVAL; 12346 if ((wol->wolopts & WAKE_MAGIC) && 12347 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp))) 12348 return -EINVAL; 12349 12350 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC); 12351 12352 if (device_may_wakeup(dp)) 12353 tg3_flag_set(tp, WOL_ENABLE); 12354 else 12355 tg3_flag_clear(tp, WOL_ENABLE); 12356 12357 return 0; 12358 } 12359 12360 static u32 tg3_get_msglevel(struct net_device *dev) 12361 { 12362 struct tg3 *tp = netdev_priv(dev); 12363 return tp->msg_enable; 12364 } 12365 12366 static void tg3_set_msglevel(struct net_device *dev, u32 value) 12367 { 12368 struct tg3 *tp = netdev_priv(dev); 12369 tp->msg_enable = value; 12370 } 12371 12372 static int tg3_nway_reset(struct net_device *dev) 12373 { 12374 struct tg3 *tp = netdev_priv(dev); 12375 int r; 12376 12377 if (!netif_running(dev)) 12378 return -EAGAIN; 12379 12380 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 12381 return -EINVAL; 12382 12383 tg3_warn_mgmt_link_flap(tp); 12384 12385 if (tg3_flag(tp, USE_PHYLIB)) { 12386 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 12387 return -EAGAIN; 12388 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); 12389 } else { 12390 u32 bmcr; 12391 12392 spin_lock_bh(&tp->lock); 12393 r = -EINVAL; 12394 tg3_readphy(tp, MII_BMCR, &bmcr); 12395 if (!tg3_readphy(tp, MII_BMCR, &bmcr) && 12396 ((bmcr & BMCR_ANENABLE) || 12397 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) { 12398 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART | 12399 BMCR_ANENABLE); 12400 r = 0; 12401 } 12402 spin_unlock_bh(&tp->lock); 12403 } 12404 12405 return r; 12406 } 
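/* Editor's note (hedged): tg3_nway_reset() above restarts autonegotiation
 * either via phylib (phy_start_aneg) or by a read-modify-write of the
 * standard MII BMCR register. The back-to-back tg3_readphy(MII_BMCR) calls
 * are preserved exactly as they appear in the driver; the first read is
 * presumably a dummy read to settle the MDIO bus, but the source does not
 * say so.
 *
 * A minimal, self-contained sketch of the BMCR restart pattern, kept inside
 * this comment so it is clearly illustrative and not part of the driver. It
 * uses only generic <linux/mii.h> definitions; mdio_read/mdio_write are
 * hypothetical bus accessors named here for illustration only:
 *
 *	static int restart_aneg(void *bus, int addr,
 *				int (*mdio_read)(void *bus, int addr, int reg),
 *				int (*mdio_write)(void *bus, int addr, int reg,
 *						  u16 val))
 *	{
 *		int bmcr = mdio_read(bus, addr, MII_BMCR);
 *
 *		if (bmcr < 0)
 *			return bmcr;	// bus error
 *		if (!(bmcr & BMCR_ANENABLE))
 *			return -EINVAL;	// autoneg is off; nothing to restart
 *		return mdio_write(bus, addr, MII_BMCR,
 *				  bmcr | BMCR_ANENABLE | BMCR_ANRESTART);
 *	}
 */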
12407 12408 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) 12409 { 12410 struct tg3 *tp = netdev_priv(dev); 12411 12412 ering->rx_max_pending = tp->rx_std_ring_mask; 12413 if (tg3_flag(tp, JUMBO_RING_ENABLE)) 12414 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask; 12415 else 12416 ering->rx_jumbo_max_pending = 0; 12417 12418 ering->tx_max_pending = TG3_TX_RING_SIZE - 1; 12419 12420 ering->rx_pending = tp->rx_pending; 12421 if (tg3_flag(tp, JUMBO_RING_ENABLE)) 12422 ering->rx_jumbo_pending = tp->rx_jumbo_pending; 12423 else 12424 ering->rx_jumbo_pending = 0; 12425 12426 ering->tx_pending = tp->napi[0].tx_pending; 12427 } 12428 12429 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) 12430 { 12431 struct tg3 *tp = netdev_priv(dev); 12432 int i, irq_sync = 0, err = 0; 12433 bool reset_phy = false; 12434 12435 if ((ering->rx_pending > tp->rx_std_ring_mask) || 12436 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) || 12437 (ering->tx_pending > TG3_TX_RING_SIZE - 1) || 12438 (ering->tx_pending <= MAX_SKB_FRAGS) || 12439 (tg3_flag(tp, TSO_BUG) && 12440 (ering->tx_pending <= (MAX_SKB_FRAGS * 3)))) 12441 return -EINVAL; 12442 12443 if (netif_running(dev)) { 12444 tg3_phy_stop(tp); 12445 tg3_netif_stop(tp); 12446 irq_sync = 1; 12447 } 12448 12449 tg3_full_lock(tp, irq_sync); 12450 12451 tp->rx_pending = ering->rx_pending; 12452 12453 if (tg3_flag(tp, MAX_RXPEND_64) && 12454 tp->rx_pending > 63) 12455 tp->rx_pending = 63; 12456 12457 if (tg3_flag(tp, JUMBO_RING_ENABLE)) 12458 tp->rx_jumbo_pending = ering->rx_jumbo_pending; 12459 12460 for (i = 0; i < tp->irq_max; i++) 12461 tp->napi[i].tx_pending = ering->tx_pending; 12462 12463 if (netif_running(dev)) { 12464 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 12465 /* Reset PHY to avoid PHY lock up */ 12466 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 12467 tg3_asic_rev(tp) == ASIC_REV_5719 || 12468 tg3_asic_rev(tp) == ASIC_REV_5720) 12469 reset_phy = true; 12470 12471 err = tg3_restart_hw(tp, reset_phy); 12472 if (!err) 12473 tg3_netif_start(tp); 12474 } 12475 12476 tg3_full_unlock(tp); 12477 12478 if (irq_sync && !err) 12479 tg3_phy_start(tp); 12480 12481 return err; 12482 } 12483 12484 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) 12485 { 12486 struct tg3 *tp = netdev_priv(dev); 12487 12488 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG); 12489 12490 if (tp->link_config.flowctrl & FLOW_CTRL_RX) 12491 epause->rx_pause = 1; 12492 else 12493 epause->rx_pause = 0; 12494 12495 if (tp->link_config.flowctrl & FLOW_CTRL_TX) 12496 epause->tx_pause = 1; 12497 else 12498 epause->tx_pause = 0; 12499 } 12500 12501 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) 12502 { 12503 struct tg3 *tp = netdev_priv(dev); 12504 int err = 0; 12505 bool reset_phy = false; 12506 12507 if (tp->link_config.autoneg == AUTONEG_ENABLE) 12508 tg3_warn_mgmt_link_flap(tp); 12509 12510 if (tg3_flag(tp, USE_PHYLIB)) { 12511 struct phy_device *phydev; 12512 12513 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 12514 12515 if (!phy_validate_pause(phydev, epause)) 12516 return -EINVAL; 12517 12518 tp->link_config.flowctrl = 0; 12519 phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause); 12520 if (epause->rx_pause) { 12521 tp->link_config.flowctrl |= FLOW_CTRL_RX; 12522 12523 if (epause->tx_pause) { 12524 tp->link_config.flowctrl |= FLOW_CTRL_TX; 12525 } 12526 } else if (epause->tx_pause) { 12527 tp->link_config.flowctrl |= 
FLOW_CTRL_TX; 12528 } 12529 12530 if (epause->autoneg) 12531 tg3_flag_set(tp, PAUSE_AUTONEG); 12532 else 12533 tg3_flag_clear(tp, PAUSE_AUTONEG); 12534 12535 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { 12536 if (phydev->autoneg) { 12537 /* phy_set_asym_pause() will 12538 * renegotiate the link to inform our 12539 * link partner of our flow control 12540 * settings, even if the flow control 12541 * is forced. Let tg3_adjust_link() 12542 * do the final flow control setup. 12543 */ 12544 return 0; 12545 } 12546 12547 if (!epause->autoneg) 12548 tg3_setup_flow_control(tp, 0, 0); 12549 } 12550 } else { 12551 int irq_sync = 0; 12552 12553 if (netif_running(dev)) { 12554 tg3_netif_stop(tp); 12555 irq_sync = 1; 12556 } 12557 12558 tg3_full_lock(tp, irq_sync); 12559 12560 if (epause->autoneg) 12561 tg3_flag_set(tp, PAUSE_AUTONEG); 12562 else 12563 tg3_flag_clear(tp, PAUSE_AUTONEG); 12564 if (epause->rx_pause) 12565 tp->link_config.flowctrl |= FLOW_CTRL_RX; 12566 else 12567 tp->link_config.flowctrl &= ~FLOW_CTRL_RX; 12568 if (epause->tx_pause) 12569 tp->link_config.flowctrl |= FLOW_CTRL_TX; 12570 else 12571 tp->link_config.flowctrl &= ~FLOW_CTRL_TX; 12572 12573 if (netif_running(dev)) { 12574 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 12575 /* Reset PHY to avoid PHY lock up */ 12576 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 12577 tg3_asic_rev(tp) == ASIC_REV_5719 || 12578 tg3_asic_rev(tp) == ASIC_REV_5720) 12579 reset_phy = true; 12580 12581 err = tg3_restart_hw(tp, reset_phy); 12582 if (!err) 12583 tg3_netif_start(tp); 12584 } 12585 12586 tg3_full_unlock(tp); 12587 } 12588 12589 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; 12590 12591 return err; 12592 } 12593 12594 static int tg3_get_sset_count(struct net_device *dev, int sset) 12595 { 12596 switch (sset) { 12597 case ETH_SS_TEST: 12598 return TG3_NUM_TEST; 12599 case ETH_SS_STATS: 12600 return TG3_NUM_STATS; 12601 default: 12602 return -EOPNOTSUPP; 12603 } 12604 } 12605 12606 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, 12607 u32 *rules __always_unused) 12608 { 12609 struct tg3 *tp = netdev_priv(dev); 12610 12611 if (!tg3_flag(tp, SUPPORT_MSIX)) 12612 return -EOPNOTSUPP; 12613 12614 switch (info->cmd) { 12615 case ETHTOOL_GRXRINGS: 12616 if (netif_running(tp->dev)) 12617 info->data = tp->rxq_cnt; 12618 else { 12619 info->data = num_online_cpus(); 12620 if (info->data > TG3_RSS_MAX_NUM_QS) 12621 info->data = TG3_RSS_MAX_NUM_QS; 12622 } 12623 12624 return 0; 12625 12626 default: 12627 return -EOPNOTSUPP; 12628 } 12629 } 12630 12631 static u32 tg3_get_rxfh_indir_size(struct net_device *dev) 12632 { 12633 u32 size = 0; 12634 struct tg3 *tp = netdev_priv(dev); 12635 12636 if (tg3_flag(tp, SUPPORT_MSIX)) 12637 size = TG3_RSS_INDIR_TBL_SIZE; 12638 12639 return size; 12640 } 12641 12642 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc) 12643 { 12644 struct tg3 *tp = netdev_priv(dev); 12645 int i; 12646 12647 if (hfunc) 12648 *hfunc = ETH_RSS_HASH_TOP; 12649 if (!indir) 12650 return 0; 12651 12652 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) 12653 indir[i] = tp->rss_ind_tbl[i]; 12654 12655 return 0; 12656 } 12657 12658 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, 12659 const u8 hfunc) 12660 { 12661 struct tg3 *tp = netdev_priv(dev); 12662 size_t i; 12663 12664 /* We require at least one supported parameter to be changed and no 12665 * change in any of the unsupported parameters 12666 */ 12667 if (key || 12668 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != 
ETH_RSS_HASH_TOP)) 12669 return -EOPNOTSUPP; 12670 12671 if (!indir) 12672 return 0; 12673 12674 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) 12675 tp->rss_ind_tbl[i] = indir[i]; 12676 12677 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS)) 12678 return 0; 12679 12680 /* It is legal to write the indirection 12681 * table while the device is running. 12682 */ 12683 tg3_full_lock(tp, 0); 12684 tg3_rss_write_indir_tbl(tp); 12685 tg3_full_unlock(tp); 12686 12687 return 0; 12688 } 12689 12690 static void tg3_get_channels(struct net_device *dev, 12691 struct ethtool_channels *channel) 12692 { 12693 struct tg3 *tp = netdev_priv(dev); 12694 u32 deflt_qs = netif_get_num_default_rss_queues(); 12695 12696 channel->max_rx = tp->rxq_max; 12697 channel->max_tx = tp->txq_max; 12698 12699 if (netif_running(dev)) { 12700 channel->rx_count = tp->rxq_cnt; 12701 channel->tx_count = tp->txq_cnt; 12702 } else { 12703 if (tp->rxq_req) 12704 channel->rx_count = tp->rxq_req; 12705 else 12706 channel->rx_count = min(deflt_qs, tp->rxq_max); 12707 12708 if (tp->txq_req) 12709 channel->tx_count = tp->txq_req; 12710 else 12711 channel->tx_count = min(deflt_qs, tp->txq_max); 12712 } 12713 } 12714 12715 static int tg3_set_channels(struct net_device *dev, 12716 struct ethtool_channels *channel) 12717 { 12718 struct tg3 *tp = netdev_priv(dev); 12719 12720 if (!tg3_flag(tp, SUPPORT_MSIX)) 12721 return -EOPNOTSUPP; 12722 12723 if (channel->rx_count > tp->rxq_max || 12724 channel->tx_count > tp->txq_max) 12725 return -EINVAL; 12726 12727 tp->rxq_req = channel->rx_count; 12728 tp->txq_req = channel->tx_count; 12729 12730 if (!netif_running(dev)) 12731 return 0; 12732 12733 tg3_stop(tp); 12734 12735 tg3_carrier_off(tp); 12736 12737 tg3_start(tp, true, false, false); 12738 12739 return 0; 12740 } 12741 12742 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf) 12743 { 12744 switch (stringset) { 12745 case ETH_SS_STATS: 12746 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys)); 12747 break; 12748 case ETH_SS_TEST: 12749 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys)); 12750 break; 12751 default: 12752 WARN_ON(1); /* we need a WARN() */ 12753 break; 12754 } 12755 } 12756 12757 static int tg3_set_phys_id(struct net_device *dev, 12758 enum ethtool_phys_id_state state) 12759 { 12760 struct tg3 *tp = netdev_priv(dev); 12761 12762 switch (state) { 12763 case ETHTOOL_ID_ACTIVE: 12764 return 1; /* cycle on/off once per second */ 12765 12766 case ETHTOOL_ID_ON: 12767 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | 12768 LED_CTRL_1000MBPS_ON | 12769 LED_CTRL_100MBPS_ON | 12770 LED_CTRL_10MBPS_ON | 12771 LED_CTRL_TRAFFIC_OVERRIDE | 12772 LED_CTRL_TRAFFIC_BLINK | 12773 LED_CTRL_TRAFFIC_LED); 12774 break; 12775 12776 case ETHTOOL_ID_OFF: 12777 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | 12778 LED_CTRL_TRAFFIC_OVERRIDE); 12779 break; 12780 12781 case ETHTOOL_ID_INACTIVE: 12782 tw32(MAC_LED_CTRL, tp->led_ctrl); 12783 break; 12784 } 12785 12786 return 0; 12787 } 12788 12789 static void tg3_get_ethtool_stats(struct net_device *dev, 12790 struct ethtool_stats *estats, u64 *tmp_stats) 12791 { 12792 struct tg3 *tp = netdev_priv(dev); 12793 12794 if (tp->hw_stats) 12795 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats); 12796 else 12797 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats)); 12798 } 12799 12800 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen) 12801 { 12802 int i; 12803 __be32 *buf; 12804 u32 offset = 0, len = 0; 12805 u32 magic, val; 12806 12807 if (tg3_flag(tp, NO_NVRAM) 
|| tg3_nvram_read(tp, 0, &magic)) 12808 return NULL; 12809 12810 if (magic == TG3_EEPROM_MAGIC) { 12811 for (offset = TG3_NVM_DIR_START; 12812 offset < TG3_NVM_DIR_END; 12813 offset += TG3_NVM_DIRENT_SIZE) { 12814 if (tg3_nvram_read(tp, offset, &val)) 12815 return NULL; 12816 12817 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == 12818 TG3_NVM_DIRTYPE_EXTVPD) 12819 break; 12820 } 12821 12822 if (offset != TG3_NVM_DIR_END) { 12823 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4; 12824 if (tg3_nvram_read(tp, offset + 4, &offset)) 12825 return NULL; 12826 12827 offset = tg3_nvram_logical_addr(tp, offset); 12828 } 12829 } 12830 12831 if (!offset || !len) { 12832 offset = TG3_NVM_VPD_OFF; 12833 len = TG3_NVM_VPD_LEN; 12834 } 12835 12836 buf = kmalloc(len, GFP_KERNEL); 12837 if (buf == NULL) 12838 return NULL; 12839 12840 if (magic == TG3_EEPROM_MAGIC) { 12841 for (i = 0; i < len; i += 4) { 12842 /* The data is in little-endian format in NVRAM. 12843 * Use the big-endian read routines to preserve 12844 * the byte order as it exists in NVRAM. 12845 */ 12846 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4])) 12847 goto error; 12848 } 12849 } else { 12850 u8 *ptr; 12851 ssize_t cnt; 12852 unsigned int pos = 0; 12853 12854 ptr = (u8 *)&buf[0]; 12855 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) { 12856 cnt = pci_read_vpd(tp->pdev, pos, 12857 len - pos, ptr); 12858 if (cnt == -ETIMEDOUT || cnt == -EINTR) 12859 cnt = 0; 12860 else if (cnt < 0) 12861 goto error; 12862 } 12863 if (pos != len) 12864 goto error; 12865 } 12866 12867 *vpdlen = len; 12868 12869 return buf; 12870 12871 error: 12872 kfree(buf); 12873 return NULL; 12874 } 12875 12876 #define NVRAM_TEST_SIZE 0x100 12877 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14 12878 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18 12879 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c 12880 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20 12881 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24 12882 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50 12883 #define NVRAM_SELFBOOT_HW_SIZE 0x20 12884 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c 12885 12886 static int tg3_test_nvram(struct tg3 *tp) 12887 { 12888 u32 csum, magic, len; 12889 __be32 *buf; 12890 int i, j, k, err = 0, size; 12891 12892 if (tg3_flag(tp, NO_NVRAM)) 12893 return 0; 12894 12895 if (tg3_nvram_read(tp, 0, &magic) != 0) 12896 return -EIO; 12897 12898 if (magic == TG3_EEPROM_MAGIC) 12899 size = NVRAM_TEST_SIZE; 12900 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) { 12901 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) == 12902 TG3_EEPROM_SB_FORMAT_1) { 12903 switch (magic & TG3_EEPROM_SB_REVISION_MASK) { 12904 case TG3_EEPROM_SB_REVISION_0: 12905 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE; 12906 break; 12907 case TG3_EEPROM_SB_REVISION_2: 12908 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE; 12909 break; 12910 case TG3_EEPROM_SB_REVISION_3: 12911 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE; 12912 break; 12913 case TG3_EEPROM_SB_REVISION_4: 12914 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE; 12915 break; 12916 case TG3_EEPROM_SB_REVISION_5: 12917 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE; 12918 break; 12919 case TG3_EEPROM_SB_REVISION_6: 12920 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE; 12921 break; 12922 default: 12923 return -EIO; 12924 } 12925 } else 12926 return 0; 12927 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW) 12928 size = NVRAM_SELFBOOT_HW_SIZE; 12929 else 12930 return -EIO; 12931 12932 buf = kmalloc(size, GFP_KERNEL); 12933 if (buf == NULL) 12934 return -ENOMEM; 12935 12936 err = -EIO; 12937 for (i = 0, j = 0; i < size; i += 4, 
j++) { 12938 err = tg3_nvram_read_be32(tp, i, &buf[j]); 12939 if (err) 12940 break; 12941 } 12942 if (i < size) 12943 goto out; 12944 12945 /* Selfboot format */ 12946 magic = be32_to_cpu(buf[0]); 12947 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == 12948 TG3_EEPROM_MAGIC_FW) { 12949 u8 *buf8 = (u8 *) buf, csum8 = 0; 12950 12951 if ((magic & TG3_EEPROM_SB_REVISION_MASK) == 12952 TG3_EEPROM_SB_REVISION_2) { 12953 /* For rev 2, the csum doesn't include the MBA. */ 12954 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++) 12955 csum8 += buf8[i]; 12956 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++) 12957 csum8 += buf8[i]; 12958 } else { 12959 for (i = 0; i < size; i++) 12960 csum8 += buf8[i]; 12961 } 12962 12963 if (csum8 == 0) { 12964 err = 0; 12965 goto out; 12966 } 12967 12968 err = -EIO; 12969 goto out; 12970 } 12971 12972 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == 12973 TG3_EEPROM_MAGIC_HW) { 12974 u8 data[NVRAM_SELFBOOT_DATA_SIZE]; 12975 u8 parity[NVRAM_SELFBOOT_DATA_SIZE]; 12976 u8 *buf8 = (u8 *) buf; 12977 12978 /* Separate the parity bits and the data bytes. */ 12979 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) { 12980 if ((i == 0) || (i == 8)) { 12981 int l; 12982 u8 msk; 12983 12984 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1) 12985 parity[k++] = buf8[i] & msk; 12986 i++; 12987 } else if (i == 16) { 12988 int l; 12989 u8 msk; 12990 12991 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1) 12992 parity[k++] = buf8[i] & msk; 12993 i++; 12994 12995 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1) 12996 parity[k++] = buf8[i] & msk; 12997 i++; 12998 } 12999 data[j++] = buf8[i]; 13000 } 13001 13002 err = -EIO; 13003 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) { 13004 u8 hw8 = hweight8(data[i]); 13005 13006 if ((hw8 & 0x1) && parity[i]) 13007 goto out; 13008 else if (!(hw8 & 0x1) && !parity[i]) 13009 goto out; 13010 } 13011 err = 0; 13012 goto out; 13013 } 13014 13015 err = -EIO; 13016 13017 /* Bootstrap checksum at offset 0x10 */ 13018 csum = calc_crc((unsigned char *) buf, 0x10); 13019 if (csum != le32_to_cpu(buf[0x10/4])) 13020 goto out; 13021 13022 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */ 13023 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88); 13024 if (csum != le32_to_cpu(buf[0xfc/4])) 13025 goto out; 13026 13027 kfree(buf); 13028 13029 buf = tg3_vpd_readblock(tp, &len); 13030 if (!buf) 13031 return -ENOMEM; 13032 13033 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA); 13034 if (i > 0) { 13035 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]); 13036 if (j < 0) 13037 goto out; 13038 13039 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len) 13040 goto out; 13041 13042 i += PCI_VPD_LRDT_TAG_SIZE; 13043 j = pci_vpd_find_info_keyword((u8 *)buf, i, j, 13044 PCI_VPD_RO_KEYWORD_CHKSUM); 13045 if (j > 0) { 13046 u8 csum8 = 0; 13047 13048 j += PCI_VPD_INFO_FLD_HDR_SIZE; 13049 13050 for (i = 0; i <= j; i++) 13051 csum8 += ((u8 *)buf)[i]; 13052 13053 if (csum8) 13054 goto out; 13055 } 13056 } 13057 13058 err = 0; 13059 13060 out: 13061 kfree(buf); 13062 return err; 13063 } 13064 13065 #define TG3_SERDES_TIMEOUT_SEC 2 13066 #define TG3_COPPER_TIMEOUT_SEC 6 13067 13068 static int tg3_test_link(struct tg3 *tp) 13069 { 13070 int i, max; 13071 13072 if (!netif_running(tp->dev)) 13073 return -ENODEV; 13074 13075 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) 13076 max = TG3_SERDES_TIMEOUT_SEC; 13077 else 13078 max = TG3_COPPER_TIMEOUT_SEC; 13079 13080 for (i = 0; i < max; i++) { 13081 if (tp->link_up) 13082 return 0; 13083 13084 if (msleep_interruptible(1000)) 13085 break; 
13086 } 13087 13088 return -EIO; 13089 } 13090 13091 /* Only test the commonly used registers */ 13092 static int tg3_test_registers(struct tg3 *tp) 13093 { 13094 int i, is_5705, is_5750; 13095 u32 offset, read_mask, write_mask, val, save_val, read_val; 13096 static struct { 13097 u16 offset; 13098 u16 flags; 13099 #define TG3_FL_5705 0x1 13100 #define TG3_FL_NOT_5705 0x2 13101 #define TG3_FL_NOT_5788 0x4 13102 #define TG3_FL_NOT_5750 0x8 13103 u32 read_mask; 13104 u32 write_mask; 13105 } reg_tbl[] = { 13106 /* MAC Control Registers */ 13107 { MAC_MODE, TG3_FL_NOT_5705, 13108 0x00000000, 0x00ef6f8c }, 13109 { MAC_MODE, TG3_FL_5705, 13110 0x00000000, 0x01ef6b8c }, 13111 { MAC_STATUS, TG3_FL_NOT_5705, 13112 0x03800107, 0x00000000 }, 13113 { MAC_STATUS, TG3_FL_5705, 13114 0x03800100, 0x00000000 }, 13115 { MAC_ADDR_0_HIGH, 0x0000, 13116 0x00000000, 0x0000ffff }, 13117 { MAC_ADDR_0_LOW, 0x0000, 13118 0x00000000, 0xffffffff }, 13119 { MAC_RX_MTU_SIZE, 0x0000, 13120 0x00000000, 0x0000ffff }, 13121 { MAC_TX_MODE, 0x0000, 13122 0x00000000, 0x00000070 }, 13123 { MAC_TX_LENGTHS, 0x0000, 13124 0x00000000, 0x00003fff }, 13125 { MAC_RX_MODE, TG3_FL_NOT_5705, 13126 0x00000000, 0x000007fc }, 13127 { MAC_RX_MODE, TG3_FL_5705, 13128 0x00000000, 0x000007dc }, 13129 { MAC_HASH_REG_0, 0x0000, 13130 0x00000000, 0xffffffff }, 13131 { MAC_HASH_REG_1, 0x0000, 13132 0x00000000, 0xffffffff }, 13133 { MAC_HASH_REG_2, 0x0000, 13134 0x00000000, 0xffffffff }, 13135 { MAC_HASH_REG_3, 0x0000, 13136 0x00000000, 0xffffffff }, 13137 13138 /* Receive Data and Receive BD Initiator Control Registers. */ 13139 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705, 13140 0x00000000, 0xffffffff }, 13141 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705, 13142 0x00000000, 0xffffffff }, 13143 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705, 13144 0x00000000, 0x00000003 }, 13145 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705, 13146 0x00000000, 0xffffffff }, 13147 { RCVDBDI_STD_BD+0, 0x0000, 13148 0x00000000, 0xffffffff }, 13149 { RCVDBDI_STD_BD+4, 0x0000, 13150 0x00000000, 0xffffffff }, 13151 { RCVDBDI_STD_BD+8, 0x0000, 13152 0x00000000, 0xffff0002 }, 13153 { RCVDBDI_STD_BD+0xc, 0x0000, 13154 0x00000000, 0xffffffff }, 13155 13156 /* Receive BD Initiator Control Registers. */ 13157 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705, 13158 0x00000000, 0xffffffff }, 13159 { RCVBDI_STD_THRESH, TG3_FL_5705, 13160 0x00000000, 0x000003ff }, 13161 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705, 13162 0x00000000, 0xffffffff }, 13163 13164 /* Host Coalescing Control Registers. 
*/ 13165 { HOSTCC_MODE, TG3_FL_NOT_5705, 13166 0x00000000, 0x00000004 }, 13167 { HOSTCC_MODE, TG3_FL_5705, 13168 0x00000000, 0x000000f6 }, 13169 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705, 13170 0x00000000, 0xffffffff }, 13171 { HOSTCC_RXCOL_TICKS, TG3_FL_5705, 13172 0x00000000, 0x000003ff }, 13173 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705, 13174 0x00000000, 0xffffffff }, 13175 { HOSTCC_TXCOL_TICKS, TG3_FL_5705, 13176 0x00000000, 0x000003ff }, 13177 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705, 13178 0x00000000, 0xffffffff }, 13179 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, 13180 0x00000000, 0x000000ff }, 13181 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705, 13182 0x00000000, 0xffffffff }, 13183 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, 13184 0x00000000, 0x000000ff }, 13185 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705, 13186 0x00000000, 0xffffffff }, 13187 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705, 13188 0x00000000, 0xffffffff }, 13189 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705, 13190 0x00000000, 0xffffffff }, 13191 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, 13192 0x00000000, 0x000000ff }, 13193 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705, 13194 0x00000000, 0xffffffff }, 13195 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, 13196 0x00000000, 0x000000ff }, 13197 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705, 13198 0x00000000, 0xffffffff }, 13199 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705, 13200 0x00000000, 0xffffffff }, 13201 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705, 13202 0x00000000, 0xffffffff }, 13203 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000, 13204 0x00000000, 0xffffffff }, 13205 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000, 13206 0x00000000, 0xffffffff }, 13207 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000, 13208 0xffffffff, 0x00000000 }, 13209 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000, 13210 0xffffffff, 0x00000000 }, 13211 13212 /* Buffer Manager Control Registers. 
*/ 13213 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750, 13214 0x00000000, 0x007fff80 }, 13215 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750, 13216 0x00000000, 0x007fffff }, 13217 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000, 13218 0x00000000, 0x0000003f }, 13219 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000, 13220 0x00000000, 0x000001ff }, 13221 { BUFMGR_MB_HIGH_WATER, 0x0000, 13222 0x00000000, 0x000001ff }, 13223 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705, 13224 0xffffffff, 0x00000000 }, 13225 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705, 13226 0xffffffff, 0x00000000 }, 13227 13228 /* Mailbox Registers */ 13229 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000, 13230 0x00000000, 0x000001ff }, 13231 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705, 13232 0x00000000, 0x000001ff }, 13233 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000, 13234 0x00000000, 0x000007ff }, 13235 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000, 13236 0x00000000, 0x000001ff }, 13237 13238 { 0xffff, 0x0000, 0x00000000, 0x00000000 }, 13239 }; 13240 13241 is_5705 = is_5750 = 0; 13242 if (tg3_flag(tp, 5705_PLUS)) { 13243 is_5705 = 1; 13244 if (tg3_flag(tp, 5750_PLUS)) 13245 is_5750 = 1; 13246 } 13247 13248 for (i = 0; reg_tbl[i].offset != 0xffff; i++) { 13249 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705)) 13250 continue; 13251 13252 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705)) 13253 continue; 13254 13255 if (tg3_flag(tp, IS_5788) && 13256 (reg_tbl[i].flags & TG3_FL_NOT_5788)) 13257 continue; 13258 13259 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750)) 13260 continue; 13261 13262 offset = (u32) reg_tbl[i].offset; 13263 read_mask = reg_tbl[i].read_mask; 13264 write_mask = reg_tbl[i].write_mask; 13265 13266 /* Save the original register content */ 13267 save_val = tr32(offset); 13268 13269 /* Determine the read-only value. */ 13270 read_val = save_val & read_mask; 13271 13272 /* Write zero to the register, then make sure the read-only bits 13273 * are not changed and the read/write bits are all zeros. 13274 */ 13275 tw32(offset, 0); 13276 13277 val = tr32(offset); 13278 13279 /* Test the read-only and read/write bits. */ 13280 if (((val & read_mask) != read_val) || (val & write_mask)) 13281 goto out; 13282 13283 /* Write ones to all the bits defined by RdMask and WrMask, then 13284 * make sure the read-only bits are not changed and the 13285 * read/write bits are all ones. 13286 */ 13287 tw32(offset, read_mask | write_mask); 13288 13289 val = tr32(offset); 13290 13291 /* Test the read-only bits. */ 13292 if ((val & read_mask) != read_val) 13293 goto out; 13294 13295 /* Test the read/write bits. 
*/ 13296 if ((val & write_mask) != write_mask) 13297 goto out; 13298 13299 tw32(offset, save_val); 13300 } 13301 13302 return 0; 13303 13304 out: 13305 if (netif_msg_hw(tp)) 13306 netdev_err(tp->dev, 13307 "Register test failed at offset %x\n", offset); 13308 tw32(offset, save_val); 13309 return -EIO; 13310 } 13311 13312 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len) 13313 { 13314 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a }; 13315 int i; 13316 u32 j; 13317 13318 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) { 13319 for (j = 0; j < len; j += 4) { 13320 u32 val; 13321 13322 tg3_write_mem(tp, offset + j, test_pattern[i]); 13323 tg3_read_mem(tp, offset + j, &val); 13324 if (val != test_pattern[i]) 13325 return -EIO; 13326 } 13327 } 13328 return 0; 13329 } 13330 13331 static int tg3_test_memory(struct tg3 *tp) 13332 { 13333 static struct mem_entry { 13334 u32 offset; 13335 u32 len; 13336 } mem_tbl_570x[] = { 13337 { 0x00000000, 0x00b50}, 13338 { 0x00002000, 0x1c000}, 13339 { 0xffffffff, 0x00000} 13340 }, mem_tbl_5705[] = { 13341 { 0x00000100, 0x0000c}, 13342 { 0x00000200, 0x00008}, 13343 { 0x00004000, 0x00800}, 13344 { 0x00006000, 0x01000}, 13345 { 0x00008000, 0x02000}, 13346 { 0x00010000, 0x0e000}, 13347 { 0xffffffff, 0x00000} 13348 }, mem_tbl_5755[] = { 13349 { 0x00000200, 0x00008}, 13350 { 0x00004000, 0x00800}, 13351 { 0x00006000, 0x00800}, 13352 { 0x00008000, 0x02000}, 13353 { 0x00010000, 0x0c000}, 13354 { 0xffffffff, 0x00000} 13355 }, mem_tbl_5906[] = { 13356 { 0x00000200, 0x00008}, 13357 { 0x00004000, 0x00400}, 13358 { 0x00006000, 0x00400}, 13359 { 0x00008000, 0x01000}, 13360 { 0x00010000, 0x01000}, 13361 { 0xffffffff, 0x00000} 13362 }, mem_tbl_5717[] = { 13363 { 0x00000200, 0x00008}, 13364 { 0x00010000, 0x0a000}, 13365 { 0x00020000, 0x13c00}, 13366 { 0xffffffff, 0x00000} 13367 }, mem_tbl_57765[] = { 13368 { 0x00000200, 0x00008}, 13369 { 0x00004000, 0x00800}, 13370 { 0x00006000, 0x09800}, 13371 { 0x00010000, 0x0a000}, 13372 { 0xffffffff, 0x00000} 13373 }; 13374 struct mem_entry *mem_tbl; 13375 int err = 0; 13376 int i; 13377 13378 if (tg3_flag(tp, 5717_PLUS)) 13379 mem_tbl = mem_tbl_5717; 13380 else if (tg3_flag(tp, 57765_CLASS) || 13381 tg3_asic_rev(tp) == ASIC_REV_5762) 13382 mem_tbl = mem_tbl_57765; 13383 else if (tg3_flag(tp, 5755_PLUS)) 13384 mem_tbl = mem_tbl_5755; 13385 else if (tg3_asic_rev(tp) == ASIC_REV_5906) 13386 mem_tbl = mem_tbl_5906; 13387 else if (tg3_flag(tp, 5705_PLUS)) 13388 mem_tbl = mem_tbl_5705; 13389 else 13390 mem_tbl = mem_tbl_570x; 13391 13392 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) { 13393 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len); 13394 if (err) 13395 break; 13396 } 13397 13398 return err; 13399 } 13400 13401 #define TG3_TSO_MSS 500 13402 13403 #define TG3_TSO_IP_HDR_LEN 20 13404 #define TG3_TSO_TCP_HDR_LEN 20 13405 #define TG3_TSO_TCP_OPT_LEN 12 13406 13407 static const u8 tg3_tso_header[] = { 13408 0x08, 0x00, 13409 0x45, 0x00, 0x00, 0x00, 13410 0x00, 0x00, 0x40, 0x00, 13411 0x40, 0x06, 0x00, 0x00, 13412 0x0a, 0x00, 0x00, 0x01, 13413 0x0a, 0x00, 0x00, 0x02, 13414 0x0d, 0x00, 0xe0, 0x00, 13415 0x00, 0x00, 0x01, 0x00, 13416 0x00, 0x00, 0x02, 0x00, 13417 0x80, 0x10, 0x10, 0x00, 13418 0x14, 0x09, 0x00, 0x00, 13419 0x01, 0x01, 0x08, 0x0a, 13420 0x11, 0x11, 0x11, 0x11, 13421 0x11, 0x11, 0x11, 0x11, 13422 }; 13423 13424 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback) 13425 { 13426 u32 rx_start_idx, rx_idx, tx_idx, opaque_key; 13427 u32 base_flags = 0, mss = 0, 
desc_idx, coal_now, data_off, val; 13428 u32 budget; 13429 struct sk_buff *skb; 13430 u8 *tx_data, *rx_data; 13431 dma_addr_t map; 13432 int num_pkts, tx_len, rx_len, i, err; 13433 struct tg3_rx_buffer_desc *desc; 13434 struct tg3_napi *tnapi, *rnapi; 13435 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring; 13436 13437 tnapi = &tp->napi[0]; 13438 rnapi = &tp->napi[0]; 13439 if (tp->irq_cnt > 1) { 13440 if (tg3_flag(tp, ENABLE_RSS)) 13441 rnapi = &tp->napi[1]; 13442 if (tg3_flag(tp, ENABLE_TSS)) 13443 tnapi = &tp->napi[1]; 13444 } 13445 coal_now = tnapi->coal_now | rnapi->coal_now; 13446 13447 err = -EIO; 13448 13449 tx_len = pktsz; 13450 skb = netdev_alloc_skb(tp->dev, tx_len); 13451 if (!skb) 13452 return -ENOMEM; 13453 13454 tx_data = skb_put(skb, tx_len); 13455 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN); 13456 memset(tx_data + ETH_ALEN, 0x0, 8); 13457 13458 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN); 13459 13460 if (tso_loopback) { 13461 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN]; 13462 13463 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN + 13464 TG3_TSO_TCP_OPT_LEN; 13465 13466 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header, 13467 sizeof(tg3_tso_header)); 13468 mss = TG3_TSO_MSS; 13469 13470 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header); 13471 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS); 13472 13473 /* Set the total length field in the IP header */ 13474 iph->tot_len = htons((u16)(mss + hdr_len)); 13475 13476 base_flags = (TXD_FLAG_CPU_PRE_DMA | 13477 TXD_FLAG_CPU_POST_DMA); 13478 13479 if (tg3_flag(tp, HW_TSO_1) || 13480 tg3_flag(tp, HW_TSO_2) || 13481 tg3_flag(tp, HW_TSO_3)) { 13482 struct tcphdr *th; 13483 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN; 13484 th = (struct tcphdr *)&tx_data[val]; 13485 th->check = 0; 13486 } else 13487 base_flags |= TXD_FLAG_TCPUDP_CSUM; 13488 13489 if (tg3_flag(tp, HW_TSO_3)) { 13490 mss |= (hdr_len & 0xc) << 12; 13491 if (hdr_len & 0x10) 13492 base_flags |= 0x00000010; 13493 base_flags |= (hdr_len & 0x3e0) << 5; 13494 } else if (tg3_flag(tp, HW_TSO_2)) 13495 mss |= hdr_len << 9; 13496 else if (tg3_flag(tp, HW_TSO_1) || 13497 tg3_asic_rev(tp) == ASIC_REV_5705) { 13498 mss |= (TG3_TSO_TCP_OPT_LEN << 9); 13499 } else { 13500 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10); 13501 } 13502 13503 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header); 13504 } else { 13505 num_pkts = 1; 13506 data_off = ETH_HLEN; 13507 13508 if (tg3_flag(tp, USE_JUMBO_BDFLAG) && 13509 tx_len > VLAN_ETH_FRAME_LEN) 13510 base_flags |= TXD_FLAG_JMB_PKT; 13511 } 13512 13513 for (i = data_off; i < tx_len; i++) 13514 tx_data[i] = (u8) (i & 0xff); 13515 13516 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE); 13517 if (pci_dma_mapping_error(tp->pdev, map)) { 13518 dev_kfree_skb(skb); 13519 return -EIO; 13520 } 13521 13522 val = tnapi->tx_prod; 13523 tnapi->tx_buffers[val].skb = skb; 13524 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map); 13525 13526 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 13527 rnapi->coal_now); 13528 13529 udelay(10); 13530 13531 rx_start_idx = rnapi->hw_status->idx[0].rx_producer; 13532 13533 budget = tg3_tx_avail(tnapi); 13534 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len, 13535 base_flags | TXD_FLAG_END, mss, 0)) { 13536 tnapi->tx_buffers[val].skb = NULL; 13537 dev_kfree_skb(skb); 13538 return -EIO; 13539 } 13540 13541 tnapi->tx_prod++; 13542 13543 /* Sync BD data before updating mailbox */ 13544 wmb(); 13545 13546 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod); 13547 
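/* Editor's note (hedged): the tr32_mailbox() read-back that follows is the
 * usual flush for a posted PCI write: re-reading the producer mailbox forces
 * the tw32_tx_mbox() update above out to the device before the status-polling
 * loop begins. The driver does not document the intent here, so treat this as
 * an editorial inference.
 */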
tr32_mailbox(tnapi->prodmbox); 13548 13549 udelay(10); 13550 13551 /* 350 usec to allow enough time on some 10/100 Mbps devices. */ 13552 for (i = 0; i < 35; i++) { 13553 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 13554 coal_now); 13555 13556 udelay(10); 13557 13558 tx_idx = tnapi->hw_status->idx[0].tx_consumer; 13559 rx_idx = rnapi->hw_status->idx[0].rx_producer; 13560 if ((tx_idx == tnapi->tx_prod) && 13561 (rx_idx == (rx_start_idx + num_pkts))) 13562 break; 13563 } 13564 13565 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1); 13566 dev_kfree_skb(skb); 13567 13568 if (tx_idx != tnapi->tx_prod) 13569 goto out; 13570 13571 if (rx_idx != rx_start_idx + num_pkts) 13572 goto out; 13573 13574 val = data_off; 13575 while (rx_idx != rx_start_idx) { 13576 desc = &rnapi->rx_rcb[rx_start_idx++]; 13577 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; 13578 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; 13579 13580 if ((desc->err_vlan & RXD_ERR_MASK) != 0 && 13581 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) 13582 goto out; 13583 13584 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) 13585 - ETH_FCS_LEN; 13586 13587 if (!tso_loopback) { 13588 if (rx_len != tx_len) 13589 goto out; 13590 13591 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) { 13592 if (opaque_key != RXD_OPAQUE_RING_STD) 13593 goto out; 13594 } else { 13595 if (opaque_key != RXD_OPAQUE_RING_JUMBO) 13596 goto out; 13597 } 13598 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && 13599 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK) 13600 >> RXD_TCPCSUM_SHIFT != 0xffff) { 13601 goto out; 13602 } 13603 13604 if (opaque_key == RXD_OPAQUE_RING_STD) { 13605 rx_data = tpr->rx_std_buffers[desc_idx].data; 13606 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], 13607 mapping); 13608 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { 13609 rx_data = tpr->rx_jmb_buffers[desc_idx].data; 13610 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx], 13611 mapping); 13612 } else 13613 goto out; 13614 13615 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, 13616 PCI_DMA_FROMDEVICE); 13617 13618 rx_data += TG3_RX_OFFSET(tp); 13619 for (i = data_off; i < rx_len; i++, val++) { 13620 if (*(rx_data + i) != (u8) (val & 0xff)) 13621 goto out; 13622 } 13623 } 13624 13625 err = 0; 13626 13627 /* tg3_free_rings will unmap and free the rx_data */ 13628 out: 13629 return err; 13630 } 13631 13632 #define TG3_STD_LOOPBACK_FAILED 1 13633 #define TG3_JMB_LOOPBACK_FAILED 2 13634 #define TG3_TSO_LOOPBACK_FAILED 4 13635 #define TG3_LOOPBACK_FAILED \ 13636 (TG3_STD_LOOPBACK_FAILED | \ 13637 TG3_JMB_LOOPBACK_FAILED | \ 13638 TG3_TSO_LOOPBACK_FAILED) 13639 13640 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk) 13641 { 13642 int err = -EIO; 13643 u32 eee_cap; 13644 u32 jmb_pkt_sz = 9000; 13645 13646 if (tp->dma_limit) 13647 jmb_pkt_sz = tp->dma_limit - ETH_HLEN; 13648 13649 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP; 13650 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP; 13651 13652 if (!netif_running(tp->dev)) { 13653 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13654 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13655 if (do_extlpbk) 13656 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13657 goto done; 13658 } 13659 13660 err = tg3_reset_hw(tp, true); 13661 if (err) { 13662 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13663 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13664 if (do_extlpbk) 13665 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13666 goto done; 13667 } 13668 13669 if (tg3_flag(tp, ENABLE_RSS)) { 13670 int 
i; 13671 13672 /* Reroute all rx packets to the 1st queue */ 13673 for (i = MAC_RSS_INDIR_TBL_0; 13674 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4) 13675 tw32(i, 0x0); 13676 } 13677 13678 /* HW errata - mac loopback fails in some cases on 5780. 13679 * Normal traffic and PHY loopback are not affected by 13680 * errata. Also, the MAC loopback test is deprecated for 13681 * all newer ASIC revisions. 13682 */ 13683 if (tg3_asic_rev(tp) != ASIC_REV_5780 && 13684 !tg3_flag(tp, CPMU_PRESENT)) { 13685 tg3_mac_loopback(tp, true); 13686 13687 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) 13688 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED; 13689 13690 if (tg3_flag(tp, JUMBO_RING_ENABLE) && 13691 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false)) 13692 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED; 13693 13694 tg3_mac_loopback(tp, false); 13695 } 13696 13697 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 13698 !tg3_flag(tp, USE_PHYLIB)) { 13699 int i; 13700 13701 tg3_phy_lpbk_set(tp, 0, false); 13702 13703 /* Wait for link */ 13704 for (i = 0; i < 100; i++) { 13705 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) 13706 break; 13707 mdelay(1); 13708 } 13709 13710 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) 13711 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED; 13712 if (tg3_flag(tp, TSO_CAPABLE) && 13713 tg3_run_loopback(tp, ETH_FRAME_LEN, true)) 13714 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED; 13715 if (tg3_flag(tp, JUMBO_RING_ENABLE) && 13716 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false)) 13717 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED; 13718 13719 if (do_extlpbk) { 13720 tg3_phy_lpbk_set(tp, 0, true); 13721 13722 /* All link indications report up, but the hardware 13723 * isn't really ready for about 20 msec. Double it 13724 * to be sure. 13725 */ 13726 mdelay(40); 13727 13728 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) 13729 data[TG3_EXT_LOOPB_TEST] |= 13730 TG3_STD_LOOPBACK_FAILED; 13731 if (tg3_flag(tp, TSO_CAPABLE) && 13732 tg3_run_loopback(tp, ETH_FRAME_LEN, true)) 13733 data[TG3_EXT_LOOPB_TEST] |= 13734 TG3_TSO_LOOPBACK_FAILED; 13735 if (tg3_flag(tp, JUMBO_RING_ENABLE) && 13736 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false)) 13737 data[TG3_EXT_LOOPB_TEST] |= 13738 TG3_JMB_LOOPBACK_FAILED; 13739 } 13740 13741 /* Re-enable gphy autopowerdown. */ 13742 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) 13743 tg3_phy_toggle_apd(tp, true); 13744 } 13745 13746 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] | 13747 data[TG3_EXT_LOOPB_TEST]) ? 
-EIO : 0; 13748 13749 done: 13750 tp->phy_flags |= eee_cap; 13751 13752 return err; 13753 } 13754 13755 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, 13756 u64 *data) 13757 { 13758 struct tg3 *tp = netdev_priv(dev); 13759 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB; 13760 13761 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { 13762 if (tg3_power_up(tp)) { 13763 etest->flags |= ETH_TEST_FL_FAILED; 13764 memset(data, 1, sizeof(u64) * TG3_NUM_TEST); 13765 return; 13766 } 13767 tg3_ape_driver_state_change(tp, RESET_KIND_INIT); 13768 } 13769 13770 memset(data, 0, sizeof(u64) * TG3_NUM_TEST); 13771 13772 if (tg3_test_nvram(tp) != 0) { 13773 etest->flags |= ETH_TEST_FL_FAILED; 13774 data[TG3_NVRAM_TEST] = 1; 13775 } 13776 if (!doextlpbk && tg3_test_link(tp)) { 13777 etest->flags |= ETH_TEST_FL_FAILED; 13778 data[TG3_LINK_TEST] = 1; 13779 } 13780 if (etest->flags & ETH_TEST_FL_OFFLINE) { 13781 int err, err2 = 0, irq_sync = 0; 13782 13783 if (netif_running(dev)) { 13784 tg3_phy_stop(tp); 13785 tg3_netif_stop(tp); 13786 irq_sync = 1; 13787 } 13788 13789 tg3_full_lock(tp, irq_sync); 13790 tg3_halt(tp, RESET_KIND_SUSPEND, 1); 13791 err = tg3_nvram_lock(tp); 13792 tg3_halt_cpu(tp, RX_CPU_BASE); 13793 if (!tg3_flag(tp, 5705_PLUS)) 13794 tg3_halt_cpu(tp, TX_CPU_BASE); 13795 if (!err) 13796 tg3_nvram_unlock(tp); 13797 13798 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 13799 tg3_phy_reset(tp); 13800 13801 if (tg3_test_registers(tp) != 0) { 13802 etest->flags |= ETH_TEST_FL_FAILED; 13803 data[TG3_REGISTER_TEST] = 1; 13804 } 13805 13806 if (tg3_test_memory(tp) != 0) { 13807 etest->flags |= ETH_TEST_FL_FAILED; 13808 data[TG3_MEMORY_TEST] = 1; 13809 } 13810 13811 if (doextlpbk) 13812 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; 13813 13814 if (tg3_test_loopback(tp, data, doextlpbk)) 13815 etest->flags |= ETH_TEST_FL_FAILED; 13816 13817 tg3_full_unlock(tp); 13818 13819 if (tg3_test_interrupt(tp) != 0) { 13820 etest->flags |= ETH_TEST_FL_FAILED; 13821 data[TG3_INTERRUPT_TEST] = 1; 13822 } 13823 13824 tg3_full_lock(tp, 0); 13825 13826 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 13827 if (netif_running(dev)) { 13828 tg3_flag_set(tp, INIT_COMPLETE); 13829 err2 = tg3_restart_hw(tp, true); 13830 if (!err2) 13831 tg3_netif_start(tp); 13832 } 13833 13834 tg3_full_unlock(tp); 13835 13836 if (irq_sync && !err2) 13837 tg3_phy_start(tp); 13838 } 13839 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 13840 tg3_power_down_prepare(tp); 13841 13842 } 13843 13844 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) 13845 { 13846 struct tg3 *tp = netdev_priv(dev); 13847 struct hwtstamp_config stmpconf; 13848 13849 if (!tg3_flag(tp, PTP_CAPABLE)) 13850 return -EOPNOTSUPP; 13851 13852 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf))) 13853 return -EFAULT; 13854 13855 if (stmpconf.flags) 13856 return -EINVAL; 13857 13858 if (stmpconf.tx_type != HWTSTAMP_TX_ON && 13859 stmpconf.tx_type != HWTSTAMP_TX_OFF) 13860 return -ERANGE; 13861 13862 switch (stmpconf.rx_filter) { 13863 case HWTSTAMP_FILTER_NONE: 13864 tp->rxptpctl = 0; 13865 break; 13866 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 13867 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | 13868 TG3_RX_PTP_CTL_ALL_V1_EVENTS; 13869 break; 13870 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 13871 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | 13872 TG3_RX_PTP_CTL_SYNC_EVNT; 13873 break; 13874 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 13875 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | 13876 TG3_RX_PTP_CTL_DELAY_REQ; 13877 break; 13878 case 
HWTSTAMP_FILTER_PTP_V2_EVENT: 13879 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN | 13880 TG3_RX_PTP_CTL_ALL_V2_EVENTS; 13881 break; 13882 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 13883 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | 13884 TG3_RX_PTP_CTL_ALL_V2_EVENTS; 13885 break; 13886 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 13887 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | 13888 TG3_RX_PTP_CTL_ALL_V2_EVENTS; 13889 break; 13890 case HWTSTAMP_FILTER_PTP_V2_SYNC: 13891 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN | 13892 TG3_RX_PTP_CTL_SYNC_EVNT; 13893 break; 13894 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 13895 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | 13896 TG3_RX_PTP_CTL_SYNC_EVNT; 13897 break; 13898 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 13899 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | 13900 TG3_RX_PTP_CTL_SYNC_EVNT; 13901 break; 13902 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 13903 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN | 13904 TG3_RX_PTP_CTL_DELAY_REQ; 13905 break; 13906 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 13907 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | 13908 TG3_RX_PTP_CTL_DELAY_REQ; 13909 break; 13910 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 13911 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | 13912 TG3_RX_PTP_CTL_DELAY_REQ; 13913 break; 13914 default: 13915 return -ERANGE; 13916 } 13917 13918 if (netif_running(dev) && tp->rxptpctl) 13919 tw32(TG3_RX_PTP_CTL, 13920 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK); 13921 13922 if (stmpconf.tx_type == HWTSTAMP_TX_ON) 13923 tg3_flag_set(tp, TX_TSTAMP_EN); 13924 else 13925 tg3_flag_clear(tp, TX_TSTAMP_EN); 13926 13927 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ? 13928 -EFAULT : 0; 13929 } 13930 13931 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) 13932 { 13933 struct tg3 *tp = netdev_priv(dev); 13934 struct hwtstamp_config stmpconf; 13935 13936 if (!tg3_flag(tp, PTP_CAPABLE)) 13937 return -EOPNOTSUPP; 13938 13939 stmpconf.flags = 0; 13940 stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ? 
13941 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF); 13942 13943 switch (tp->rxptpctl) { 13944 case 0: 13945 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE; 13946 break; 13947 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS: 13948 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; 13949 break; 13950 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT: 13951 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC; 13952 break; 13953 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ: 13954 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ; 13955 break; 13956 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS: 13957 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; 13958 break; 13959 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS: 13960 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; 13961 break; 13962 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS: 13963 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; 13964 break; 13965 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT: 13966 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC; 13967 break; 13968 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT: 13969 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC; 13970 break; 13971 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT: 13972 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC; 13973 break; 13974 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ: 13975 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ; 13976 break; 13977 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ: 13978 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ; 13979 break; 13980 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ: 13981 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ; 13982 break; 13983 default: 13984 WARN_ON_ONCE(1); 13985 return -ERANGE; 13986 } 13987 13988 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ? 
13989 -EFAULT : 0; 13990 } 13991 13992 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 13993 { 13994 struct mii_ioctl_data *data = if_mii(ifr); 13995 struct tg3 *tp = netdev_priv(dev); 13996 int err; 13997 13998 if (tg3_flag(tp, USE_PHYLIB)) { 13999 struct phy_device *phydev; 14000 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 14001 return -EAGAIN; 14002 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 14003 return phy_mii_ioctl(phydev, ifr, cmd); 14004 } 14005 14006 switch (cmd) { 14007 case SIOCGMIIPHY: 14008 data->phy_id = tp->phy_addr; 14009 14010 /* fall through */ 14011 case SIOCGMIIREG: { 14012 u32 mii_regval; 14013 14014 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 14015 break; /* We have no PHY */ 14016 14017 if (!netif_running(dev)) 14018 return -EAGAIN; 14019 14020 spin_lock_bh(&tp->lock); 14021 err = __tg3_readphy(tp, data->phy_id & 0x1f, 14022 data->reg_num & 0x1f, &mii_regval); 14023 spin_unlock_bh(&tp->lock); 14024 14025 data->val_out = mii_regval; 14026 14027 return err; 14028 } 14029 14030 case SIOCSMIIREG: 14031 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 14032 break; /* We have no PHY */ 14033 14034 if (!netif_running(dev)) 14035 return -EAGAIN; 14036 14037 spin_lock_bh(&tp->lock); 14038 err = __tg3_writephy(tp, data->phy_id & 0x1f, 14039 data->reg_num & 0x1f, data->val_in); 14040 spin_unlock_bh(&tp->lock); 14041 14042 return err; 14043 14044 case SIOCSHWTSTAMP: 14045 return tg3_hwtstamp_set(dev, ifr); 14046 14047 case SIOCGHWTSTAMP: 14048 return tg3_hwtstamp_get(dev, ifr); 14049 14050 default: 14051 /* do nothing */ 14052 break; 14053 } 14054 return -EOPNOTSUPP; 14055 } 14056 14057 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) 14058 { 14059 struct tg3 *tp = netdev_priv(dev); 14060 14061 memcpy(ec, &tp->coal, sizeof(*ec)); 14062 return 0; 14063 } 14064 14065 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) 14066 { 14067 struct tg3 *tp = netdev_priv(dev); 14068 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0; 14069 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0; 14070 14071 if (!tg3_flag(tp, 5705_PLUS)) { 14072 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT; 14073 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT; 14074 max_stat_coal_ticks = MAX_STAT_COAL_TICKS; 14075 min_stat_coal_ticks = MIN_STAT_COAL_TICKS; 14076 } 14077 14078 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) || 14079 (!ec->rx_coalesce_usecs) || 14080 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) || 14081 (!ec->tx_coalesce_usecs) || 14082 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) || 14083 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) || 14084 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) || 14085 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) || 14086 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) || 14087 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) || 14088 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) || 14089 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks)) 14090 return -EINVAL; 14091 14092 /* Only copy relevant parameters, ignore all others. 
	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}
	return 0;
}

static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
		netdev_warn(tp->dev, "Board does not support EEE!\n");
		return -EOPNOTSUPP;
	}

	if (edata->advertised != tp->eee.advertised) {
		netdev_warn(tp->dev,
			    "Direct manipulation of EEE advertisement is not supported\n");
		return -EINVAL;
	}

	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
		netdev_warn(tp->dev,
			    "Maximal Tx LPI timer supported is %#x (usec)\n",
			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
		return -EINVAL;
	}

	tp->eee = *edata;

	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
	tg3_warn_mgmt_link_flap(tp);

	if (netif_running(tp->dev)) {
		tg3_full_lock(tp, 0);
		tg3_setup_eee(tp);
		tg3_phy_reset(tp);
		tg3_full_unlock(tp);
	}

	return 0;
}

static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
		netdev_warn(tp->dev,
			    "Board does not support EEE!\n");
		return -EOPNOTSUPP;
	}

	*edata = tp->eee;
	return 0;
}

static const struct ethtool_ops tg3_ethtool_ops = {
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size	= tg3_get_rxfh_indir_size,
	.get_rxfh		= tg3_get_rxfh,
	.set_rxfh		= tg3_set_rxfh,
	.get_channels		= tg3_get_channels,
	.set_channels		= tg3_set_channels,
	.get_ts_info		= tg3_get_ts_info,
	.get_eee		= tg3_get_eee,
	.set_eee		= tg3_set_eee,
	.get_link_ksettings	= tg3_get_link_ksettings,
	.set_link_ksettings	= tg3_set_link_ksettings,
};
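
/* Stats are read under tp->lock.  Before the first hardware init completes
 * (or after the device is shut down) hw_stats is unavailable, so the last
 * snapshot saved in net_stats_prev is reported instead.
 */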
static void tg3_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);

	spin_lock_bh(&tp->lock);
	if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
		*stats = tp->net_stats_prev;
		spin_unlock_bh(&tp->lock);
		return;
	}

	tg3_get_nstats(tp, stats);
	spin_unlock_bh(&tp->lock);
}

static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}

static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tg3_flag(tp, 5780_CLASS)) {
			netdev_update_features(dev);
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else {
			tg3_flag_set(tp, JUMBO_RING_ENABLE);
		}
	} else {
		if (tg3_flag(tp, 5780_CLASS)) {
			tg3_flag_set(tp, TSO_CAPABLE);
			netdev_update_features(dev);
		}
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
	}
}

static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;
	bool reset_phy = false;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is brought up.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_set_mtu(dev, tp, new_mtu);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	/* Reset PHY, otherwise the read DMA engine will be in a mode that
	 * breaks all requests to 256 bytes.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
	    tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		reset_phy = true;

	err = tg3_restart_hw(tp, reset_phy);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}

static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};

static void tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return;

	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
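	 * For example, a 64 KB part hands back its offset-0 magic word
	 * again at offset 0x10000, so the probe below stops there with
	 * cursize = 0x10000.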
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}

static void tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* This is confusing. We want to operate on the
			 * 16-bit value at offset 0xf2. The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses. This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU. The 16-bit
			 * byteswap then brings the data to CPU endianness.
			 */
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
			return;
		}
	}
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}

static void tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}

static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
{
	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5752PAGE_SIZE_512:
		tp->nvram_pagesize = 512;
		break;
	case FLASH_5752PAGE_SIZE_1K:
		tp->nvram_pagesize = 1024;
		break;
	case FLASH_5752PAGE_SIZE_2K:
		tp->nvram_pagesize = 2048;
		break;
	case FLASH_5752PAGE_SIZE_4K:
		tp->nvram_pagesize = 4096;
		break;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
		break;
	}
}
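
/* The odd 264- and 528-byte sizes are presumably the Atmel AT45DB
 * "DataFlash" page formats (256 + 8 and 512 + 16 spare bytes); note that
 * the callers below treat exactly these two sizes as needing NVRAM
 * address translation.
 */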

static void tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}

static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
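
/* When the TPM-protect strap (bit 27) is set, the size reported above is
 * capped below the physical part size; the difference is presumably the
 * region reserved for TPM data that software must not touch.
 */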

static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}

static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}

static void tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}

static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}

static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}

static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp, nv_status;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
			tg3_flag_set(tp, NO_NVRAM);
			return;
		}

		switch (nvmpinstrp) {
		case FLASH_5762_MX25L_100:
		case FLASH_5762_MX25L_200:
		case FLASH_5762_MX25L_400:
		case FLASH_5762_MX25L_800:
		case FLASH_5762_MX25L_160_320:
			tp->nvram_pagesize = 4096;
			tp->nvram_jedecnum = JEDEC_MACRONIX;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
			tg3_flag_set(tp, FLASH);
			nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
			tp->nvram_size =
				(1 << (nv_status >> AUTOSENSE_DEVID &
				       AUTOSENSE_DEVID_MASK)
					<< AUTOSENSE_SIZE_IN_MB);
			return;

		case FLASH_5762_EEPROM_HD:
			nvmpinstrp = FLASH_5720_EEPROM_HD;
			break;
		case FLASH_5762_EEPROM_LD:
			nvmpinstrp = FLASH_5720_EEPROM_LD;
			break;
		case FLASH_5720VENDOR_M_ST_M45PE20:
			/* This pinstrap supports multiple sizes, so force it
			 * to read the actual size from location 0xf0.
			 */
			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
			break;
		}
	}

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 val;

		if (tg3_nvram_read(tp, 0, &val))
			return;

		if (val != TG3_EEPROM_MAGIC &&
		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
			tg3_flag_set(tp, NO_NVRAM);
	}
}
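
/* Each tg3_get_*_nvram_info() helper above follows the same pattern:
 * decode the vendor/pinstrap bits from NVRAM_CFG1 to identify the part,
 * then record its JEDEC vendor, page size and, where the strap encodes
 * it, total size.  Anything unrecognized is flagged NO_NVRAM so the rest
 * of the driver leaves the interface alone.
 */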

/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void tg3_nvram_init(struct tg3 *tp)
{
	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tw32_f(GRC_EEPROM_ADDR,
	       (EEPROM_ADDR_FSM_RESET |
		(EEPROM_DEFAULT_CLOCK_PERIOD <<
		 EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	       tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
			 tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			 tg3_asic_rev(tp) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
			 tg3_asic_rev(tp) == ASIC_REV_5762)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}

struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};
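
/* Fallback mapping from PCI subsystem IDs to PHY IDs, used by
 * tg3_phy_probe() when neither the PHY registers nor the EEPROM yield a
 * usable ID.  Entries with a zero phy_id appear to be fiber/SerDes boards
 * that have no copper PHY to identify.
 */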
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};

static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}
	return NULL;
}

static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default. */
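	/* In this driver IS_NIC appears to mean a standalone add-in card
	 * (as opposed to a LOM): standalone cards get a writable EEPROM,
	 * while onboard devices stay write-protected.
	 */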
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
		u32 nic_phy_id, ver, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
					  SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);

			if (tg3_flag(tp, 5717_PLUS) ||
			    tg3_asic_rev(tp) == ASIC_REV_5762)
				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
						LED_CTRL_BLINK_RATE_MASK;

			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by
		 * bootcode if bit 18 is set
		 */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
			    !tg3_flag(tp, 57765_PLUS) &&
			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
				tg3_flag_set(tp, ASPM_WORKAROUND);
			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);

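		/* cfg5 is only read on 5717/5719/5720 above and stays zero
		 * everywhere else, so this test is a no-op on other chips.
		 */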
		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
	}
done:
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}

static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;

	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
	udelay(10);

	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
		udelay(10);
	}

	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}

static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}

/* Read the gphy configuration from the OTP region of the chip. The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
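 * That is, the low 16 bits of the word at MAGIC1 and the high 16 bits of
 * the word at MAGIC2 are concatenated:
 *   cfg = (thalf & 0xffff) << 16 | (bhalf >> 16)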
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}

static void tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
			adv |= ADVERTISED_1000baseT_Half;
		adv |= ADVERTISED_1000baseT_Full;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_UNKNOWN;
	tp->link_config.duplex = DUPLEX_UNKNOWN;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_UNKNOWN;
	tp->link_config.active_duplex = DUPLEX_UNKNOWN;

	tp->old_link = -1;
}

static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	if (!tg3_flag(tp, ENABLE_ASF) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane. If it doesn't look good, we fall back
		 * to the value found in the EEPROM area and, failing that,
		 * to the hard-coded subsystem-ID table.
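		 * Note the driver's packed PHY ID format below: the bits
		 * from PHYSID1 are shifted up by 10 (not the usual 16) so
		 * the masked result lines up with the TG3_PHY_ID_*
		 * constants.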
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature? Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (p) {
				tp->phy_id = p->phy_id;
			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* For now we saw the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to a BCM4785 and there are
				 * probably more. Just assume that the phy is
				 * supported when it is connected to a SSB core
				 * for now.
				 */
				return -ENODEV;
			}

			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

		tp->eee.supported = SUPPORTED_100baseT_Full |
				    SUPPORTED_1000baseT_Full;
		tp->eee.advertised = ADVERTISED_100baseT_Full |
				     ADVERTISED_1000baseT_Full;
		tp->eee.eee_enabled = 1;
		tp->eee.tx_lpi_enabled = 1;
		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
	}

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}

static void tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i = 0;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

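	/* The VPD read-only section is scanned for up to three keywords:
	 * MFR_ID (if it is "1028", i.e. Dell, then VENDOR0 carries a
	 * firmware version string that is prepended to fw_ver) and PARTNO,
	 * which supplies the board part number.
	 */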
	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > vpdlen)
		goto out_not_found;

	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		if (len >= sizeof(tp->fw_ver))
			len = sizeof(tp->fw_ver) - 1;
		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
			 &vpd_data[j]);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}

static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    val != 0)
		return 0;

	return 1;
}

static void tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}

static void tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}

static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

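	/* The EDH word packs build, major and minor fields; builds 1-26
	 * are rendered as an 'a'-'z' suffix below, so e.g. major 1,
	 * minor 4, build 1 prints as " v1.04a".
	 */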
	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;

	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}

static void tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}

static void tg3_probe_ncsi(struct tg3 *tp)
{
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
		tg3_flag_set(tp, APE_HAS_NCSI);
}

static void tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_flag(tp, APE_HAS_NCSI))
		fwtype = "NCSI";
	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
		fwtype = "SMASH";
	else
		fwtype = "DASH";

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}

static void tg3_read_otp_ver(struct tg3 *tp)
{
	u32 val, val2;

	if (tg3_asic_rev(tp) != ASIC_REV_5762)
		return;

	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
	    TG3_OTP_MAGIC0_VALID(val)) {
		u64 val64 = (u64) val << 32 | val2;
		u32 ver = 0;
		int i, vlen;

		for (i = 0; i < 7; i++) {
			if ((val64 & 0xff) == 0)
				break;
			ver = val64 & 0xff;
			val64 >>= 8;
		}
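		/* The loop scans up to seven version bytes from the low end
		 * of the 64-bit OTP word; ver is left holding the last
		 * non-zero byte seen.
		 */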
strlen(tp->fw_ver); 16004 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver); 16005 } 16006 } 16007 16008 static void tg3_read_fw_ver(struct tg3 *tp) 16009 { 16010 u32 val; 16011 bool vpd_vers = false; 16012 16013 if (tp->fw_ver[0] != 0) 16014 vpd_vers = true; 16015 16016 if (tg3_flag(tp, NO_NVRAM)) { 16017 strcat(tp->fw_ver, "sb"); 16018 tg3_read_otp_ver(tp); 16019 return; 16020 } 16021 16022 if (tg3_nvram_read(tp, 0, &val)) 16023 return; 16024 16025 if (val == TG3_EEPROM_MAGIC) 16026 tg3_read_bc_ver(tp); 16027 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) 16028 tg3_read_sb_ver(tp, val); 16029 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW) 16030 tg3_read_hwsb_ver(tp); 16031 16032 if (tg3_flag(tp, ENABLE_ASF)) { 16033 if (tg3_flag(tp, ENABLE_APE)) { 16034 tg3_probe_ncsi(tp); 16035 if (!vpd_vers) 16036 tg3_read_dash_ver(tp); 16037 } else if (!vpd_vers) { 16038 tg3_read_mgmtfw_ver(tp); 16039 } 16040 } 16041 16042 tp->fw_ver[TG3_VER_SIZE - 1] = 0; 16043 } 16044 16045 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp) 16046 { 16047 if (tg3_flag(tp, LRG_PROD_RING_CAP)) 16048 return TG3_RX_RET_MAX_SIZE_5717; 16049 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) 16050 return TG3_RX_RET_MAX_SIZE_5700; 16051 else 16052 return TG3_RX_RET_MAX_SIZE_5705; 16053 } 16054 16055 static const struct pci_device_id tg3_write_reorder_chipsets[] = { 16056 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) }, 16057 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) }, 16058 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) }, 16059 { }, 16060 }; 16061 16062 static struct pci_dev *tg3_find_peer(struct tg3 *tp) 16063 { 16064 struct pci_dev *peer; 16065 unsigned int func, devnr = tp->pdev->devfn & ~7; 16066 16067 for (func = 0; func < 8; func++) { 16068 peer = pci_get_slot(tp->pdev->bus, devnr | func); 16069 if (peer && peer != tp->pdev) 16070 break; 16071 pci_dev_put(peer); 16072 } 16073 /* 5704 can be configured in single-port mode, set peer to 16074 * tp->pdev in that case. 16075 */ 16076 if (!peer) { 16077 peer = tp->pdev; 16078 return peer; 16079 } 16080 16081 /* 16082 * We don't need to keep the refcount elevated; there's no way 16083 * to remove one half of this device without removing the other 16084 */ 16085 pci_dev_put(peer); 16086 16087 return peer; 16088 } 16089 16090 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg) 16091 { 16092 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT; 16093 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) { 16094 u32 reg; 16095 16096 /* All devices that use the alternate 16097 * ASIC REV location have a CPMU. 
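		 * For these chips the CHIPREV field in MISC_HOST_CTRL is
		 * not sufficient, so the full chip ID is re-read below
		 * from a product-ID config register (GEN2, GEN15, or the
		 * original TG3PCI_PRODID_ASICREV) chosen by PCI device ID.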
16098 */ 16099 tg3_flag_set(tp, CPMU_PRESENT); 16100 16101 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || 16102 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C || 16103 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || 16104 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || 16105 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 || 16106 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 || 16107 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 || 16108 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 || 16109 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 || 16110 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 || 16111 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) 16112 reg = TG3PCI_GEN2_PRODID_ASICREV; 16113 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 || 16114 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 || 16115 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 || 16116 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 || 16117 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 || 16118 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 || 16119 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 || 16120 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 || 16121 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 || 16122 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786) 16123 reg = TG3PCI_GEN15_PRODID_ASICREV; 16124 else 16125 reg = TG3PCI_PRODID_ASICREV; 16126 16127 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id); 16128 } 16129 16130 /* Wrong chip ID in 5752 A0. This code can be removed later 16131 * as A0 is not in production. 16132 */ 16133 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW) 16134 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0; 16135 16136 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0) 16137 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0; 16138 16139 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 16140 tg3_asic_rev(tp) == ASIC_REV_5719 || 16141 tg3_asic_rev(tp) == ASIC_REV_5720) 16142 tg3_flag_set(tp, 5717_PLUS); 16143 16144 if (tg3_asic_rev(tp) == ASIC_REV_57765 || 16145 tg3_asic_rev(tp) == ASIC_REV_57766) 16146 tg3_flag_set(tp, 57765_CLASS); 16147 16148 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) || 16149 tg3_asic_rev(tp) == ASIC_REV_5762) 16150 tg3_flag_set(tp, 57765_PLUS); 16151 16152 /* Intentionally exclude ASIC_REV_5906 */ 16153 if (tg3_asic_rev(tp) == ASIC_REV_5755 || 16154 tg3_asic_rev(tp) == ASIC_REV_5787 || 16155 tg3_asic_rev(tp) == ASIC_REV_5784 || 16156 tg3_asic_rev(tp) == ASIC_REV_5761 || 16157 tg3_asic_rev(tp) == ASIC_REV_5785 || 16158 tg3_asic_rev(tp) == ASIC_REV_57780 || 16159 tg3_flag(tp, 57765_PLUS)) 16160 tg3_flag_set(tp, 5755_PLUS); 16161 16162 if (tg3_asic_rev(tp) == ASIC_REV_5780 || 16163 tg3_asic_rev(tp) == ASIC_REV_5714) 16164 tg3_flag_set(tp, 5780_CLASS); 16165 16166 if (tg3_asic_rev(tp) == ASIC_REV_5750 || 16167 tg3_asic_rev(tp) == ASIC_REV_5752 || 16168 tg3_asic_rev(tp) == ASIC_REV_5906 || 16169 tg3_flag(tp, 5755_PLUS) || 16170 tg3_flag(tp, 5780_CLASS)) 16171 tg3_flag_set(tp, 5750_PLUS); 16172 16173 if (tg3_asic_rev(tp) == ASIC_REV_5705 || 16174 tg3_flag(tp, 5750_PLUS)) 16175 tg3_flag_set(tp, 5705_PLUS); 16176 } 16177 16178 static bool tg3_10_100_only_device(struct tg3 *tp, 16179 const struct pci_device_id *ent) 16180 { 16181 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK; 16182 16183 if ((tg3_asic_rev(tp) == ASIC_REV_5703 && 16184 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) || 16185 (tp->phy_flags & TG3_PHYFLG_IS_FET)) 16186 return true; 16187 16188 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) { 16189 if 
(tg3_asic_rev(tp) == ASIC_REV_5705) { 16190 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100) 16191 return true; 16192 } else { 16193 return true; 16194 } 16195 } 16196 16197 return false; 16198 } 16199 16200 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) 16201 { 16202 u32 misc_ctrl_reg; 16203 u32 pci_state_reg, grc_misc_cfg; 16204 u32 val; 16205 u16 pci_cmd; 16206 int err; 16207 16208 /* Force memory write invalidate off. If we leave it on, 16209 * then on 5700_BX chips we have to enable a workaround. 16210 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary 16211 * to match the cacheline size. The Broadcom driver has this 16212 * workaround but turns MWI off all the time, so it never uses 16213 * it. This seems to suggest that the workaround is insufficient. 16214 */ 16215 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); 16216 pci_cmd &= ~PCI_COMMAND_INVALIDATE; 16217 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); 16218 16219 /* Important! -- Make sure register accesses are byteswapped 16220 * correctly. Also, for those chips that require it, make 16221 * sure that indirect register accesses are enabled before 16222 * the first operation. 16223 */ 16224 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, 16225 &misc_ctrl_reg); 16226 tp->misc_host_ctrl |= (misc_ctrl_reg & 16227 MISC_HOST_CTRL_CHIPREV); 16228 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, 16229 tp->misc_host_ctrl); 16230 16231 tg3_detect_asic_rev(tp, misc_ctrl_reg); 16232 16233 /* If we have 5702/03 A1 or A2 on certain ICH chipsets, 16234 * we need to disable memory and use config cycles 16235 * only to access all registers. The 5702/03 chips 16236 * can mistakenly decode the special cycles from the 16237 * ICH chipsets as memory write cycles, causing corruption 16238 * of register and memory space. Only certain ICH bridges 16239 * will drive special cycles with non-zero data during the 16240 * address phase, which can fall within the 5703's address 16241 * range. This is not an ICH bug, as the PCI spec allows 16242 * non-zero addresses during special cycles. However, only 16243 * these ICH bridges are known to drive non-zero addresses 16244 * during special cycles. 16245 * 16246 * Since special cycles do not cross PCI bridges, we only 16247 * enable this workaround if the 5703 is on the secondary 16248 * bus of these ICH bridges.
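	 * When the workaround is needed, the ICH_WORKAROUND flag set
	 * below routes every register access through PCI config
	 * cycles, and the memory-space decode is disabled later in
	 * this function.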
16249 */ 16250 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) || 16251 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) { 16252 static struct tg3_dev_id { 16253 u32 vendor; 16254 u32 device; 16255 u32 rev; 16256 } ich_chipsets[] = { 16257 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8, 16258 PCI_ANY_ID }, 16259 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8, 16260 PCI_ANY_ID }, 16261 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11, 16262 0xa }, 16263 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6, 16264 PCI_ANY_ID }, 16265 { }, 16266 }; 16267 struct tg3_dev_id *pci_id = &ich_chipsets[0]; 16268 struct pci_dev *bridge = NULL; 16269 16270 while (pci_id->vendor != 0) { 16271 bridge = pci_get_device(pci_id->vendor, pci_id->device, 16272 bridge); 16273 if (!bridge) { 16274 pci_id++; 16275 continue; 16276 } 16277 if (pci_id->rev != PCI_ANY_ID) { 16278 if (bridge->revision > pci_id->rev) 16279 continue; 16280 } 16281 if (bridge->subordinate && 16282 (bridge->subordinate->number == 16283 tp->pdev->bus->number)) { 16284 tg3_flag_set(tp, ICH_WORKAROUND); 16285 pci_dev_put(bridge); 16286 break; 16287 } 16288 } 16289 } 16290 16291 if (tg3_asic_rev(tp) == ASIC_REV_5701) { 16292 static struct tg3_dev_id { 16293 u32 vendor; 16294 u32 device; 16295 } bridge_chipsets[] = { 16296 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 }, 16297 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 }, 16298 { }, 16299 }; 16300 struct tg3_dev_id *pci_id = &bridge_chipsets[0]; 16301 struct pci_dev *bridge = NULL; 16302 16303 while (pci_id->vendor != 0) { 16304 bridge = pci_get_device(pci_id->vendor, 16305 pci_id->device, 16306 bridge); 16307 if (!bridge) { 16308 pci_id++; 16309 continue; 16310 } 16311 if (bridge->subordinate && 16312 (bridge->subordinate->number <= 16313 tp->pdev->bus->number) && 16314 (bridge->subordinate->busn_res.end >= 16315 tp->pdev->bus->number)) { 16316 tg3_flag_set(tp, 5701_DMA_BUG); 16317 pci_dev_put(bridge); 16318 break; 16319 } 16320 } 16321 } 16322 16323 /* The EPB bridge inside 5714, 5715, and 5780 cannot support 16324 * DMA addresses > 40-bit. This bridge may have other additional 16325 * 57xx devices behind it in some 4-port NIC designs for example. 16326 * Any tg3 device found behind the bridge will also need the 40-bit 16327 * DMA workaround. 16328 */ 16329 if (tg3_flag(tp, 5780_CLASS)) { 16330 tg3_flag_set(tp, 40BIT_DMA_BUG); 16331 tp->msi_cap = tp->pdev->msi_cap; 16332 } else { 16333 struct pci_dev *bridge = NULL; 16334 16335 do { 16336 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS, 16337 PCI_DEVICE_ID_SERVERWORKS_EPB, 16338 bridge); 16339 if (bridge && bridge->subordinate && 16340 (bridge->subordinate->number <= 16341 tp->pdev->bus->number) && 16342 (bridge->subordinate->busn_res.end >= 16343 tp->pdev->bus->number)) { 16344 tg3_flag_set(tp, 40BIT_DMA_BUG); 16345 pci_dev_put(bridge); 16346 break; 16347 } 16348 } while (bridge); 16349 } 16350 16351 if (tg3_asic_rev(tp) == ASIC_REV_5704 || 16352 tg3_asic_rev(tp) == ASIC_REV_5714) 16353 tp->pdev_peer = tg3_find_peer(tp); 16354 16355 /* Determine TSO capabilities */ 16356 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0) 16357 ; /* Do nothing. HW bug. 
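		   * 5719 A0 gets no TSO capability flags at all.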
*/ 16358 else if (tg3_flag(tp, 57765_PLUS)) 16359 tg3_flag_set(tp, HW_TSO_3); 16360 else if (tg3_flag(tp, 5755_PLUS) || 16361 tg3_asic_rev(tp) == ASIC_REV_5906) 16362 tg3_flag_set(tp, HW_TSO_2); 16363 else if (tg3_flag(tp, 5750_PLUS)) { 16364 tg3_flag_set(tp, HW_TSO_1); 16365 tg3_flag_set(tp, TSO_BUG); 16366 if (tg3_asic_rev(tp) == ASIC_REV_5750 && 16367 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2) 16368 tg3_flag_clear(tp, TSO_BUG); 16369 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 && 16370 tg3_asic_rev(tp) != ASIC_REV_5701 && 16371 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) { 16372 tg3_flag_set(tp, FW_TSO); 16373 tg3_flag_set(tp, TSO_BUG); 16374 if (tg3_asic_rev(tp) == ASIC_REV_5705) 16375 tp->fw_needed = FIRMWARE_TG3TSO5; 16376 else 16377 tp->fw_needed = FIRMWARE_TG3TSO; 16378 } 16379 16380 /* Selectively allow TSO based on operating conditions */ 16381 if (tg3_flag(tp, HW_TSO_1) || 16382 tg3_flag(tp, HW_TSO_2) || 16383 tg3_flag(tp, HW_TSO_3) || 16384 tg3_flag(tp, FW_TSO)) { 16385 /* For firmware TSO, assume ASF is disabled. 16386 * We'll disable TSO later if we discover ASF 16387 * is enabled in tg3_get_eeprom_hw_cfg(). 16388 */ 16389 tg3_flag_set(tp, TSO_CAPABLE); 16390 } else { 16391 tg3_flag_clear(tp, TSO_CAPABLE); 16392 tg3_flag_clear(tp, TSO_BUG); 16393 tp->fw_needed = NULL; 16394 } 16395 16396 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) 16397 tp->fw_needed = FIRMWARE_TG3; 16398 16399 if (tg3_asic_rev(tp) == ASIC_REV_57766) 16400 tp->fw_needed = FIRMWARE_TG357766; 16401 16402 tp->irq_max = 1; 16403 16404 if (tg3_flag(tp, 5750_PLUS)) { 16405 tg3_flag_set(tp, SUPPORT_MSI); 16406 if (tg3_chip_rev(tp) == CHIPREV_5750_AX || 16407 tg3_chip_rev(tp) == CHIPREV_5750_BX || 16408 (tg3_asic_rev(tp) == ASIC_REV_5714 && 16409 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 && 16410 tp->pdev_peer == tp->pdev)) 16411 tg3_flag_clear(tp, SUPPORT_MSI); 16412 16413 if (tg3_flag(tp, 5755_PLUS) || 16414 tg3_asic_rev(tp) == ASIC_REV_5906) { 16415 tg3_flag_set(tp, 1SHOT_MSI); 16416 } 16417 16418 if (tg3_flag(tp, 57765_PLUS)) { 16419 tg3_flag_set(tp, SUPPORT_MSIX); 16420 tp->irq_max = TG3_IRQ_MAX_VECS; 16421 } 16422 } 16423 16424 tp->txq_max = 1; 16425 tp->rxq_max = 1; 16426 if (tp->irq_max > 1) { 16427 tp->rxq_max = TG3_RSS_MAX_NUM_QS; 16428 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS); 16429 16430 if (tg3_asic_rev(tp) == ASIC_REV_5719 || 16431 tg3_asic_rev(tp) == ASIC_REV_5720) 16432 tp->txq_max = tp->irq_max - 1; 16433 } 16434 16435 if (tg3_flag(tp, 5755_PLUS) || 16436 tg3_asic_rev(tp) == ASIC_REV_5906) 16437 tg3_flag_set(tp, SHORT_DMA_BUG); 16438 16439 if (tg3_asic_rev(tp) == ASIC_REV_5719) 16440 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K; 16441 16442 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 16443 tg3_asic_rev(tp) == ASIC_REV_5719 || 16444 tg3_asic_rev(tp) == ASIC_REV_5720 || 16445 tg3_asic_rev(tp) == ASIC_REV_5762) 16446 tg3_flag_set(tp, LRG_PROD_RING_CAP); 16447 16448 if (tg3_flag(tp, 57765_PLUS) && 16449 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0) 16450 tg3_flag_set(tp, USE_JUMBO_BDFLAG); 16451 16452 if (!tg3_flag(tp, 5705_PLUS) || 16453 tg3_flag(tp, 5780_CLASS) || 16454 tg3_flag(tp, USE_JUMBO_BDFLAG)) 16455 tg3_flag_set(tp, JUMBO_CAPABLE); 16456 16457 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, 16458 &pci_state_reg); 16459 16460 if (pci_is_pcie(tp->pdev)) { 16461 u16 lnkctl; 16462 16463 tg3_flag_set(tp, PCI_EXPRESS); 16464 16465 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl); 16466 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) { 16467 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 16468 
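				/* With CLKREQ enabled, the 5906 cannot
				 * use hardware TSO at all, so drop both
				 * TSO flags.
				 */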
tg3_flag_clear(tp, HW_TSO_2); 16469 tg3_flag_clear(tp, TSO_CAPABLE); 16470 } 16471 if (tg3_asic_rev(tp) == ASIC_REV_5784 || 16472 tg3_asic_rev(tp) == ASIC_REV_5761 || 16473 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 || 16474 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1) 16475 tg3_flag_set(tp, CLKREQ_BUG); 16476 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) { 16477 tg3_flag_set(tp, L1PLLPD_EN); 16478 } 16479 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) { 16480 /* BCM5785 devices are effectively PCIe devices, and should 16481 * follow PCIe codepaths, but do not have a PCIe capabilities 16482 * section. 16483 */ 16484 tg3_flag_set(tp, PCI_EXPRESS); 16485 } else if (!tg3_flag(tp, 5705_PLUS) || 16486 tg3_flag(tp, 5780_CLASS)) { 16487 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX); 16488 if (!tp->pcix_cap) { 16489 dev_err(&tp->pdev->dev, 16490 "Cannot find PCI-X capability, aborting\n"); 16491 return -EIO; 16492 } 16493 16494 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE)) 16495 tg3_flag_set(tp, PCIX_MODE); 16496 } 16497 16498 /* If we have an AMD 762 or VIA K8T800 chipset, write 16499 * reordering to the mailbox registers done by the host 16500 * controller can cause major trouble. We read back from 16501 * every mailbox register write to force the writes to be 16502 * posted to the chip in order. 16503 */ 16504 if (pci_dev_present(tg3_write_reorder_chipsets) && 16505 !tg3_flag(tp, PCI_EXPRESS)) 16506 tg3_flag_set(tp, MBOX_WRITE_REORDER); 16507 16508 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, 16509 &tp->pci_cacheline_sz); 16510 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER, 16511 &tp->pci_lat_timer); 16512 if (tg3_asic_rev(tp) == ASIC_REV_5703 && 16513 tp->pci_lat_timer < 64) { 16514 tp->pci_lat_timer = 64; 16515 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, 16516 tp->pci_lat_timer); 16517 } 16518 16519 /* Important! -- It is critical that the PCI-X hw workaround 16520 * situation is decided before the first MMIO register access. 16521 */ 16522 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) { 16523 /* 5700 BX chips need to have their TX producer index 16524 * mailboxes written twice to work around a bug. 16525 */ 16526 tg3_flag_set(tp, TXD_MBOX_HWBUG); 16527 16528 /* If we are in PCI-X mode, enable register write workaround. 16529 * 16530 * The workaround is to use indirect register accesses 16531 * for all chip writes not to mailbox registers. 16532 */ 16533 if (tg3_flag(tp, PCIX_MODE)) { 16534 u32 pm_reg; 16535 16536 tg3_flag_set(tp, PCIX_TARGET_HWBUG); 16537 16538 /* The chip can have its power management PCI config 16539 * space registers clobbered due to this bug. 16540 * So explicitly force the chip into D0 here. 16541 */ 16542 pci_read_config_dword(tp->pdev, 16543 tp->pdev->pm_cap + PCI_PM_CTRL, 16544 &pm_reg); 16545 pm_reg &= ~PCI_PM_CTRL_STATE_MASK; 16546 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */; 16547 pci_write_config_dword(tp->pdev, 16548 tp->pdev->pm_cap + PCI_PM_CTRL, 16549 pm_reg); 16550 16551 /* Also, force SERR#/PERR# in PCI command.
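			 * PCI_COMMAND_PARITY enables parity-error
			 * response and PCI_COMMAND_SERR enables the
			 * SERR# driver, so bus errors hit by this
			 * workaround path are actually reported.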
*/ 16552 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); 16553 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR; 16554 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); 16555 } 16556 } 16557 16558 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0) 16559 tg3_flag_set(tp, PCI_HIGH_SPEED); 16560 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0) 16561 tg3_flag_set(tp, PCI_32BIT); 16562 16563 /* Chip-specific fixup from Broadcom driver */ 16564 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) && 16565 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) { 16566 pci_state_reg |= PCISTATE_RETRY_SAME_DMA; 16567 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg); 16568 } 16569 16570 /* Default fast path register access methods */ 16571 tp->read32 = tg3_read32; 16572 tp->write32 = tg3_write32; 16573 tp->read32_mbox = tg3_read32; 16574 tp->write32_mbox = tg3_write32; 16575 tp->write32_tx_mbox = tg3_write32; 16576 tp->write32_rx_mbox = tg3_write32; 16577 16578 /* Various workaround register access methods */ 16579 if (tg3_flag(tp, PCIX_TARGET_HWBUG)) 16580 tp->write32 = tg3_write_indirect_reg32; 16581 else if (tg3_asic_rev(tp) == ASIC_REV_5701 || 16582 (tg3_flag(tp, PCI_EXPRESS) && 16583 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) { 16584 /* 16585 * Back to back register writes can cause problems on these 16586 * chips, the workaround is to read back all reg writes 16587 * except those to mailbox regs. 16588 * 16589 * See tg3_write_indirect_reg32(). 16590 */ 16591 tp->write32 = tg3_write_flush_reg32; 16592 } 16593 16594 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) { 16595 tp->write32_tx_mbox = tg3_write32_tx_mbox; 16596 if (tg3_flag(tp, MBOX_WRITE_REORDER)) 16597 tp->write32_rx_mbox = tg3_write_flush_reg32; 16598 } 16599 16600 if (tg3_flag(tp, ICH_WORKAROUND)) { 16601 tp->read32 = tg3_read_indirect_reg32; 16602 tp->write32 = tg3_write_indirect_reg32; 16603 tp->read32_mbox = tg3_read_indirect_mbox; 16604 tp->write32_mbox = tg3_write_indirect_mbox; 16605 tp->write32_tx_mbox = tg3_write_indirect_mbox; 16606 tp->write32_rx_mbox = tg3_write_indirect_mbox; 16607 16608 iounmap(tp->regs); 16609 tp->regs = NULL; 16610 16611 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); 16612 pci_cmd &= ~PCI_COMMAND_MEMORY; 16613 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); 16614 } 16615 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 16616 tp->read32_mbox = tg3_read32_mbox_5906; 16617 tp->write32_mbox = tg3_write32_mbox_5906; 16618 tp->write32_tx_mbox = tg3_write32_mbox_5906; 16619 tp->write32_rx_mbox = tg3_write32_mbox_5906; 16620 } 16621 16622 if (tp->write32 == tg3_write_indirect_reg32 || 16623 (tg3_flag(tp, PCIX_MODE) && 16624 (tg3_asic_rev(tp) == ASIC_REV_5700 || 16625 tg3_asic_rev(tp) == ASIC_REV_5701))) 16626 tg3_flag_set(tp, SRAM_USE_CONFIG); 16627 16628 /* The memory arbiter has to be enabled in order for SRAM accesses 16629 * to succeed. Normally on powerup the tg3 chip firmware will make 16630 * sure it is enabled, but other entities such as system netboot 16631 * code might disable it. 
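	 * The read-modify-write below ORs MEMARB_MODE_ENABLE back in
	 * without disturbing the other arbiter mode bits.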
16632 */ 16633 val = tr32(MEMARB_MODE); 16634 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); 16635 16636 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3; 16637 if (tg3_asic_rev(tp) == ASIC_REV_5704 || 16638 tg3_flag(tp, 5780_CLASS)) { 16639 if (tg3_flag(tp, PCIX_MODE)) { 16640 pci_read_config_dword(tp->pdev, 16641 tp->pcix_cap + PCI_X_STATUS, 16642 &val); 16643 tp->pci_fn = val & 0x7; 16644 } 16645 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 || 16646 tg3_asic_rev(tp) == ASIC_REV_5719 || 16647 tg3_asic_rev(tp) == ASIC_REV_5720) { 16648 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val); 16649 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG) 16650 val = tr32(TG3_CPMU_STATUS); 16651 16652 if (tg3_asic_rev(tp) == ASIC_REV_5717) 16653 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0; 16654 else 16655 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >> 16656 TG3_CPMU_STATUS_FSHFT_5719; 16657 } 16658 16659 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) { 16660 tp->write32_tx_mbox = tg3_write_flush_reg32; 16661 tp->write32_rx_mbox = tg3_write_flush_reg32; 16662 } 16663 16664 /* Get eeprom hw config before calling tg3_set_power_state(). 16665 * In particular, the TG3_FLAG_IS_NIC flag must be 16666 * determined before calling tg3_set_power_state() so that 16667 * we know whether or not to switch out of Vaux power. 16668 * When the flag is set, it means that GPIO1 is used for eeprom 16669 * write protect and also implies that it is a LOM where GPIOs 16670 * are not used to switch power. 16671 */ 16672 tg3_get_eeprom_hw_cfg(tp); 16673 16674 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) { 16675 tg3_flag_clear(tp, TSO_CAPABLE); 16676 tg3_flag_clear(tp, TSO_BUG); 16677 tp->fw_needed = NULL; 16678 } 16679 16680 if (tg3_flag(tp, ENABLE_APE)) { 16681 /* Allow reads and writes to the 16682 * APE register and memory space. 16683 */ 16684 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR | 16685 PCISTATE_ALLOW_APE_SHMEM_WR | 16686 PCISTATE_ALLOW_APE_PSPACE_WR; 16687 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, 16688 pci_state_reg); 16689 16690 tg3_ape_lock_init(tp); 16691 tp->ape_hb_interval = 16692 msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC); 16693 } 16694 16695 /* Set up tp->grc_local_ctrl before calling 16696 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high 16697 * will bring 5700's external PHY out of reset. 16698 * It is also used as eeprom write protect on LOMs. 16699 */ 16700 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM; 16701 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 16702 tg3_flag(tp, EEPROM_WRITE_PROT)) 16703 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | 16704 GRC_LCLCTRL_GPIO_OUTPUT1); 16705 /* Unused GPIO3 must be driven as output on 5752 because there 16706 * are no pull-up resistors on unused GPIO pins. 16707 */ 16708 else if (tg3_asic_rev(tp) == ASIC_REV_5752) 16709 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; 16710 16711 if (tg3_asic_rev(tp) == ASIC_REV_5755 || 16712 tg3_asic_rev(tp) == ASIC_REV_57780 || 16713 tg3_flag(tp, 57765_CLASS)) 16714 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; 16715 16716 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || 16717 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { 16718 /* Turn off the debug UART. */ 16719 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; 16720 if (tg3_flag(tp, IS_NIC)) 16721 /* Keep VMain power. 
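			 * (GPIO0 is driven high as an output, which
			 * holds the VMain power source selected.)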
*/ 16722 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | 16723 GRC_LCLCTRL_GPIO_OUTPUT0; 16724 } 16725 16726 if (tg3_asic_rev(tp) == ASIC_REV_5762) 16727 tp->grc_local_ctrl |= 16728 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL; 16729 16730 /* Switch out of Vaux if it is a NIC */ 16731 tg3_pwrsrc_switch_to_vmain(tp); 16732 16733 /* Derive initial jumbo mode from MTU assigned in 16734 * ether_setup() via the alloc_etherdev() call 16735 */ 16736 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS)) 16737 tg3_flag_set(tp, JUMBO_RING_ENABLE); 16738 16739 /* Determine WakeOnLan speed to use. */ 16740 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 16741 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || 16742 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 || 16743 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) { 16744 tg3_flag_clear(tp, WOL_SPEED_100MB); 16745 } else { 16746 tg3_flag_set(tp, WOL_SPEED_100MB); 16747 } 16748 16749 if (tg3_asic_rev(tp) == ASIC_REV_5906) 16750 tp->phy_flags |= TG3_PHYFLG_IS_FET; 16751 16752 /* A few boards don't want Ethernet@WireSpeed phy feature */ 16753 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 16754 (tg3_asic_rev(tp) == ASIC_REV_5705 && 16755 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) && 16756 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) || 16757 (tp->phy_flags & TG3_PHYFLG_IS_FET) || 16758 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 16759 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED; 16760 16761 if (tg3_chip_rev(tp) == CHIPREV_5703_AX || 16762 tg3_chip_rev(tp) == CHIPREV_5704_AX) 16763 tp->phy_flags |= TG3_PHYFLG_ADC_BUG; 16764 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) 16765 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG; 16766 16767 if (tg3_flag(tp, 5705_PLUS) && 16768 !(tp->phy_flags & TG3_PHYFLG_IS_FET) && 16769 tg3_asic_rev(tp) != ASIC_REV_5785 && 16770 tg3_asic_rev(tp) != ASIC_REV_57780 && 16771 !tg3_flag(tp, 57765_PLUS)) { 16772 if (tg3_asic_rev(tp) == ASIC_REV_5755 || 16773 tg3_asic_rev(tp) == ASIC_REV_5787 || 16774 tg3_asic_rev(tp) == ASIC_REV_5784 || 16775 tg3_asic_rev(tp) == ASIC_REV_5761) { 16776 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 && 16777 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722) 16778 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG; 16779 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M) 16780 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM; 16781 } else 16782 tp->phy_flags |= TG3_PHYFLG_BER_BUG; 16783 } 16784 16785 if (tg3_asic_rev(tp) == ASIC_REV_5784 && 16786 tg3_chip_rev(tp) != CHIPREV_5784_AX) { 16787 tp->phy_otp = tg3_read_otp_phycfg(tp); 16788 if (tp->phy_otp == 0) 16789 tp->phy_otp = TG3_OTP_DEFAULT; 16790 } 16791 16792 if (tg3_flag(tp, CPMU_PRESENT)) 16793 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST; 16794 else 16795 tp->mi_mode = MAC_MI_MODE_BASE; 16796 16797 tp->coalesce_mode = 0; 16798 if (tg3_chip_rev(tp) != CHIPREV_5700_AX && 16799 tg3_chip_rev(tp) != CHIPREV_5700_BX) 16800 tp->coalesce_mode |= HOSTCC_MODE_32BYTE; 16801 16802 /* Set these bits to enable statistics workaround. */ 16803 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 16804 tg3_asic_rev(tp) == ASIC_REV_5762 || 16805 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || 16806 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) { 16807 tp->coalesce_mode |= HOSTCC_MODE_ATTN; 16808 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN; 16809 } 16810 16811 if (tg3_asic_rev(tp) == ASIC_REV_5785 || 16812 tg3_asic_rev(tp) == ASIC_REV_57780) 16813 tg3_flag_set(tp, USE_PHYLIB); 16814 16815 err = tg3_mdio_init(tp); 16816 if (err) 16817 return err; 16818 16819 /* Initialize data/descriptor byte/word swapping. 
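	 * Only the host-stackup bit (plus, on 5720/5762, the B2HRX
	 * and HTX2B swap/enable bits) is preserved from the current
	 * GRC_MODE value; everything else comes from tp->grc_mode.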
*/ 16820 val = tr32(GRC_MODE); 16821 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 16822 tg3_asic_rev(tp) == ASIC_REV_5762) 16823 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA | 16824 GRC_MODE_WORD_SWAP_B2HRX_DATA | 16825 GRC_MODE_B2HRX_ENABLE | 16826 GRC_MODE_HTX2B_ENABLE | 16827 GRC_MODE_HOST_STACKUP); 16828 else 16829 val &= GRC_MODE_HOST_STACKUP; 16830 16831 tw32(GRC_MODE, val | tp->grc_mode); 16832 16833 tg3_switch_clocks(tp); 16834 16835 /* Clear this out for sanity. */ 16836 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); 16837 16838 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */ 16839 tw32(TG3PCI_REG_BASE_ADDR, 0); 16840 16841 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, 16842 &pci_state_reg); 16843 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 && 16844 !tg3_flag(tp, PCIX_TARGET_HWBUG)) { 16845 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || 16846 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 || 16847 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 || 16848 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) { 16849 void __iomem *sram_base; 16850 16851 /* Write some dummy words into the SRAM status block 16852 * area, see if it reads back correctly. If the return 16853 * value is bad, force enable the PCIX workaround. 16854 */ 16855 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK; 16856 16857 writel(0x00000000, sram_base); 16858 writel(0x00000000, sram_base + 4); 16859 writel(0xffffffff, sram_base + 4); 16860 if (readl(sram_base) != 0x00000000) 16861 tg3_flag_set(tp, PCIX_TARGET_HWBUG); 16862 } 16863 } 16864 16865 udelay(50); 16866 tg3_nvram_init(tp); 16867 16868 /* If the device has an NVRAM, no need to load patch firmware */ 16869 if (tg3_asic_rev(tp) == ASIC_REV_57766 && 16870 !tg3_flag(tp, NO_NVRAM)) 16871 tp->fw_needed = NULL; 16872 16873 grc_misc_cfg = tr32(GRC_MISC_CFG); 16874 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK; 16875 16876 if (tg3_asic_rev(tp) == ASIC_REV_5705 && 16877 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 || 16878 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M)) 16879 tg3_flag_set(tp, IS_5788); 16880 16881 if (!tg3_flag(tp, IS_5788) && 16882 tg3_asic_rev(tp) != ASIC_REV_5700) 16883 tg3_flag_set(tp, TAGGED_STATUS); 16884 if (tg3_flag(tp, TAGGED_STATUS)) { 16885 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD | 16886 HOSTCC_MODE_CLRTICK_TXBD); 16887 16888 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS; 16889 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, 16890 tp->misc_host_ctrl); 16891 } 16892 16893 /* Preserve the APE MAC_MODE bits */ 16894 if (tg3_flag(tp, ENABLE_APE)) 16895 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; 16896 else 16897 tp->mac_mode = 0; 16898 16899 if (tg3_10_100_only_device(tp, ent)) 16900 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY; 16901 16902 err = tg3_phy_probe(tp); 16903 if (err) { 16904 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err); 16905 /* ... but do not return immediately ... */ 16906 tg3_mdio_fini(tp); 16907 } 16908 16909 tg3_read_vpd(tp); 16910 tg3_read_fw_ver(tp); 16911 16912 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 16913 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT; 16914 } else { 16915 if (tg3_asic_rev(tp) == ASIC_REV_5700) 16916 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT; 16917 else 16918 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT; 16919 } 16920 16921 /* 5700 {AX,BX} chips have a broken status block link 16922 * change bit implementation, so we must use the 16923 * status register in those cases. 
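	 * The USE_LINKCHG_REG flag set below makes the driver read
	 * the MAC status register for link changes instead of
	 * trusting the status block bit.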
16924 */ 16925 if (tg3_asic_rev(tp) == ASIC_REV_5700) 16926 tg3_flag_set(tp, USE_LINKCHG_REG); 16927 else 16928 tg3_flag_clear(tp, USE_LINKCHG_REG); 16929 16930 /* The led_ctrl is set during tg3_phy_probe, here we might 16931 * have to force the link status polling mechanism based 16932 * upon subsystem IDs. 16933 */ 16934 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL && 16935 tg3_asic_rev(tp) == ASIC_REV_5701 && 16936 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { 16937 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT; 16938 tg3_flag_set(tp, USE_LINKCHG_REG); 16939 } 16940 16941 /* For all SERDES we poll the MAC status register. */ 16942 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 16943 tg3_flag_set(tp, POLL_SERDES); 16944 else 16945 tg3_flag_clear(tp, POLL_SERDES); 16946 16947 if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF)) 16948 tg3_flag_set(tp, POLL_CPMU_LINK); 16949 16950 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN; 16951 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD; 16952 if (tg3_asic_rev(tp) == ASIC_REV_5701 && 16953 tg3_flag(tp, PCIX_MODE)) { 16954 tp->rx_offset = NET_SKB_PAD; 16955 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 16956 tp->rx_copy_thresh = ~(u16)0; 16957 #endif 16958 } 16959 16960 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1; 16961 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1; 16962 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1; 16963 16964 tp->rx_std_max_post = tp->rx_std_ring_mask + 1; 16965 16966 /* Increment the rx prod index on the rx std ring by at most 16967 * 8 for these chips to workaround hw errata. 16968 */ 16969 if (tg3_asic_rev(tp) == ASIC_REV_5750 || 16970 tg3_asic_rev(tp) == ASIC_REV_5752 || 16971 tg3_asic_rev(tp) == ASIC_REV_5755) 16972 tp->rx_std_max_post = 8; 16973 16974 if (tg3_flag(tp, ASPM_WORKAROUND)) 16975 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) & 16976 PCIE_PWR_MGMT_L1_THRESH_MSK; 16977 16978 return err; 16979 } 16980 16981 static int tg3_get_device_address(struct tg3 *tp) 16982 { 16983 struct net_device *dev = tp->dev; 16984 u32 hi, lo, mac_offset; 16985 int addr_ok = 0; 16986 int err; 16987 16988 if (!eth_platform_get_mac_address(&tp->pdev->dev, dev->dev_addr)) 16989 return 0; 16990 16991 if (tg3_flag(tp, IS_SSB_CORE)) { 16992 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]); 16993 if (!err && is_valid_ether_addr(&dev->dev_addr[0])) 16994 return 0; 16995 } 16996 16997 mac_offset = 0x7c; 16998 if (tg3_asic_rev(tp) == ASIC_REV_5704 || 16999 tg3_flag(tp, 5780_CLASS)) { 17000 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) 17001 mac_offset = 0xcc; 17002 if (tg3_nvram_lock(tp)) 17003 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET); 17004 else 17005 tg3_nvram_unlock(tp); 17006 } else if (tg3_flag(tp, 5717_PLUS)) { 17007 if (tp->pci_fn & 1) 17008 mac_offset = 0xcc; 17009 if (tp->pci_fn > 1) 17010 mac_offset += 0x18c; 17011 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) 17012 mac_offset = 0x10; 17013 17014 /* First try to get it from MAC address mailbox. 
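	 * A valid mailbox carries 0x484b (ASCII 'H', 'K') in the
	 * upper 16 bits of the high word; e.g. hi = 0x484b0a0b and
	 * lo = 0x0c0d0e0f decode to the station address
	 * 0a:0b:0c:0d:0e:0f below.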
*/ 17015 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi); 17016 if ((hi >> 16) == 0x484b) { 17017 dev->dev_addr[0] = (hi >> 8) & 0xff; 17018 dev->dev_addr[1] = (hi >> 0) & 0xff; 17019 17020 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo); 17021 dev->dev_addr[2] = (lo >> 24) & 0xff; 17022 dev->dev_addr[3] = (lo >> 16) & 0xff; 17023 dev->dev_addr[4] = (lo >> 8) & 0xff; 17024 dev->dev_addr[5] = (lo >> 0) & 0xff; 17025 17026 /* Some old bootcode may report a 0 MAC address in SRAM */ 17027 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]); 17028 } 17029 if (!addr_ok) { 17030 /* Next, try NVRAM. */ 17031 if (!tg3_flag(tp, NO_NVRAM) && 17032 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) && 17033 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) { 17034 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2); 17035 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo)); 17036 } 17037 /* Finally just fetch it out of the MAC control regs. */ 17038 else { 17039 hi = tr32(MAC_ADDR_0_HIGH); 17040 lo = tr32(MAC_ADDR_0_LOW); 17041 17042 dev->dev_addr[5] = lo & 0xff; 17043 dev->dev_addr[4] = (lo >> 8) & 0xff; 17044 dev->dev_addr[3] = (lo >> 16) & 0xff; 17045 dev->dev_addr[2] = (lo >> 24) & 0xff; 17046 dev->dev_addr[1] = hi & 0xff; 17047 dev->dev_addr[0] = (hi >> 8) & 0xff; 17048 } 17049 } 17050 17051 if (!is_valid_ether_addr(&dev->dev_addr[0])) 17052 return -EINVAL; 17053 return 0; 17054 } 17055 17056 #define BOUNDARY_SINGLE_CACHELINE 1 17057 #define BOUNDARY_MULTI_CACHELINE 2 17058 17059 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val) 17060 { 17061 int cacheline_size; 17062 u8 byte; 17063 int goal; 17064 17065 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte); 17066 if (byte == 0) 17067 cacheline_size = 1024; 17068 else 17069 cacheline_size = (int) byte * 4; 17070 17071 /* On 5703 and later chips, the boundary bits have no 17072 * effect. 17073 */ 17074 if (tg3_asic_rev(tp) != ASIC_REV_5700 && 17075 tg3_asic_rev(tp) != ASIC_REV_5701 && 17076 !tg3_flag(tp, PCI_EXPRESS)) 17077 goto out; 17078 17079 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC) 17080 goal = BOUNDARY_MULTI_CACHELINE; 17081 #else 17082 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA) 17083 goal = BOUNDARY_SINGLE_CACHELINE; 17084 #else 17085 goal = 0; 17086 #endif 17087 #endif 17088 17089 if (tg3_flag(tp, 57765_PLUS)) { 17090 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT; 17091 goto out; 17092 } 17093 17094 if (!goal) 17095 goto out; 17096 17097 /* PCI controllers on most RISC systems tend to disconnect 17098 * when a device tries to burst across a cache-line boundary. 17099 * Therefore, letting tg3 do so just wastes PCI bandwidth. 17100 * 17101 * Unfortunately, for PCI-E there are only limited 17102 * write-side controls for this, and thus for reads 17103 * we will still get the disconnects. We'll also waste 17104 * these PCI cycles for both read and write for chips 17105 * other than 5700 and 5701 which do not implement the 17106 * boundary bits. 
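	 * Worked example: a PCI_CACHE_LINE_SIZE byte of 0x10 means a
	 * 64-byte cache line, and with goal == BOUNDARY_SINGLE_CACHELINE
	 * the PCI-X branch below selects the 128-byte read and write
	 * boundaries.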
17107 */ 17108 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) { 17109 switch (cacheline_size) { 17110 case 16: 17111 case 32: 17112 case 64: 17113 case 128: 17114 if (goal == BOUNDARY_SINGLE_CACHELINE) { 17115 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX | 17116 DMA_RWCTRL_WRITE_BNDRY_128_PCIX); 17117 } else { 17118 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX | 17119 DMA_RWCTRL_WRITE_BNDRY_384_PCIX); 17120 } 17121 break; 17122 17123 case 256: 17124 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX | 17125 DMA_RWCTRL_WRITE_BNDRY_256_PCIX); 17126 break; 17127 17128 default: 17129 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX | 17130 DMA_RWCTRL_WRITE_BNDRY_384_PCIX); 17131 break; 17132 } 17133 } else if (tg3_flag(tp, PCI_EXPRESS)) { 17134 switch (cacheline_size) { 17135 case 16: 17136 case 32: 17137 case 64: 17138 if (goal == BOUNDARY_SINGLE_CACHELINE) { 17139 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE; 17140 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE; 17141 break; 17142 } 17143 /* fallthrough */ 17144 case 128: 17145 default: 17146 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE; 17147 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE; 17148 break; 17149 } 17150 } else { 17151 switch (cacheline_size) { 17152 case 16: 17153 if (goal == BOUNDARY_SINGLE_CACHELINE) { 17154 val |= (DMA_RWCTRL_READ_BNDRY_16 | 17155 DMA_RWCTRL_WRITE_BNDRY_16); 17156 break; 17157 } 17158 /* fallthrough */ 17159 case 32: 17160 if (goal == BOUNDARY_SINGLE_CACHELINE) { 17161 val |= (DMA_RWCTRL_READ_BNDRY_32 | 17162 DMA_RWCTRL_WRITE_BNDRY_32); 17163 break; 17164 } 17165 /* fallthrough */ 17166 case 64: 17167 if (goal == BOUNDARY_SINGLE_CACHELINE) { 17168 val |= (DMA_RWCTRL_READ_BNDRY_64 | 17169 DMA_RWCTRL_WRITE_BNDRY_64); 17170 break; 17171 } 17172 /* fallthrough */ 17173 case 128: 17174 if (goal == BOUNDARY_SINGLE_CACHELINE) { 17175 val |= (DMA_RWCTRL_READ_BNDRY_128 | 17176 DMA_RWCTRL_WRITE_BNDRY_128); 17177 break; 17178 } 17179 /* fallthrough */ 17180 case 256: 17181 val |= (DMA_RWCTRL_READ_BNDRY_256 | 17182 DMA_RWCTRL_WRITE_BNDRY_256); 17183 break; 17184 case 512: 17185 val |= (DMA_RWCTRL_READ_BNDRY_512 | 17186 DMA_RWCTRL_WRITE_BNDRY_512); 17187 break; 17188 case 1024: 17189 default: 17190 val |= (DMA_RWCTRL_READ_BNDRY_1024 | 17191 DMA_RWCTRL_WRITE_BNDRY_1024); 17192 break; 17193 } 17194 } 17195 17196 out: 17197 return val; 17198 } 17199 17200 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, 17201 int size, bool to_device) 17202 { 17203 struct tg3_internal_buffer_desc test_desc; 17204 u32 sram_dma_descs; 17205 int i, ret; 17206 17207 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE; 17208 17209 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0); 17210 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0); 17211 tw32(RDMAC_STATUS, 0); 17212 tw32(WDMAC_STATUS, 0); 17213 17214 tw32(BUFMGR_MODE, 0); 17215 tw32(FTQ_RESET, 0); 17216 17217 test_desc.addr_hi = ((u64) buf_dma) >> 32; 17218 test_desc.addr_lo = buf_dma & 0xffffffff; 17219 test_desc.nic_mbuf = 0x00002100; 17220 test_desc.len = size; 17221 17222 /* 17223 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz 17224 * the *second* time the tg3 driver was getting loaded after an 17225 * initial scan. 17226 * 17227 * Broadcom tells me: 17228 * ...the DMA engine is connected to the GRC block and a DMA 17229 * reset may affect the GRC block in some unpredictable way... 17230 * The behavior of resets to individual blocks has not been tested. 17231 * 17232 * Broadcom noted the GRC reset will also reset all sub-components. 
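	 * For that reason this test never issues a GRC reset; it only
	 * clears the individual FTQ and DMA status registers above.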
17233 */ 17234 if (to_device) { 17235 test_desc.cqid_sqid = (13 << 8) | 2; 17236 17237 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE); 17238 udelay(40); 17239 } else { 17240 test_desc.cqid_sqid = (16 << 8) | 7; 17241 17242 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE); 17243 udelay(40); 17244 } 17245 test_desc.flags = 0x00000005; 17246 17247 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) { 17248 u32 val; 17249 17250 val = *(((u32 *)&test_desc) + i); 17251 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 17252 sram_dma_descs + (i * sizeof(u32))); 17253 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); 17254 } 17255 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); 17256 17257 if (to_device) 17258 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs); 17259 else 17260 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs); 17261 17262 ret = -ENODEV; 17263 for (i = 0; i < 40; i++) { 17264 u32 val; 17265 17266 if (to_device) 17267 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ); 17268 else 17269 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ); 17270 if ((val & 0xffff) == sram_dma_descs) { 17271 ret = 0; 17272 break; 17273 } 17274 17275 udelay(100); 17276 } 17277 17278 return ret; 17279 } 17280 17281 #define TEST_BUFFER_SIZE 0x2000 17282 17283 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = { 17284 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) }, 17285 { }, 17286 }; 17287 17288 static int tg3_test_dma(struct tg3 *tp) 17289 { 17290 dma_addr_t buf_dma; 17291 u32 *buf, saved_dma_rwctrl; 17292 int ret = 0; 17293 17294 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, 17295 &buf_dma, GFP_KERNEL); 17296 if (!buf) { 17297 ret = -ENOMEM; 17298 goto out_nofree; 17299 } 17300 17301 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) | 17302 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT)); 17303 17304 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl); 17305 17306 if (tg3_flag(tp, 57765_PLUS)) 17307 goto out; 17308 17309 if (tg3_flag(tp, PCI_EXPRESS)) { 17310 /* DMA read watermark not used on PCIE */ 17311 tp->dma_rwctrl |= 0x00180000; 17312 } else if (!tg3_flag(tp, PCIX_MODE)) { 17313 if (tg3_asic_rev(tp) == ASIC_REV_5705 || 17314 tg3_asic_rev(tp) == ASIC_REV_5750) 17315 tp->dma_rwctrl |= 0x003f0000; 17316 else 17317 tp->dma_rwctrl |= 0x003f000f; 17318 } else { 17319 if (tg3_asic_rev(tp) == ASIC_REV_5703 || 17320 tg3_asic_rev(tp) == ASIC_REV_5704) { 17321 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f); 17322 u32 read_water = 0x7; 17323 17324 /* If the 5704 is behind the EPB bridge, we can 17325 * do the less restrictive ONE_DMA workaround for 17326 * better performance. 
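		 * (Bit 15, set below, is the relaxed variant; the
		 * stricter DMA_RWCTRL_ONE_DMA allows only a single
		 * DMA transaction at a time.)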
17327 */ 17328 if (tg3_flag(tp, 40BIT_DMA_BUG) && 17329 tg3_asic_rev(tp) == ASIC_REV_5704) 17330 tp->dma_rwctrl |= 0x8000; 17331 else if (ccval == 0x6 || ccval == 0x7) 17332 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA; 17333 17334 if (tg3_asic_rev(tp) == ASIC_REV_5703) 17335 read_water = 4; 17336 /* Set bit 23 to enable PCIX hw bug fix */ 17337 tp->dma_rwctrl |= 17338 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) | 17339 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) | 17340 (1 << 23); 17341 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) { 17342 /* 5780 always in PCIX mode */ 17343 tp->dma_rwctrl |= 0x00144000; 17344 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) { 17345 /* 5714 always in PCIX mode */ 17346 tp->dma_rwctrl |= 0x00148000; 17347 } else { 17348 tp->dma_rwctrl |= 0x001b000f; 17349 } 17350 } 17351 if (tg3_flag(tp, ONE_DMA_AT_ONCE)) 17352 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA; 17353 17354 if (tg3_asic_rev(tp) == ASIC_REV_5703 || 17355 tg3_asic_rev(tp) == ASIC_REV_5704) 17356 tp->dma_rwctrl &= 0xfffffff0; 17357 17358 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 17359 tg3_asic_rev(tp) == ASIC_REV_5701) { 17360 /* Remove this if it causes problems for some boards. */ 17361 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT; 17362 17363 /* On 5700/5701 chips, we need to set this bit. 17364 * Otherwise the chip will issue cacheline transactions 17365 * to streamable DMA memory with not all the byte 17366 * enables turned on. This is an error on several 17367 * RISC PCI controllers, in particular sparc64. 17368 * 17369 * On 5703/5704 chips, this bit has been reassigned 17370 * a different meaning. In particular, it is used 17371 * on those chips to enable a PCI-X workaround. 17372 */ 17373 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE; 17374 } 17375 17376 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 17377 17378 17379 if (tg3_asic_rev(tp) != ASIC_REV_5700 && 17380 tg3_asic_rev(tp) != ASIC_REV_5701) 17381 goto out; 17382 17383 /* It is best to perform DMA test with maximum write burst size 17384 * to expose the 5700/5701 write DMA bug. 17385 */ 17386 saved_dma_rwctrl = tp->dma_rwctrl; 17387 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; 17388 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 17389 17390 while (1) { 17391 u32 *p = buf, i; 17392 17393 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) 17394 p[i] = i; 17395 17396 /* Send the buffer to the chip. */ 17397 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true); 17398 if (ret) { 17399 dev_err(&tp->pdev->dev, 17400 "%s: Buffer write failed. err = %d\n", 17401 __func__, ret); 17402 break; 17403 } 17404 17405 /* Now read it back. */ 17406 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false); 17407 if (ret) { 17408 dev_err(&tp->pdev->dev, "%s: Buffer read failed. " 17409 "err = %d\n", __func__, ret); 17410 break; 17411 } 17412 17413 /* Verify it. */ 17414 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) { 17415 if (p[i] == i) 17416 continue; 17417 17418 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != 17419 DMA_RWCTRL_WRITE_BNDRY_16) { 17420 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; 17421 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; 17422 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 17423 break; 17424 } else { 17425 dev_err(&tp->pdev->dev, 17426 "%s: Buffer corrupted on read back! " 17427 "(%d != %d)\n", __func__, p[i], i); 17428 ret = -ENODEV; 17429 goto out; 17430 } 17431 } 17432 17433 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) { 17434 /* Success. 
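			 * Every word of the test buffer survived the
			 * round trip, so the current write boundary
			 * setting is safe.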
*/ 17435 ret = 0; 17436 break; 17437 } 17438 } 17439 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != 17440 DMA_RWCTRL_WRITE_BNDRY_16) { 17441 /* DMA test passed without adjusting DMA boundary, 17442 * now look for chipsets that are known to expose the 17443 * DMA bug without failing the test. 17444 */ 17445 if (pci_dev_present(tg3_dma_wait_state_chipsets)) { 17446 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; 17447 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; 17448 } else { 17449 /* Safe to use the calculated DMA boundary. */ 17450 tp->dma_rwctrl = saved_dma_rwctrl; 17451 } 17452 17453 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 17454 } 17455 17456 out: 17457 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma); 17458 out_nofree: 17459 return ret; 17460 } 17461 17462 static void tg3_init_bufmgr_config(struct tg3 *tp) 17463 { 17464 if (tg3_flag(tp, 57765_PLUS)) { 17465 tp->bufmgr_config.mbuf_read_dma_low_water = 17466 DEFAULT_MB_RDMA_LOW_WATER_5705; 17467 tp->bufmgr_config.mbuf_mac_rx_low_water = 17468 DEFAULT_MB_MACRX_LOW_WATER_57765; 17469 tp->bufmgr_config.mbuf_high_water = 17470 DEFAULT_MB_HIGH_WATER_57765; 17471 17472 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = 17473 DEFAULT_MB_RDMA_LOW_WATER_5705; 17474 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = 17475 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765; 17476 tp->bufmgr_config.mbuf_high_water_jumbo = 17477 DEFAULT_MB_HIGH_WATER_JUMBO_57765; 17478 } else if (tg3_flag(tp, 5705_PLUS)) { 17479 tp->bufmgr_config.mbuf_read_dma_low_water = 17480 DEFAULT_MB_RDMA_LOW_WATER_5705; 17481 tp->bufmgr_config.mbuf_mac_rx_low_water = 17482 DEFAULT_MB_MACRX_LOW_WATER_5705; 17483 tp->bufmgr_config.mbuf_high_water = 17484 DEFAULT_MB_HIGH_WATER_5705; 17485 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 17486 tp->bufmgr_config.mbuf_mac_rx_low_water = 17487 DEFAULT_MB_MACRX_LOW_WATER_5906; 17488 tp->bufmgr_config.mbuf_high_water = 17489 DEFAULT_MB_HIGH_WATER_5906; 17490 } 17491 17492 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = 17493 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780; 17494 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = 17495 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780; 17496 tp->bufmgr_config.mbuf_high_water_jumbo = 17497 DEFAULT_MB_HIGH_WATER_JUMBO_5780; 17498 } else { 17499 tp->bufmgr_config.mbuf_read_dma_low_water = 17500 DEFAULT_MB_RDMA_LOW_WATER; 17501 tp->bufmgr_config.mbuf_mac_rx_low_water = 17502 DEFAULT_MB_MACRX_LOW_WATER; 17503 tp->bufmgr_config.mbuf_high_water = 17504 DEFAULT_MB_HIGH_WATER; 17505 17506 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = 17507 DEFAULT_MB_RDMA_LOW_WATER_JUMBO; 17508 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = 17509 DEFAULT_MB_MACRX_LOW_WATER_JUMBO; 17510 tp->bufmgr_config.mbuf_high_water_jumbo = 17511 DEFAULT_MB_HIGH_WATER_JUMBO; 17512 } 17513 17514 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER; 17515 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER; 17516 } 17517 17518 static char *tg3_phy_string(struct tg3 *tp) 17519 { 17520 switch (tp->phy_id & TG3_PHY_ID_MASK) { 17521 case TG3_PHY_ID_BCM5400: return "5400"; 17522 case TG3_PHY_ID_BCM5401: return "5401"; 17523 case TG3_PHY_ID_BCM5411: return "5411"; 17524 case TG3_PHY_ID_BCM5701: return "5701"; 17525 case TG3_PHY_ID_BCM5703: return "5703"; 17526 case TG3_PHY_ID_BCM5704: return "5704"; 17527 case TG3_PHY_ID_BCM5705: return "5705"; 17528 case TG3_PHY_ID_BCM5750: return "5750"; 17529 case TG3_PHY_ID_BCM5752: return "5752"; 17530 case TG3_PHY_ID_BCM5714: return "5714"; 17531 case TG3_PHY_ID_BCM5780: return "5780"; 17532 case 
TG3_PHY_ID_BCM5755: return "5755"; 17533 case TG3_PHY_ID_BCM5787: return "5787"; 17534 case TG3_PHY_ID_BCM5784: return "5784"; 17535 case TG3_PHY_ID_BCM5756: return "5722/5756"; 17536 case TG3_PHY_ID_BCM5906: return "5906"; 17537 case TG3_PHY_ID_BCM5761: return "5761"; 17538 case TG3_PHY_ID_BCM5718C: return "5718C"; 17539 case TG3_PHY_ID_BCM5718S: return "5718S"; 17540 case TG3_PHY_ID_BCM57765: return "57765"; 17541 case TG3_PHY_ID_BCM5719C: return "5719C"; 17542 case TG3_PHY_ID_BCM5720C: return "5720C"; 17543 case TG3_PHY_ID_BCM5762: return "5762C"; 17544 case TG3_PHY_ID_BCM8002: return "8002/serdes"; 17545 case 0: return "serdes"; 17546 default: return "unknown"; 17547 } 17548 } 17549 17550 static char *tg3_bus_string(struct tg3 *tp, char *str) 17551 { 17552 if (tg3_flag(tp, PCI_EXPRESS)) { 17553 strcpy(str, "PCI Express"); 17554 return str; 17555 } else if (tg3_flag(tp, PCIX_MODE)) { 17556 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f; 17557 17558 strcpy(str, "PCIX:"); 17559 17560 if ((clock_ctrl == 7) || 17561 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) == 17562 GRC_MISC_CFG_BOARD_ID_5704CIOBE)) 17563 strcat(str, "133MHz"); 17564 else if (clock_ctrl == 0) 17565 strcat(str, "33MHz"); 17566 else if (clock_ctrl == 2) 17567 strcat(str, "50MHz"); 17568 else if (clock_ctrl == 4) 17569 strcat(str, "66MHz"); 17570 else if (clock_ctrl == 6) 17571 strcat(str, "100MHz"); 17572 } else { 17573 strcpy(str, "PCI:"); 17574 if (tg3_flag(tp, PCI_HIGH_SPEED)) 17575 strcat(str, "66MHz"); 17576 else 17577 strcat(str, "33MHz"); 17578 } 17579 if (tg3_flag(tp, PCI_32BIT)) 17580 strcat(str, ":32-bit"); 17581 else 17582 strcat(str, ":64-bit"); 17583 return str; 17584 } 17585 17586 static void tg3_init_coal(struct tg3 *tp) 17587 { 17588 struct ethtool_coalesce *ec = &tp->coal; 17589 17590 memset(ec, 0, sizeof(*ec)); 17591 ec->cmd = ETHTOOL_GCOALESCE; 17592 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS; 17593 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS; 17594 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES; 17595 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES; 17596 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT; 17597 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT; 17598 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT; 17599 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT; 17600 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS; 17601 17602 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD | 17603 HOSTCC_MODE_CLRTICK_TXBD)) { 17604 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS; 17605 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS; 17606 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS; 17607 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS; 17608 } 17609 17610 if (tg3_flag(tp, 5705_PLUS)) { 17611 ec->rx_coalesce_usecs_irq = 0; 17612 ec->tx_coalesce_usecs_irq = 0; 17613 ec->stats_block_coalesce_usecs = 0; 17614 } 17615 } 17616 17617 static int tg3_init_one(struct pci_dev *pdev, 17618 const struct pci_device_id *ent) 17619 { 17620 struct net_device *dev; 17621 struct tg3 *tp; 17622 int i, err; 17623 u32 sndmbx, rcvmbx, intmbx; 17624 char str[40]; 17625 u64 dma_mask, persist_dma_mask; 17626 netdev_features_t features = 0; 17627 17628 printk_once(KERN_INFO "%s\n", version); 17629 17630 err = pci_enable_device(pdev); 17631 if (err) { 17632 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 17633 return err; 17634 } 17635 17636 err = pci_request_regions(pdev, DRV_MODULE_NAME); 17637 if (err) { 17638 dev_err(&pdev->dev, "Cannot obtain PCI resources, 
aborting\n"); 17639 goto err_out_disable_pdev; 17640 } 17641 17642 pci_set_master(pdev); 17643 17644 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS); 17645 if (!dev) { 17646 err = -ENOMEM; 17647 goto err_out_free_res; 17648 } 17649 17650 SET_NETDEV_DEV(dev, &pdev->dev); 17651 17652 tp = netdev_priv(dev); 17653 tp->pdev = pdev; 17654 tp->dev = dev; 17655 tp->rx_mode = TG3_DEF_RX_MODE; 17656 tp->tx_mode = TG3_DEF_TX_MODE; 17657 tp->irq_sync = 1; 17658 tp->pcierr_recovery = false; 17659 17660 if (tg3_debug > 0) 17661 tp->msg_enable = tg3_debug; 17662 else 17663 tp->msg_enable = TG3_DEF_MSG_ENABLE; 17664 17665 if (pdev_is_ssb_gige_core(pdev)) { 17666 tg3_flag_set(tp, IS_SSB_CORE); 17667 if (ssb_gige_must_flush_posted_writes(pdev)) 17668 tg3_flag_set(tp, FLUSH_POSTED_WRITES); 17669 if (ssb_gige_one_dma_at_once(pdev)) 17670 tg3_flag_set(tp, ONE_DMA_AT_ONCE); 17671 if (ssb_gige_have_roboswitch(pdev)) { 17672 tg3_flag_set(tp, USE_PHYLIB); 17673 tg3_flag_set(tp, ROBOSWITCH); 17674 } 17675 if (ssb_gige_is_rgmii(pdev)) 17676 tg3_flag_set(tp, RGMII_MODE); 17677 } 17678 17679 /* The word/byte swap controls here control register access byte 17680 * swapping. DMA data byte swapping is controlled in the GRC_MODE 17681 * setting below. 17682 */ 17683 tp->misc_host_ctrl = 17684 MISC_HOST_CTRL_MASK_PCI_INT | 17685 MISC_HOST_CTRL_WORD_SWAP | 17686 MISC_HOST_CTRL_INDIR_ACCESS | 17687 MISC_HOST_CTRL_PCISTATE_RW; 17688 17689 /* The NONFRM (non-frame) byte/word swap controls take effect 17690 * on descriptor entries, anything which isn't packet data. 17691 * 17692 * The StrongARM chips on the board (one for tx, one for rx) 17693 * are running in big-endian mode. 17694 */ 17695 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA | 17696 GRC_MODE_WSWAP_NONFRM_DATA); 17697 #ifdef __BIG_ENDIAN 17698 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA; 17699 #endif 17700 spin_lock_init(&tp->lock); 17701 spin_lock_init(&tp->indirect_lock); 17702 INIT_WORK(&tp->reset_task, tg3_reset_task); 17703 17704 tp->regs = pci_ioremap_bar(pdev, BAR_0); 17705 if (!tp->regs) { 17706 dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); 17707 err = -ENOMEM; 17708 goto err_out_free_dev; 17709 } 17710 17711 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || 17712 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E || 17713 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S || 17714 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE || 17715 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || 17716 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C || 17717 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || 17718 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || 17719 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 || 17720 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 || 17721 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 || 17722 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 || 17723 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 || 17724 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 || 17725 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) { 17726 tg3_flag_set(tp, ENABLE_APE); 17727 tp->aperegs = pci_ioremap_bar(pdev, BAR_2); 17728 if (!tp->aperegs) { 17729 dev_err(&pdev->dev, 17730 "Cannot map APE registers, aborting\n"); 17731 err = -ENOMEM; 17732 goto err_out_iounmap; 17733 } 17734 } 17735 17736 tp->rx_pending = TG3_DEF_RX_RING_PENDING; 17737 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING; 17738 17739 dev->ethtool_ops = &tg3_ethtool_ops; 17740 dev->watchdog_timeo = TG3_TX_TIMEOUT; 17741 dev->netdev_ops = &tg3_netdev_ops; 17742 
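	/* This records the legacy INTx number; if MSI or MSI-X is
	 * enabled later, per-vector IRQs are assigned at that point.
	 */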

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp, ent);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}
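
/* A minimal sketch (unused by the driver) of the mask policy applied
 * above, collapsed into one pure function.  It returns the persistent
 * (coherent) mask; note the streaming mask may still be widened to
 * 64 bits in the 40-bit case on CONFIG_HIGHMEM builds.  The helper
 * name is made up for this example.
 */
static inline u64 __maybe_unused tg3_example_persist_dma_mask(struct tg3 *tp)
{
	if (tg3_flag(tp, IS_5788))
		return DMA_BIT_MASK(32);	/* oldest parts: 32-bit DMA only */
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return DMA_BIT_MASK(40);	/* EPB bridge addressing limit */
	return DMA_BIT_MASK(64);
}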

	tg3_init_bufmgr_config(tp);

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
		    tg3_asic_rev(tp) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
			 NETIF_F_HW_VLAN_CTAG_RX;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow
	 * INT-PHY loopback for the remaining devices.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;
	dev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 60 - 9000 or 1500, depending on hardware */
	dev->min_mtu = TG3_MIN_MTU;
	dev->max_mtu = TG3_MAX_MTU(tp);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we set
		 * up above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}
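
/* Illustrative helper (never called): the interrupt mailbox the loop
 * above assigns to MSI-X vector @i, written in closed form.  Vectors
 * 0..4 are spaced 0x8 apart; later vectors are packed 0x4 apart.  The
 * helper name is made up for this sketch.
 */
static inline u32 __maybe_unused tg3_example_int_mbox(int i)
{
	u32 base = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;

	return i <= 4 ? base + i * 0x8
		      : base + 5 * 0x8 + (i - 5) * 0x4;
}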

	/*
	 * Reset the chip in case the UNDI or EFI driver did not shut it
	 * down.  The DMA self test will enable WDMAC and we'll see
	 * (spurious) pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tg3_full_lock(tp, 0);
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, PTP_CAPABLE);

	tg3_timer_init(tp);

	tg3_carrier_off(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	if (tg3_flag(tp, PTP_CAPABLE)) {
		tg3_ptp_init(tp);
		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
						   &tp->pdev->dev);
		if (IS_ERR(tp->ptp_clock))
			tp->ptp_clock = NULL;
	}
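
	/* If ptp_clock_register() fails, probe does not fail with it:
	 * the driver simply clears tp->ptp_clock and brings the NIC up
	 * without a PTP hardware clock.
	 */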

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tg3_chip_rev_id(tp),
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	return err;
}

static void tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		tg3_ptp_fini(tp);

		release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
	}
}

#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, true);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

unlock:
	rtnl_unlock();
	return err;
}

static int tg3_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp,
			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

unlock:
	rtnl_unlock();
	return err;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
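
/* Note: SIMPLE_DEV_PM_OPS() above binds tg3_suspend/tg3_resume to all
 * of the system-sleep callbacks (suspend/resume, freeze/thaw,
 * poweroff/restore); no runtime-PM callbacks are installed.
 */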

static void tg3_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (netif_running(dev))
		dev_close(dev);

	if (system_state == SYSTEM_POWER_OFF)
		tg3_power_down(tp);

	rtnl_unlock();
}

/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	/* We probably don't have netdev yet */
	if (!netdev || !netif_running(netdev))
		goto done;

	/* We needn't recover from permanent error */
	if (state == pci_channel_io_frozen)
		tp->pcierr_recovery = true;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure) {
		if (netdev) {
			tg3_napi_enable(tp);
			dev_close(netdev);
		}
		err = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_disable_device(pdev);
	}

	rtnl_unlock();

	return err;
}

/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netdev || !netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
		tg3_napi_enable(tp);
		dev_close(netdev);
	}
	rtnl_unlock();

	return rc;
}
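
/* The AER core drives recovery through these handlers in order:
 *   tg3_io_error_detected()  - quiesce the device, usually returning
 *                              PCI_ERS_RESULT_NEED_RESET
 *   tg3_io_slot_reset()      - re-enable the device and restore its
 *                              PCI config space after the bus reset
 *   tg3_io_resume()          - reprogram the chip and restart traffic
 */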

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netdev || !netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	tg3_phy_start(tp);

done:
	tp->pcierr_recovery = false;
	rtnl_unlock();
}

static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= &tg3_pm_ops,
	.shutdown	= tg3_shutdown,
};

module_pci_driver(tg3_driver);