/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2016 Broadcom Corporation.
 * Copyright (C) 2016-2017 Broadcom Limited.
 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 * refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2016 Broadcom Corporation.
 *	Copyright (C) 2016-2017 Broadcom Ltd.
 *	Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 *	refers to Broadcom Inc. and/or its subsidiaries.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/crc32poly.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)

#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		137
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"May 11, 2014"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			ETH_ZLEN
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode. The driver
 * works around this bug by double copying the packet. This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient. For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path. Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
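 *
 * For illustration only (not part of the driver logic): with the
 * 256-byte TG3_RX_COPY_THRESHOLD defined below, a 60-byte ARP frame
 * falls under the threshold and is double copied into a small freshly
 * allocated skb so its DMA buffer can be recycled in place, while a
 * 1500-byte frame is unmapped and passed up the stack as-is.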
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
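	/* Keep the PCI_DEVICE_SUB() entries above ahead of the generic
	 * PCI_DEVICE() IDs that follow: the PCI core matches this table in
	 * order, so a subsystem-specific entry (and its driver_data quirk)
	 * must be seen before the catch-all entry for the same device ID.
	 */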
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
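	 *
	 * Illustrative use (a sketch of existing callers, not new logic):
	 * toggling a GPIO power switch via GRC_LOCAL_CTRL must be followed
	 * by a quiet period of TG3_GRC_LCLCTL_PWRSW_DELAY usec, so such a
	 * caller goes through the tw32_wait_f() macro defined below:
	 *
	 *	tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
	 *		    TG3_GRC_LCLCTL_PWRSW_DELAY);
	 *
	 * and the delay is honored on both the posted and non-posted paths.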
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver doesn't hold any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		/* else: fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		/* else: fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
#endif

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 20 milliseconds for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 20000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
				APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_send_ape_heartbeat(struct tg3 *tp,
				   unsigned long interval)
{
	/* Check if the heartbeat interval has elapsed */
	if (!tg3_flag(tp, ENABLE_APE) ||
	    time_before(jiffies, tp->ape_hb_jiffies + interval))
		return;

	tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
	tp->ape_hb_jiffies = jiffies;
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) ||
	      tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
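	/* A sketch of the MI_COM frame this function assembles (for
	 * illustration only): writing 'val' to BMCR of the PHY at
	 * address 1 programs
	 *
	 *	((1 << MI_COM_PHY_ADDR_SHIFT) & MI_COM_PHY_ADDR_MASK) |
	 *	((MII_BMCR << MI_COM_REG_ADDR_SHIFT) & MI_COM_REG_ADDR_MASK) |
	 *	(val & MI_COM_DATA_MASK) | MI_COM_CMD_WRITE | MI_COM_START
	 *
	 * and then polls MI_COM until MI_COM_BUSY clears.
	 */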
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
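	 *
	 * (As a concrete illustration of the probe below: if reading BMCR
	 * fails outright, or returns a value with BMCR_PDOWN set, the PHY
	 * is assumed to be powered down and tg3_bmcr_reset() is issued
	 * before mdiobus_register() scans the bus.)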
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fall through */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}

/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}

/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			if (pci_channel_offline(tp->pdev))
				return -ENODEV;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		if (pci_channel_offline(tp->pdev)) {
			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
				tg3_flag_set(tp, NO_FWARE_REPORTED);
				netdev_info(tp->dev, "No firmware running\n");
			}

			break;
		}

		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}

	tp->link_up = netif_carrier_ok(tp->dev);
}

static u32 tg3_decode_flowctrl_1000T(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_PAUSE_CAP) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_PAUSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_PAUSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u32 tg3_decode_flowctrl_1000X(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_1000XPAUSE) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_1000XPSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_1000XPSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}

static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}

static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 tg3_asic_rev(tp) != ASIC_REV_5785)
mac_mode |= MAC_MODE_PORT_MODE_GMII; 2034 else 2035 mac_mode |= MAC_MODE_PORT_MODE_MII; 2036 2037 if (phydev->duplex == DUPLEX_HALF) 2038 mac_mode |= MAC_MODE_HALF_DUPLEX; 2039 else { 2040 lcl_adv = mii_advertise_flowctrl( 2041 tp->link_config.flowctrl); 2042 2043 if (phydev->pause) 2044 rmt_adv = LPA_PAUSE_CAP; 2045 if (phydev->asym_pause) 2046 rmt_adv |= LPA_PAUSE_ASYM; 2047 } 2048 2049 tg3_setup_flow_control(tp, lcl_adv, rmt_adv); 2050 } else 2051 mac_mode |= MAC_MODE_PORT_MODE_GMII; 2052 2053 if (mac_mode != tp->mac_mode) { 2054 tp->mac_mode = mac_mode; 2055 tw32_f(MAC_MODE, tp->mac_mode); 2056 udelay(40); 2057 } 2058 2059 if (tg3_asic_rev(tp) == ASIC_REV_5785) { 2060 if (phydev->speed == SPEED_10) 2061 tw32(MAC_MI_STAT, 2062 MAC_MI_STAT_10MBPS_MODE | 2063 MAC_MI_STAT_LNKSTAT_ATTN_ENAB); 2064 else 2065 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); 2066 } 2067 2068 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF) 2069 tw32(MAC_TX_LENGTHS, 2070 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | 2071 (6 << TX_LENGTHS_IPG_SHIFT) | 2072 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT))); 2073 else 2074 tw32(MAC_TX_LENGTHS, 2075 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | 2076 (6 << TX_LENGTHS_IPG_SHIFT) | 2077 (32 << TX_LENGTHS_SLOT_TIME_SHIFT))); 2078 2079 if (phydev->link != tp->old_link || 2080 phydev->speed != tp->link_config.active_speed || 2081 phydev->duplex != tp->link_config.active_duplex || 2082 oldflowctrl != tp->link_config.active_flowctrl) 2083 linkmesg = 1; 2084 2085 tp->old_link = phydev->link; 2086 tp->link_config.active_speed = phydev->speed; 2087 tp->link_config.active_duplex = phydev->duplex; 2088 2089 spin_unlock_bh(&tp->lock); 2090 2091 if (linkmesg) 2092 tg3_link_report(tp); 2093 } 2094 2095 static int tg3_phy_init(struct tg3 *tp) 2096 { 2097 struct phy_device *phydev; 2098 2099 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) 2100 return 0; 2101 2102 /* Bring the PHY back to a known state. */ 2103 tg3_bmcr_reset(tp); 2104 2105 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 2106 2107 /* Attach the MAC to the PHY. */ 2108 phydev = phy_connect(tp->dev, phydev_name(phydev), 2109 tg3_adjust_link, phydev->interface); 2110 if (IS_ERR(phydev)) { 2111 dev_err(&tp->pdev->dev, "Could not attach to PHY\n"); 2112 return PTR_ERR(phydev); 2113 } 2114 2115 /* Mask with MAC supported features. 
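* Only GMII/RGMII attachments that are not flagged 10/100-only may
* advertise 1000 Mbps; plain MII attachments (and 10/100-only PHYs)
* are capped at 100 Mbps by the switch below.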
*/ 2116 switch (phydev->interface) { 2117 case PHY_INTERFACE_MODE_GMII: 2118 case PHY_INTERFACE_MODE_RGMII: 2119 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 2120 phy_set_max_speed(phydev, SPEED_1000); 2121 phy_support_asym_pause(phydev); 2122 break; 2123 } 2124 /* fall through */ 2125 case PHY_INTERFACE_MODE_MII: 2126 phy_set_max_speed(phydev, SPEED_100); 2127 phy_support_asym_pause(phydev); 2128 break; 2129 default: 2130 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); 2131 return -EINVAL; 2132 } 2133 2134 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED; 2135 2136 phy_attached_info(phydev); 2137 2138 return 0; 2139 } 2140 2141 static void tg3_phy_start(struct tg3 *tp) 2142 { 2143 struct phy_device *phydev; 2144 2145 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 2146 return; 2147 2148 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 2149 2150 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { 2151 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; 2152 phydev->speed = tp->link_config.speed; 2153 phydev->duplex = tp->link_config.duplex; 2154 phydev->autoneg = tp->link_config.autoneg; 2155 ethtool_convert_legacy_u32_to_link_mode( 2156 phydev->advertising, tp->link_config.advertising); 2157 } 2158 2159 phy_start(phydev); 2160 2161 phy_start_aneg(phydev); 2162 } 2163 2164 static void tg3_phy_stop(struct tg3 *tp) 2165 { 2166 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 2167 return; 2168 2169 phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); 2170 } 2171 2172 static void tg3_phy_fini(struct tg3 *tp) 2173 { 2174 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { 2175 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); 2176 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED; 2177 } 2178 } 2179 2180 static int tg3_phy_set_extloopbk(struct tg3 *tp) 2181 { 2182 int err; 2183 u32 val; 2184 2185 if (tp->phy_flags & TG3_PHYFLG_IS_FET) 2186 return 0; 2187 2188 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { 2189 /* Cannot do read-modify-write on 5401 */ 2190 err = tg3_phy_auxctl_write(tp, 2191 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 2192 MII_TG3_AUXCTL_ACTL_EXTLOOPBK | 2193 0x4c20); 2194 goto done; 2195 } 2196 2197 err = tg3_phy_auxctl_read(tp, 2198 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val); 2199 if (err) 2200 return err; 2201 2202 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK; 2203 err = tg3_phy_auxctl_write(tp, 2204 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val); 2205 2206 done: 2207 return err; 2208 } 2209 2210 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable) 2211 { 2212 u32 phytest; 2213 2214 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) { 2215 u32 phy; 2216 2217 tg3_writephy(tp, MII_TG3_FET_TEST, 2218 phytest | MII_TG3_FET_SHADOW_EN); 2219 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) { 2220 if (enable) 2221 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD; 2222 else 2223 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD; 2224 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy); 2225 } 2226 tg3_writephy(tp, MII_TG3_FET_TEST, phytest); 2227 } 2228 } 2229 2230 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable) 2231 { 2232 u32 reg; 2233 2234 if (!tg3_flag(tp, 5705_PLUS) || 2235 (tg3_flag(tp, 5717_PLUS) && 2236 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))) 2237 return; 2238 2239 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 2240 tg3_phy_fet_toggle_apd(tp, enable); 2241 return; 2242 } 2243 2244 reg = MII_TG3_MISC_SHDW_SCR5_LPED | 2245 MII_TG3_MISC_SHDW_SCR5_DLPTLM | 2246 MII_TG3_MISC_SHDW_SCR5_SDTL | 2247 MII_TG3_MISC_SHDW_SCR5_C125OE; 2248 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable) 2249 reg |= 
MII_TG3_MISC_SHDW_SCR5_DLLAPD; 2250 2251 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg); 2252 2253 2254 reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS; 2255 if (enable) 2256 reg |= MII_TG3_MISC_SHDW_APD_ENABLE; 2257 2258 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg); 2259 } 2260 2261 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable) 2262 { 2263 u32 phy; 2264 2265 if (!tg3_flag(tp, 5705_PLUS) || 2266 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 2267 return; 2268 2269 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 2270 u32 ephy; 2271 2272 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) { 2273 u32 reg = MII_TG3_FET_SHDW_MISCCTRL; 2274 2275 tg3_writephy(tp, MII_TG3_FET_TEST, 2276 ephy | MII_TG3_FET_SHADOW_EN); 2277 if (!tg3_readphy(tp, reg, &phy)) { 2278 if (enable) 2279 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX; 2280 else 2281 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX; 2282 tg3_writephy(tp, reg, phy); 2283 } 2284 tg3_writephy(tp, MII_TG3_FET_TEST, ephy); 2285 } 2286 } else { 2287 int ret; 2288 2289 ret = tg3_phy_auxctl_read(tp, 2290 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy); 2291 if (!ret) { 2292 if (enable) 2293 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX; 2294 else 2295 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX; 2296 tg3_phy_auxctl_write(tp, 2297 MII_TG3_AUXCTL_SHDWSEL_MISC, phy); 2298 } 2299 } 2300 } 2301 2302 static void tg3_phy_set_wirespeed(struct tg3 *tp) 2303 { 2304 int ret; 2305 u32 val; 2306 2307 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) 2308 return; 2309 2310 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val); 2311 if (!ret) 2312 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, 2313 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN); 2314 } 2315 2316 static void tg3_phy_apply_otp(struct tg3 *tp) 2317 { 2318 u32 otp, phy; 2319 2320 if (!tp->phy_otp) 2321 return; 2322 2323 otp = tp->phy_otp; 2324 2325 if (tg3_phy_toggle_auxctl_smdsp(tp, true)) 2326 return; 2327 2328 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT); 2329 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT; 2330 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy); 2331 2332 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) | 2333 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT); 2334 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy); 2335 2336 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT); 2337 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ; 2338 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy); 2339 2340 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT); 2341 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy); 2342 2343 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT); 2344 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy); 2345 2346 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) | 2347 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT); 2348 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy); 2349 2350 tg3_phy_toggle_auxctl_smdsp(tp, false); 2351 } 2352 2353 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee) 2354 { 2355 u32 val; 2356 struct ethtool_eee *dest = &tp->eee; 2357 2358 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) 2359 return; 2360 2361 if (eee) 2362 dest = eee; 2363 2364 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val)) 2365 return; 2366 2367 /* Pull eee_active */ 2368 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T || 2369 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) { 2370 dest->eee_active = 1; 2371 } else 2372 dest->eee_active = 0; 2373 2374 /* Pull lp advertised settings */ 2375 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val)) 
2376 return; 2377 dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val); 2378 2379 /* Pull advertised and eee_enabled settings */ 2380 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val)) 2381 return; 2382 dest->eee_enabled = !!val; 2383 dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val); 2384 2385 /* Pull tx_lpi_enabled */ 2386 val = tr32(TG3_CPMU_EEE_MODE); 2387 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX); 2388 2389 /* Pull lpi timer value */ 2390 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff; 2391 } 2392 2393 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up) 2394 { 2395 u32 val; 2396 2397 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) 2398 return; 2399 2400 tp->setlpicnt = 0; 2401 2402 if (tp->link_config.autoneg == AUTONEG_ENABLE && 2403 current_link_up && 2404 tp->link_config.active_duplex == DUPLEX_FULL && 2405 (tp->link_config.active_speed == SPEED_100 || 2406 tp->link_config.active_speed == SPEED_1000)) { 2407 u32 eeectl; 2408 2409 if (tp->link_config.active_speed == SPEED_1000) 2410 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US; 2411 else 2412 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US; 2413 2414 tw32(TG3_CPMU_EEE_CTRL, eeectl); 2415 2416 tg3_eee_pull_config(tp, NULL); 2417 if (tp->eee.eee_active) 2418 tp->setlpicnt = 2; 2419 } 2420 2421 if (!tp->setlpicnt) { 2422 if (current_link_up && 2423 !tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2424 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000); 2425 tg3_phy_toggle_auxctl_smdsp(tp, false); 2426 } 2427 2428 val = tr32(TG3_CPMU_EEE_MODE); 2429 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE); 2430 } 2431 } 2432 2433 static void tg3_phy_eee_enable(struct tg3 *tp) 2434 { 2435 u32 val; 2436 2437 if (tp->link_config.active_speed == SPEED_1000 && 2438 (tg3_asic_rev(tp) == ASIC_REV_5717 || 2439 tg3_asic_rev(tp) == ASIC_REV_5719 || 2440 tg3_flag(tp, 57765_CLASS)) && 2441 !tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2442 val = MII_TG3_DSP_TAP26_ALNOKO | 2443 MII_TG3_DSP_TAP26_RMRXSTO; 2444 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); 2445 tg3_phy_toggle_auxctl_smdsp(tp, false); 2446 } 2447 2448 val = tr32(TG3_CPMU_EEE_MODE); 2449 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE); 2450 } 2451 2452 static int tg3_wait_macro_done(struct tg3 *tp) 2453 { 2454 int limit = 100; 2455 2456 while (limit--) { 2457 u32 tmp32; 2458 2459 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) { 2460 if ((tmp32 & 0x1000) == 0) 2461 break; 2462 } 2463 } 2464 if (limit < 0) 2465 return -EBUSY; 2466 2467 return 0; 2468 } 2469 2470 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp) 2471 { 2472 static const u32 test_pat[4][6] = { 2473 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 }, 2474 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 }, 2475 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 }, 2476 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 } 2477 }; 2478 int chan; 2479 2480 for (chan = 0; chan < 4; chan++) { 2481 int i; 2482 2483 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 2484 (chan * 0x2000) | 0x0200); 2485 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002); 2486 2487 for (i = 0; i < 6; i++) 2488 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 2489 test_pat[chan][i]); 2490 2491 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202); 2492 if (tg3_wait_macro_done(tp)) { 2493 *resetp = 1; 2494 return -EBUSY; 2495 } 2496 2497 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 2498 (chan * 0x2000) | 0x0200); 2499 tg3_writephy(tp, 
MII_TG3_DSP_CONTROL, 0x0082);
2500 if (tg3_wait_macro_done(tp)) {
2501 *resetp = 1;
2502 return -EBUSY;
2503 }
2504
2505 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2506 if (tg3_wait_macro_done(tp)) {
2507 *resetp = 1;
2508 return -EBUSY;
2509 }
2510
2511 for (i = 0; i < 6; i += 2) {
2512 u32 low, high;
2513
2514 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2515 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2516 tg3_wait_macro_done(tp)) {
2517 *resetp = 1;
2518 return -EBUSY;
2519 }
2520 low &= 0x7fff;
2521 high &= 0x000f;
2522 if (low != test_pat[chan][i] ||
2523 high != test_pat[chan][i+1]) {
2524 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2525 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2526 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2527
2528 return -EBUSY;
2529 }
2530 }
2531 }
2532
2533 return 0;
2534 }
2535
2536 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2537 {
2538 int chan;
2539
2540 for (chan = 0; chan < 4; chan++) {
2541 int i;
2542
2543 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2544 (chan * 0x2000) | 0x0200);
2545 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2546 for (i = 0; i < 6; i++)
2547 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2548 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2549 if (tg3_wait_macro_done(tp))
2550 return -EBUSY;
2551 }
2552
2553 return 0;
2554 }
2555
2556 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2557 {
2558 u32 reg32, phy9_orig;
2559 int retries, do_phy_reset, err;
2560
2561 retries = 10;
2562 do_phy_reset = 1;
2563 do {
2564 if (do_phy_reset) {
2565 err = tg3_bmcr_reset(tp);
2566 if (err)
2567 return err;
2568 do_phy_reset = 0;
2569 }
2570
2571 /* Disable transmitter and interrupt. */
2572 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2573 continue;
2574
2575 reg32 |= 0x3000;
2576 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2577
2578 /* Set full-duplex, 1000 mbps. */
2579 tg3_writephy(tp, MII_BMCR,
2580 BMCR_FULLDPLX | BMCR_SPEED1000);
2581
2582 /* Set to master mode. */
2583 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2584 continue;
2585
2586 tg3_writephy(tp, MII_CTRL1000,
2587 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2588
2589 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2590 if (err)
2591 return err;
2592
2593 /* Block the PHY control access. */
2594 tg3_phydsp_write(tp, 0x8005, 0x0800);
2595
2596 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2597 if (!err)
2598 break;
2599 } while (--retries);
2600
2601 err = tg3_phy_reset_chanpat(tp);
2602 if (err)
2603 return err;
2604
2605 tg3_phydsp_write(tp, 0x8005, 0x0000);
2606
2607 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2608 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2609
2610 tg3_phy_toggle_auxctl_smdsp(tp, false);
2611
2612 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2613
2614 err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2615 if (err)
2616 return err;
2617
2618 reg32 &= ~0x3000;
2619 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2620
2621 return 0;
2622 }
2623
2624 static void tg3_carrier_off(struct tg3 *tp)
2625 {
2626 netif_carrier_off(tp->dev);
2627 tp->link_up = false;
2628 }
2629
2630 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2631 {
2632 if (tg3_flag(tp, ENABLE_ASF))
2633 netdev_warn(tp->dev,
2634 "Management side-band traffic will be interrupted during phy settings change\n");
2635 }
2636
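/* An illustrative sketch, not part of the driver: the read-modify-write
 * idiom used on the AUXCTL shadow registers throughout this file (see
 * tg3_phy_set_extloopbk() above).  The helper name is hypothetical, and
 * the idiom does not apply to the BCM5401, which cannot do
 * read-modify-write on these registers.
 */
static int tg3_example_auxctl_set_bits(struct tg3 *tp, u32 shdw_sel, u32 bits)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, shdw_sel, &val);
	if (err)
		return err;

	return tg3_phy_auxctl_write(tp, shdw_sel, val | bits);
}

/* This will reset the tigon3 PHY and reapply any chip-specific
 * workarounds afterwards.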
2639 */ 2640 static int tg3_phy_reset(struct tg3 *tp) 2641 { 2642 u32 val, cpmuctrl; 2643 int err; 2644 2645 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 2646 val = tr32(GRC_MISC_CFG); 2647 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ); 2648 udelay(40); 2649 } 2650 err = tg3_readphy(tp, MII_BMSR, &val); 2651 err |= tg3_readphy(tp, MII_BMSR, &val); 2652 if (err != 0) 2653 return -EBUSY; 2654 2655 if (netif_running(tp->dev) && tp->link_up) { 2656 netif_carrier_off(tp->dev); 2657 tg3_link_report(tp); 2658 } 2659 2660 if (tg3_asic_rev(tp) == ASIC_REV_5703 || 2661 tg3_asic_rev(tp) == ASIC_REV_5704 || 2662 tg3_asic_rev(tp) == ASIC_REV_5705) { 2663 err = tg3_phy_reset_5703_4_5(tp); 2664 if (err) 2665 return err; 2666 goto out; 2667 } 2668 2669 cpmuctrl = 0; 2670 if (tg3_asic_rev(tp) == ASIC_REV_5784 && 2671 tg3_chip_rev(tp) != CHIPREV_5784_AX) { 2672 cpmuctrl = tr32(TG3_CPMU_CTRL); 2673 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) 2674 tw32(TG3_CPMU_CTRL, 2675 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY); 2676 } 2677 2678 err = tg3_bmcr_reset(tp); 2679 if (err) 2680 return err; 2681 2682 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) { 2683 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz; 2684 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val); 2685 2686 tw32(TG3_CPMU_CTRL, cpmuctrl); 2687 } 2688 2689 if (tg3_chip_rev(tp) == CHIPREV_5784_AX || 2690 tg3_chip_rev(tp) == CHIPREV_5761_AX) { 2691 val = tr32(TG3_CPMU_LSPD_1000MB_CLK); 2692 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) == 2693 CPMU_LSPD_1000MB_MACCLK_12_5) { 2694 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK; 2695 udelay(40); 2696 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val); 2697 } 2698 } 2699 2700 if (tg3_flag(tp, 5717_PLUS) && 2701 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) 2702 return 0; 2703 2704 tg3_phy_apply_otp(tp); 2705 2706 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) 2707 tg3_phy_toggle_apd(tp, true); 2708 else 2709 tg3_phy_toggle_apd(tp, false); 2710 2711 out: 2712 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) && 2713 !tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2714 tg3_phydsp_write(tp, 0x201f, 0x2aaa); 2715 tg3_phydsp_write(tp, 0x000a, 0x0323); 2716 tg3_phy_toggle_auxctl_smdsp(tp, false); 2717 } 2718 2719 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) { 2720 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); 2721 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); 2722 } 2723 2724 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) { 2725 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2726 tg3_phydsp_write(tp, 0x000a, 0x310b); 2727 tg3_phydsp_write(tp, 0x201f, 0x9506); 2728 tg3_phydsp_write(tp, 0x401f, 0x14e2); 2729 tg3_phy_toggle_auxctl_smdsp(tp, false); 2730 } 2731 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) { 2732 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2733 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); 2734 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) { 2735 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b); 2736 tg3_writephy(tp, MII_TG3_TEST1, 2737 MII_TG3_TEST1_TRIM_EN | 0x4); 2738 } else 2739 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b); 2740 2741 tg3_phy_toggle_auxctl_smdsp(tp, false); 2742 } 2743 } 2744 2745 /* Set Extended packet length bit (bit 14) on all chips that */ 2746 /* support jumbo frames */ 2747 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { 2748 /* Cannot do read-modify-write on 5401 */ 2749 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20); 2750 } else if (tg3_flag(tp, JUMBO_CAPABLE)) { 2751 /* Set bit 14 with read-modify-write to preserve other bits */ 2752 err = tg3_phy_auxctl_read(tp, 2753 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val); 2754 
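/* Only rewrite the register if the shadow read succeeded. */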
if (!err) 2755 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 2756 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN); 2757 } 2758 2759 /* Set phy register 0x10 bit 0 to high fifo elasticity to support 2760 * jumbo frames transmission. 2761 */ 2762 if (tg3_flag(tp, JUMBO_CAPABLE)) { 2763 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val)) 2764 tg3_writephy(tp, MII_TG3_EXT_CTRL, 2765 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC); 2766 } 2767 2768 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 2769 /* adjust output voltage */ 2770 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12); 2771 } 2772 2773 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0) 2774 tg3_phydsp_write(tp, 0xffb, 0x4000); 2775 2776 tg3_phy_toggle_automdix(tp, true); 2777 tg3_phy_set_wirespeed(tp); 2778 return 0; 2779 } 2780 2781 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001 2782 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002 2783 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \ 2784 TG3_GPIO_MSG_NEED_VAUX) 2785 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \ 2786 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \ 2787 (TG3_GPIO_MSG_DRVR_PRES << 4) | \ 2788 (TG3_GPIO_MSG_DRVR_PRES << 8) | \ 2789 (TG3_GPIO_MSG_DRVR_PRES << 12)) 2790 2791 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \ 2792 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \ 2793 (TG3_GPIO_MSG_NEED_VAUX << 4) | \ 2794 (TG3_GPIO_MSG_NEED_VAUX << 8) | \ 2795 (TG3_GPIO_MSG_NEED_VAUX << 12)) 2796 2797 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat) 2798 { 2799 u32 status, shift; 2800 2801 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 2802 tg3_asic_rev(tp) == ASIC_REV_5719) 2803 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG); 2804 else 2805 status = tr32(TG3_CPMU_DRV_STATUS); 2806 2807 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn; 2808 status &= ~(TG3_GPIO_MSG_MASK << shift); 2809 status |= (newstat << shift); 2810 2811 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 2812 tg3_asic_rev(tp) == ASIC_REV_5719) 2813 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status); 2814 else 2815 tw32(TG3_CPMU_DRV_STATUS, status); 2816 2817 return status >> TG3_APE_GPIO_MSG_SHIFT; 2818 } 2819 2820 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp) 2821 { 2822 if (!tg3_flag(tp, IS_NIC)) 2823 return 0; 2824 2825 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 2826 tg3_asic_rev(tp) == ASIC_REV_5719 || 2827 tg3_asic_rev(tp) == ASIC_REV_5720) { 2828 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO)) 2829 return -EIO; 2830 2831 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES); 2832 2833 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 2834 TG3_GRC_LCLCTL_PWRSW_DELAY); 2835 2836 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO); 2837 } else { 2838 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 2839 TG3_GRC_LCLCTL_PWRSW_DELAY); 2840 } 2841 2842 return 0; 2843 } 2844 2845 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp) 2846 { 2847 u32 grc_local_ctrl; 2848 2849 if (!tg3_flag(tp, IS_NIC) || 2850 tg3_asic_rev(tp) == ASIC_REV_5700 || 2851 tg3_asic_rev(tp) == ASIC_REV_5701) 2852 return; 2853 2854 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1; 2855 2856 tw32_wait_f(GRC_LOCAL_CTRL, 2857 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1, 2858 TG3_GRC_LCLCTL_PWRSW_DELAY); 2859 2860 tw32_wait_f(GRC_LOCAL_CTRL, 2861 grc_local_ctrl, 2862 TG3_GRC_LCLCTL_PWRSW_DELAY); 2863 2864 tw32_wait_f(GRC_LOCAL_CTRL, 2865 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1, 2866 TG3_GRC_LCLCTL_PWRSW_DELAY); 2867 } 2868 2869 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp) 2870 { 2871 if (!tg3_flag(tp, IS_NIC)) 2872 return; 2873 2874 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 2875 tg3_asic_rev(tp) == 
ASIC_REV_5701) { 2876 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | 2877 (GRC_LCLCTRL_GPIO_OE0 | 2878 GRC_LCLCTRL_GPIO_OE1 | 2879 GRC_LCLCTRL_GPIO_OE2 | 2880 GRC_LCLCTRL_GPIO_OUTPUT0 | 2881 GRC_LCLCTRL_GPIO_OUTPUT1), 2882 TG3_GRC_LCLCTL_PWRSW_DELAY); 2883 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || 2884 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { 2885 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */ 2886 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 | 2887 GRC_LCLCTRL_GPIO_OE1 | 2888 GRC_LCLCTRL_GPIO_OE2 | 2889 GRC_LCLCTRL_GPIO_OUTPUT0 | 2890 GRC_LCLCTRL_GPIO_OUTPUT1 | 2891 tp->grc_local_ctrl; 2892 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 2893 TG3_GRC_LCLCTL_PWRSW_DELAY); 2894 2895 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2; 2896 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 2897 TG3_GRC_LCLCTL_PWRSW_DELAY); 2898 2899 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0; 2900 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 2901 TG3_GRC_LCLCTL_PWRSW_DELAY); 2902 } else { 2903 u32 no_gpio2; 2904 u32 grc_local_ctrl = 0; 2905 2906 /* Workaround to prevent overdrawing Amps. */ 2907 if (tg3_asic_rev(tp) == ASIC_REV_5714) { 2908 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; 2909 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | 2910 grc_local_ctrl, 2911 TG3_GRC_LCLCTL_PWRSW_DELAY); 2912 } 2913 2914 /* On 5753 and variants, GPIO2 cannot be used. */ 2915 no_gpio2 = tp->nic_sram_data_cfg & 2916 NIC_SRAM_DATA_CFG_NO_GPIO2; 2917 2918 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | 2919 GRC_LCLCTRL_GPIO_OE1 | 2920 GRC_LCLCTRL_GPIO_OE2 | 2921 GRC_LCLCTRL_GPIO_OUTPUT1 | 2922 GRC_LCLCTRL_GPIO_OUTPUT2; 2923 if (no_gpio2) { 2924 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 | 2925 GRC_LCLCTRL_GPIO_OUTPUT2); 2926 } 2927 tw32_wait_f(GRC_LOCAL_CTRL, 2928 tp->grc_local_ctrl | grc_local_ctrl, 2929 TG3_GRC_LCLCTL_PWRSW_DELAY); 2930 2931 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0; 2932 2933 tw32_wait_f(GRC_LOCAL_CTRL, 2934 tp->grc_local_ctrl | grc_local_ctrl, 2935 TG3_GRC_LCLCTL_PWRSW_DELAY); 2936 2937 if (!no_gpio2) { 2938 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2; 2939 tw32_wait_f(GRC_LOCAL_CTRL, 2940 tp->grc_local_ctrl | grc_local_ctrl, 2941 TG3_GRC_LCLCTL_PWRSW_DELAY); 2942 } 2943 } 2944 } 2945 2946 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable) 2947 { 2948 u32 msg = 0; 2949 2950 /* Serialize power state transitions */ 2951 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO)) 2952 return; 2953 2954 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable) 2955 msg = TG3_GPIO_MSG_NEED_VAUX; 2956 2957 msg = tg3_set_function_status(tp, msg); 2958 2959 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK) 2960 goto done; 2961 2962 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK) 2963 tg3_pwrsrc_switch_to_vaux(tp); 2964 else 2965 tg3_pwrsrc_die_with_vmain(tp); 2966 2967 done: 2968 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO); 2969 } 2970 2971 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol) 2972 { 2973 bool need_vaux = false; 2974 2975 /* The GPIOs do something completely different on 57765. */ 2976 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS)) 2977 return; 2978 2979 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 2980 tg3_asic_rev(tp) == ASIC_REV_5719 || 2981 tg3_asic_rev(tp) == ASIC_REV_5720) { 2982 tg3_frob_aux_power_5717(tp, include_wol ? 
2983 tg3_flag(tp, WOL_ENABLE) != 0 : 0); 2984 return; 2985 } 2986 2987 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) { 2988 struct net_device *dev_peer; 2989 2990 dev_peer = pci_get_drvdata(tp->pdev_peer); 2991 2992 /* remove_one() may have been run on the peer. */ 2993 if (dev_peer) { 2994 struct tg3 *tp_peer = netdev_priv(dev_peer); 2995 2996 if (tg3_flag(tp_peer, INIT_COMPLETE)) 2997 return; 2998 2999 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) || 3000 tg3_flag(tp_peer, ENABLE_ASF)) 3001 need_vaux = true; 3002 } 3003 } 3004 3005 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) || 3006 tg3_flag(tp, ENABLE_ASF)) 3007 need_vaux = true; 3008 3009 if (need_vaux) 3010 tg3_pwrsrc_switch_to_vaux(tp); 3011 else 3012 tg3_pwrsrc_die_with_vmain(tp); 3013 } 3014 3015 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed) 3016 { 3017 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2) 3018 return 1; 3019 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) { 3020 if (speed != SPEED_10) 3021 return 1; 3022 } else if (speed == SPEED_10) 3023 return 1; 3024 3025 return 0; 3026 } 3027 3028 static bool tg3_phy_power_bug(struct tg3 *tp) 3029 { 3030 switch (tg3_asic_rev(tp)) { 3031 case ASIC_REV_5700: 3032 case ASIC_REV_5704: 3033 return true; 3034 case ASIC_REV_5780: 3035 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 3036 return true; 3037 return false; 3038 case ASIC_REV_5717: 3039 if (!tp->pci_fn) 3040 return true; 3041 return false; 3042 case ASIC_REV_5719: 3043 case ASIC_REV_5720: 3044 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 3045 !tp->pci_fn) 3046 return true; 3047 return false; 3048 } 3049 3050 return false; 3051 } 3052 3053 static bool tg3_phy_led_bug(struct tg3 *tp) 3054 { 3055 switch (tg3_asic_rev(tp)) { 3056 case ASIC_REV_5719: 3057 case ASIC_REV_5720: 3058 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && 3059 !tp->pci_fn) 3060 return true; 3061 return false; 3062 } 3063 3064 return false; 3065 } 3066 3067 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) 3068 { 3069 u32 val; 3070 3071 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) 3072 return; 3073 3074 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 3075 if (tg3_asic_rev(tp) == ASIC_REV_5704) { 3076 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL); 3077 u32 serdes_cfg = tr32(MAC_SERDES_CFG); 3078 3079 sg_dig_ctrl |= 3080 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET; 3081 tw32(SG_DIG_CTRL, sg_dig_ctrl); 3082 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15)); 3083 } 3084 return; 3085 } 3086 3087 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 3088 tg3_bmcr_reset(tp); 3089 val = tr32(GRC_MISC_CFG); 3090 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ); 3091 udelay(40); 3092 return; 3093 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 3094 u32 phytest; 3095 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) { 3096 u32 phy; 3097 3098 tg3_writephy(tp, MII_ADVERTISE, 0); 3099 tg3_writephy(tp, MII_BMCR, 3100 BMCR_ANENABLE | BMCR_ANRESTART); 3101 3102 tg3_writephy(tp, MII_TG3_FET_TEST, 3103 phytest | MII_TG3_FET_SHADOW_EN); 3104 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) { 3105 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD; 3106 tg3_writephy(tp, 3107 MII_TG3_FET_SHDW_AUXMODE4, 3108 phy); 3109 } 3110 tg3_writephy(tp, MII_TG3_FET_TEST, phytest); 3111 } 3112 return; 3113 } else if (do_low_power) { 3114 if (!tg3_phy_led_bug(tp)) 3115 tg3_writephy(tp, MII_TG3_EXT_CTRL, 3116 MII_TG3_EXT_CTRL_FORCE_LED_OFF); 3117 3118 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR | 3119 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE | 3120 MII_TG3_AUXCTL_PCTL_VREG_11V; 3121 
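/* Request the low-power state in a single shadow write: 100TX low
 * power, super-isolate, and the 1.1 V regulator setting (as the bit
 * names suggest).
 */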
tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val); 3122 } 3123 3124 /* The PHY should not be powered down on some chips because 3125 * of bugs. 3126 */ 3127 if (tg3_phy_power_bug(tp)) 3128 return; 3129 3130 if (tg3_chip_rev(tp) == CHIPREV_5784_AX || 3131 tg3_chip_rev(tp) == CHIPREV_5761_AX) { 3132 val = tr32(TG3_CPMU_LSPD_1000MB_CLK); 3133 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK; 3134 val |= CPMU_LSPD_1000MB_MACCLK_12_5; 3135 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val); 3136 } 3137 3138 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN); 3139 } 3140 3141 /* tp->lock is held. */ 3142 static int tg3_nvram_lock(struct tg3 *tp) 3143 { 3144 if (tg3_flag(tp, NVRAM)) { 3145 int i; 3146 3147 if (tp->nvram_lock_cnt == 0) { 3148 tw32(NVRAM_SWARB, SWARB_REQ_SET1); 3149 for (i = 0; i < 8000; i++) { 3150 if (tr32(NVRAM_SWARB) & SWARB_GNT1) 3151 break; 3152 udelay(20); 3153 } 3154 if (i == 8000) { 3155 tw32(NVRAM_SWARB, SWARB_REQ_CLR1); 3156 return -ENODEV; 3157 } 3158 } 3159 tp->nvram_lock_cnt++; 3160 } 3161 return 0; 3162 } 3163 3164 /* tp->lock is held. */ 3165 static void tg3_nvram_unlock(struct tg3 *tp) 3166 { 3167 if (tg3_flag(tp, NVRAM)) { 3168 if (tp->nvram_lock_cnt > 0) 3169 tp->nvram_lock_cnt--; 3170 if (tp->nvram_lock_cnt == 0) 3171 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1); 3172 } 3173 } 3174 3175 /* tp->lock is held. */ 3176 static void tg3_enable_nvram_access(struct tg3 *tp) 3177 { 3178 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) { 3179 u32 nvaccess = tr32(NVRAM_ACCESS); 3180 3181 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE); 3182 } 3183 } 3184 3185 /* tp->lock is held. */ 3186 static void tg3_disable_nvram_access(struct tg3 *tp) 3187 { 3188 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) { 3189 u32 nvaccess = tr32(NVRAM_ACCESS); 3190 3191 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE); 3192 } 3193 } 3194 3195 static int tg3_nvram_read_using_eeprom(struct tg3 *tp, 3196 u32 offset, u32 *val) 3197 { 3198 u32 tmp; 3199 int i; 3200 3201 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0) 3202 return -EINVAL; 3203 3204 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK | 3205 EEPROM_ADDR_DEVID_MASK | 3206 EEPROM_ADDR_READ); 3207 tw32(GRC_EEPROM_ADDR, 3208 tmp | 3209 (0 << EEPROM_ADDR_DEVID_SHIFT) | 3210 ((offset << EEPROM_ADDR_ADDR_SHIFT) & 3211 EEPROM_ADDR_ADDR_MASK) | 3212 EEPROM_ADDR_READ | EEPROM_ADDR_START); 3213 3214 for (i = 0; i < 1000; i++) { 3215 tmp = tr32(GRC_EEPROM_ADDR); 3216 3217 if (tmp & EEPROM_ADDR_COMPLETE) 3218 break; 3219 msleep(1); 3220 } 3221 if (!(tmp & EEPROM_ADDR_COMPLETE)) 3222 return -EBUSY; 3223 3224 tmp = tr32(GRC_EEPROM_DATA); 3225 3226 /* 3227 * The data will always be opposite the native endian 3228 * format. Perform a blind byteswap to compensate. 
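* For example, if NVRAM holds 0xaabbccdd, this register supplies
* 0xddccbbaa, and the swab32() below recovers 0xaabbccdd regardless
* of host endianness.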
3229 */ 3230 *val = swab32(tmp); 3231 3232 return 0; 3233 } 3234 3235 #define NVRAM_CMD_TIMEOUT 10000 3236 3237 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd) 3238 { 3239 int i; 3240 3241 tw32(NVRAM_CMD, nvram_cmd); 3242 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) { 3243 usleep_range(10, 40); 3244 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) { 3245 udelay(10); 3246 break; 3247 } 3248 } 3249 3250 if (i == NVRAM_CMD_TIMEOUT) 3251 return -EBUSY; 3252 3253 return 0; 3254 } 3255 3256 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr) 3257 { 3258 if (tg3_flag(tp, NVRAM) && 3259 tg3_flag(tp, NVRAM_BUFFERED) && 3260 tg3_flag(tp, FLASH) && 3261 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) && 3262 (tp->nvram_jedecnum == JEDEC_ATMEL)) 3263 3264 addr = ((addr / tp->nvram_pagesize) << 3265 ATMEL_AT45DB0X1B_PAGE_POS) + 3266 (addr % tp->nvram_pagesize); 3267 3268 return addr; 3269 } 3270 3271 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr) 3272 { 3273 if (tg3_flag(tp, NVRAM) && 3274 tg3_flag(tp, NVRAM_BUFFERED) && 3275 tg3_flag(tp, FLASH) && 3276 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) && 3277 (tp->nvram_jedecnum == JEDEC_ATMEL)) 3278 3279 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) * 3280 tp->nvram_pagesize) + 3281 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1)); 3282 3283 return addr; 3284 } 3285 3286 /* NOTE: Data read in from NVRAM is byteswapped according to 3287 * the byteswapping settings for all other register accesses. 3288 * tg3 devices are BE devices, so on a BE machine, the data 3289 * returned will be exactly as it is seen in NVRAM. On a LE 3290 * machine, the 32-bit value will be byteswapped. 3291 */ 3292 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val) 3293 { 3294 int ret; 3295 3296 if (!tg3_flag(tp, NVRAM)) 3297 return tg3_nvram_read_using_eeprom(tp, offset, val); 3298 3299 offset = tg3_nvram_phys_addr(tp, offset); 3300 3301 if (offset > NVRAM_ADDR_MSK) 3302 return -EINVAL; 3303 3304 ret = tg3_nvram_lock(tp); 3305 if (ret) 3306 return ret; 3307 3308 tg3_enable_nvram_access(tp); 3309 3310 tw32(NVRAM_ADDR, offset); 3311 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO | 3312 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE); 3313 3314 if (ret == 0) 3315 *val = tr32(NVRAM_RDDATA); 3316 3317 tg3_disable_nvram_access(tp); 3318 3319 tg3_nvram_unlock(tp); 3320 3321 return ret; 3322 } 3323 3324 /* Ensures NVRAM data is in bytestream format. */ 3325 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val) 3326 { 3327 u32 v; 3328 int res = tg3_nvram_read(tp, offset, &v); 3329 if (!res) 3330 *val = cpu_to_be32(v); 3331 return res; 3332 } 3333 3334 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp, 3335 u32 offset, u32 len, u8 *buf) 3336 { 3337 int i, j, rc = 0; 3338 u32 val; 3339 3340 for (i = 0; i < len; i += 4) { 3341 u32 addr; 3342 __be32 data; 3343 3344 addr = offset + i; 3345 3346 memcpy(&data, buf + i, 4); 3347 3348 /* 3349 * The SEEPROM interface expects the data to always be opposite 3350 * the native endian format. We accomplish this by reversing 3351 * all the operations that would have been performed on the 3352 * data from a call to tg3_nvram_read_be32(). 
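* (The cpu_to_be32() applied at read time is undone by be32_to_cpu(),
* and swab32() then recreates the reversed byte order the register
* expects.)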
3353 */ 3354 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data))); 3355 3356 val = tr32(GRC_EEPROM_ADDR); 3357 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE); 3358 3359 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK | 3360 EEPROM_ADDR_READ); 3361 tw32(GRC_EEPROM_ADDR, val | 3362 (0 << EEPROM_ADDR_DEVID_SHIFT) | 3363 (addr & EEPROM_ADDR_ADDR_MASK) | 3364 EEPROM_ADDR_START | 3365 EEPROM_ADDR_WRITE); 3366 3367 for (j = 0; j < 1000; j++) { 3368 val = tr32(GRC_EEPROM_ADDR); 3369 3370 if (val & EEPROM_ADDR_COMPLETE) 3371 break; 3372 msleep(1); 3373 } 3374 if (!(val & EEPROM_ADDR_COMPLETE)) { 3375 rc = -EBUSY; 3376 break; 3377 } 3378 } 3379 3380 return rc; 3381 } 3382 3383 /* offset and length are dword aligned */ 3384 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len, 3385 u8 *buf) 3386 { 3387 int ret = 0; 3388 u32 pagesize = tp->nvram_pagesize; 3389 u32 pagemask = pagesize - 1; 3390 u32 nvram_cmd; 3391 u8 *tmp; 3392 3393 tmp = kmalloc(pagesize, GFP_KERNEL); 3394 if (tmp == NULL) 3395 return -ENOMEM; 3396 3397 while (len) { 3398 int j; 3399 u32 phy_addr, page_off, size; 3400 3401 phy_addr = offset & ~pagemask; 3402 3403 for (j = 0; j < pagesize; j += 4) { 3404 ret = tg3_nvram_read_be32(tp, phy_addr + j, 3405 (__be32 *) (tmp + j)); 3406 if (ret) 3407 break; 3408 } 3409 if (ret) 3410 break; 3411 3412 page_off = offset & pagemask; 3413 size = pagesize; 3414 if (len < size) 3415 size = len; 3416 3417 len -= size; 3418 3419 memcpy(tmp + page_off, buf, size); 3420 3421 offset = offset + (pagesize - page_off); 3422 3423 tg3_enable_nvram_access(tp); 3424 3425 /* 3426 * Before we can erase the flash page, we need 3427 * to issue a special "write enable" command. 3428 */ 3429 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; 3430 3431 if (tg3_nvram_exec_cmd(tp, nvram_cmd)) 3432 break; 3433 3434 /* Erase the target page */ 3435 tw32(NVRAM_ADDR, phy_addr); 3436 3437 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR | 3438 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE; 3439 3440 if (tg3_nvram_exec_cmd(tp, nvram_cmd)) 3441 break; 3442 3443 /* Issue another write enable to start the write. 
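* The write enable issued before the page erase was consumed by the
* erase command itself, so the part needs a fresh WREN before it will
* accept program data.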
*/ 3444 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; 3445 3446 if (tg3_nvram_exec_cmd(tp, nvram_cmd)) 3447 break; 3448 3449 for (j = 0; j < pagesize; j += 4) { 3450 __be32 data; 3451 3452 data = *((__be32 *) (tmp + j)); 3453 3454 tw32(NVRAM_WRDATA, be32_to_cpu(data)); 3455 3456 tw32(NVRAM_ADDR, phy_addr + j); 3457 3458 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | 3459 NVRAM_CMD_WR; 3460 3461 if (j == 0) 3462 nvram_cmd |= NVRAM_CMD_FIRST; 3463 else if (j == (pagesize - 4)) 3464 nvram_cmd |= NVRAM_CMD_LAST; 3465 3466 ret = tg3_nvram_exec_cmd(tp, nvram_cmd); 3467 if (ret) 3468 break; 3469 } 3470 if (ret) 3471 break; 3472 } 3473 3474 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE; 3475 tg3_nvram_exec_cmd(tp, nvram_cmd); 3476 3477 kfree(tmp); 3478 3479 return ret; 3480 } 3481 3482 /* offset and length are dword aligned */ 3483 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len, 3484 u8 *buf) 3485 { 3486 int i, ret = 0; 3487 3488 for (i = 0; i < len; i += 4, offset += 4) { 3489 u32 page_off, phy_addr, nvram_cmd; 3490 __be32 data; 3491 3492 memcpy(&data, buf + i, 4); 3493 tw32(NVRAM_WRDATA, be32_to_cpu(data)); 3494 3495 page_off = offset % tp->nvram_pagesize; 3496 3497 phy_addr = tg3_nvram_phys_addr(tp, offset); 3498 3499 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR; 3500 3501 if (page_off == 0 || i == 0) 3502 nvram_cmd |= NVRAM_CMD_FIRST; 3503 if (page_off == (tp->nvram_pagesize - 4)) 3504 nvram_cmd |= NVRAM_CMD_LAST; 3505 3506 if (i == (len - 4)) 3507 nvram_cmd |= NVRAM_CMD_LAST; 3508 3509 if ((nvram_cmd & NVRAM_CMD_FIRST) || 3510 !tg3_flag(tp, FLASH) || 3511 !tg3_flag(tp, 57765_PLUS)) 3512 tw32(NVRAM_ADDR, phy_addr); 3513 3514 if (tg3_asic_rev(tp) != ASIC_REV_5752 && 3515 !tg3_flag(tp, 5755_PLUS) && 3516 (tp->nvram_jedecnum == JEDEC_ST) && 3517 (nvram_cmd & NVRAM_CMD_FIRST)) { 3518 u32 cmd; 3519 3520 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; 3521 ret = tg3_nvram_exec_cmd(tp, cmd); 3522 if (ret) 3523 break; 3524 } 3525 if (!tg3_flag(tp, FLASH)) { 3526 /* We always do complete word writes to eeprom. 
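* Each dword is therefore issued as its own FIRST+LAST transaction.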
*/ 3527 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST); 3528 } 3529 3530 ret = tg3_nvram_exec_cmd(tp, nvram_cmd); 3531 if (ret) 3532 break; 3533 } 3534 return ret; 3535 } 3536 3537 /* offset and length are dword aligned */ 3538 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf) 3539 { 3540 int ret; 3541 3542 if (tg3_flag(tp, EEPROM_WRITE_PROT)) { 3543 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl & 3544 ~GRC_LCLCTRL_GPIO_OUTPUT1); 3545 udelay(40); 3546 } 3547 3548 if (!tg3_flag(tp, NVRAM)) { 3549 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf); 3550 } else { 3551 u32 grc_mode; 3552 3553 ret = tg3_nvram_lock(tp); 3554 if (ret) 3555 return ret; 3556 3557 tg3_enable_nvram_access(tp); 3558 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) 3559 tw32(NVRAM_WRITE1, 0x406); 3560 3561 grc_mode = tr32(GRC_MODE); 3562 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE); 3563 3564 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) { 3565 ret = tg3_nvram_write_block_buffered(tp, offset, len, 3566 buf); 3567 } else { 3568 ret = tg3_nvram_write_block_unbuffered(tp, offset, len, 3569 buf); 3570 } 3571 3572 grc_mode = tr32(GRC_MODE); 3573 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE); 3574 3575 tg3_disable_nvram_access(tp); 3576 tg3_nvram_unlock(tp); 3577 } 3578 3579 if (tg3_flag(tp, EEPROM_WRITE_PROT)) { 3580 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 3581 udelay(40); 3582 } 3583 3584 return ret; 3585 } 3586 3587 #define RX_CPU_SCRATCH_BASE 0x30000 3588 #define RX_CPU_SCRATCH_SIZE 0x04000 3589 #define TX_CPU_SCRATCH_BASE 0x34000 3590 #define TX_CPU_SCRATCH_SIZE 0x04000 3591 3592 /* tp->lock is held. */ 3593 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base) 3594 { 3595 int i; 3596 const int iters = 10000; 3597 3598 for (i = 0; i < iters; i++) { 3599 tw32(cpu_base + CPU_STATE, 0xffffffff); 3600 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); 3601 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT) 3602 break; 3603 if (pci_channel_offline(tp->pdev)) 3604 return -EBUSY; 3605 } 3606 3607 return (i == iters) ? -EBUSY : 0; 3608 } 3609 3610 /* tp->lock is held. */ 3611 static int tg3_rxcpu_pause(struct tg3 *tp) 3612 { 3613 int rc = tg3_pause_cpu(tp, RX_CPU_BASE); 3614 3615 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); 3616 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT); 3617 udelay(10); 3618 3619 return rc; 3620 } 3621 3622 /* tp->lock is held. */ 3623 static int tg3_txcpu_pause(struct tg3 *tp) 3624 { 3625 return tg3_pause_cpu(tp, TX_CPU_BASE); 3626 } 3627 3628 /* tp->lock is held. */ 3629 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base) 3630 { 3631 tw32(cpu_base + CPU_STATE, 0xffffffff); 3632 tw32_f(cpu_base + CPU_MODE, 0x00000000); 3633 } 3634 3635 /* tp->lock is held. */ 3636 static void tg3_rxcpu_resume(struct tg3 *tp) 3637 { 3638 tg3_resume_cpu(tp, RX_CPU_BASE); 3639 } 3640 3641 /* tp->lock is held. */ 3642 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base) 3643 { 3644 int rc; 3645 3646 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)); 3647 3648 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 3649 u32 val = tr32(GRC_VCPU_EXT_CTRL); 3650 3651 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU); 3652 return 0; 3653 } 3654 if (cpu_base == RX_CPU_BASE) { 3655 rc = tg3_rxcpu_pause(tp); 3656 } else { 3657 /* 3658 * There is only an Rx CPU for the 5750 derivative in the 3659 * BCM4785. 
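* Pausing the TX CPU is therefore a no-op on those parts.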
3660 */
3661 if (tg3_flag(tp, IS_SSB_CORE))
3662 return 0;
3663
3664 rc = tg3_txcpu_pause(tp);
3665 }
3666
3667 if (rc) {
3668 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3669 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3670 return -ENODEV;
3671 }
3672
3673 /* Clear firmware's nvram arbitration. */
3674 if (tg3_flag(tp, NVRAM))
3675 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3676 return 0;
3677 }
3678
3679 static int tg3_fw_data_len(struct tg3 *tp,
3680 const struct tg3_firmware_hdr *fw_hdr)
3681 {
3682 int fw_len;
3683
3684 /* Non-fragmented firmware has one firmware header followed by a
3685 * contiguous chunk of data to be written. The length field in that
3686 * header is not the length of data to be written but the complete
3687 * length of the bss. The data length is determined based on
3688 * tp->fw->size minus headers.
3689 *
3690 * Fragmented firmware has a main header followed by multiple
3691 * fragments. Each fragment is identical to non-fragmented firmware,
3692 * with a firmware header followed by a contiguous chunk of data. In
3693 * the main header, the length field is unused and set to 0xffffffff.
3694 * In each fragment header the length is the entire size of that
3695 * fragment, i.e. fragment data + header length. The data length is
3696 * therefore the header's length field minus TG3_FW_HDR_LEN.
3697 */
3698 if (tp->fw_len == 0xffffffff)
3699 fw_len = be32_to_cpu(fw_hdr->len);
3700 else
3701 fw_len = tp->fw->size;
3702
3703 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3704 }
3705
3706 /* tp->lock is held. */
3707 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3708 u32 cpu_scratch_base, int cpu_scratch_size,
3709 const struct tg3_firmware_hdr *fw_hdr)
3710 {
3711 int err, i;
3712 void (*write_op)(struct tg3 *, u32, u32);
3713 int total_len = tp->fw->size;
3714
3715 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3716 netdev_err(tp->dev,
3717 "%s: Trying to load TX cpu firmware on a chip with no TX cpu\n",
3718 __func__);
3719 return -EINVAL;
3720 }
3721
3722 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3723 write_op = tg3_write_mem;
3724 else
3725 write_op = tg3_write_indirect_reg32;
3726
3727 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3728 /* It is possible that bootcode is still loading at this point.
3729 * Get the nvram lock first before halting the cpu.
3730 */
3731 int lock_err = tg3_nvram_lock(tp);
3732 err = tg3_halt_cpu(tp, cpu_base);
3733 if (!lock_err)
3734 tg3_nvram_unlock(tp);
3735 if (err)
3736 goto out;
3737
3738 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3739 write_op(tp, cpu_scratch_base + i, 0);
3740 tw32(cpu_base + CPU_STATE, 0xffffffff);
3741 tw32(cpu_base + CPU_MODE,
3742 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3743 } else {
3744 /* Subtract the additional main header for fragmented firmware
3745 * and advance to the first fragment.
3746 */
3747 total_len -= TG3_FW_HDR_LEN;
3748 fw_hdr++;
3749 }
3750
3751 do {
3752 u32 *fw_data = (u32 *)(fw_hdr + 1);
3753 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3754 write_op(tp, cpu_scratch_base +
3755 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3756 (i * sizeof(u32)),
3757 be32_to_cpu(fw_data[i]));
3758
3759 total_len -= be32_to_cpu(fw_hdr->len);
3760
3761 /* Advance to the next fragment. */
3762 fw_hdr = (struct tg3_firmware_hdr *)
3763 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3764 } while (total_len > 0);
3765
3766 err = 0;
3767
3768 out:
3769 return err;
3770 }
3771
3772 /* tp->lock is held.
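*/

/* A worked sketch, not driver code: walking the fragments of a
 * fragmented firmware image the same way tg3_load_firmware_cpu() does,
 * to check that the per-fragment length fields add up to the blob
 * size.  The helper name is hypothetical.
 */
static bool tg3_example_fw_frags_ok(const struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr =
		(const struct tg3_firmware_hdr *)tp->fw->data;
	int total_len = tp->fw->size - TG3_FW_HDR_LEN; /* skip main header */

	fw_hdr++;
	while (total_len > 0) {
		int frag_len = be32_to_cpu(fw_hdr->len); /* header + data */

		if (frag_len < TG3_FW_HDR_LEN || frag_len > total_len)
			return false;
		total_len -= frag_len;
		fw_hdr = (const struct tg3_firmware_hdr *)
			 ((const void *)fw_hdr + frag_len);
	}
	return total_len == 0;
}

/* tp->lock is held.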
*/ 3773 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc) 3774 { 3775 int i; 3776 const int iters = 5; 3777 3778 tw32(cpu_base + CPU_STATE, 0xffffffff); 3779 tw32_f(cpu_base + CPU_PC, pc); 3780 3781 for (i = 0; i < iters; i++) { 3782 if (tr32(cpu_base + CPU_PC) == pc) 3783 break; 3784 tw32(cpu_base + CPU_STATE, 0xffffffff); 3785 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); 3786 tw32_f(cpu_base + CPU_PC, pc); 3787 udelay(1000); 3788 } 3789 3790 return (i == iters) ? -EBUSY : 0; 3791 } 3792 3793 /* tp->lock is held. */ 3794 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp) 3795 { 3796 const struct tg3_firmware_hdr *fw_hdr; 3797 int err; 3798 3799 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; 3800 3801 /* Firmware blob starts with version numbers, followed by 3802 start address and length. We are setting complete length. 3803 length = end_address_of_bss - start_address_of_text. 3804 Remainder is the blob to be loaded contiguously 3805 from start address. */ 3806 3807 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE, 3808 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE, 3809 fw_hdr); 3810 if (err) 3811 return err; 3812 3813 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE, 3814 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE, 3815 fw_hdr); 3816 if (err) 3817 return err; 3818 3819 /* Now startup only the RX cpu. */ 3820 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE, 3821 be32_to_cpu(fw_hdr->base_addr)); 3822 if (err) { 3823 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x " 3824 "should be %08x\n", __func__, 3825 tr32(RX_CPU_BASE + CPU_PC), 3826 be32_to_cpu(fw_hdr->base_addr)); 3827 return -ENODEV; 3828 } 3829 3830 tg3_rxcpu_resume(tp); 3831 3832 return 0; 3833 } 3834 3835 static int tg3_validate_rxcpu_state(struct tg3 *tp) 3836 { 3837 const int iters = 1000; 3838 int i; 3839 u32 val; 3840 3841 /* Wait for boot code to complete initialization and enter service 3842 * loop. It is then safe to download service patches 3843 */ 3844 for (i = 0; i < iters; i++) { 3845 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP) 3846 break; 3847 3848 udelay(10); 3849 } 3850 3851 if (i == iters) { 3852 netdev_err(tp->dev, "Boot code not ready for service patches\n"); 3853 return -EBUSY; 3854 } 3855 3856 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE); 3857 if (val & 0xff) { 3858 netdev_warn(tp->dev, 3859 "Other patches exist. Not downloading EEE patch\n"); 3860 return -EEXIST; 3861 } 3862 3863 return 0; 3864 } 3865 3866 /* tp->lock is held. */ 3867 static void tg3_load_57766_firmware(struct tg3 *tp) 3868 { 3869 struct tg3_firmware_hdr *fw_hdr; 3870 3871 if (!tg3_flag(tp, NO_NVRAM)) 3872 return; 3873 3874 if (tg3_validate_rxcpu_state(tp)) 3875 return; 3876 3877 if (!tp->fw) 3878 return; 3879 3880 /* This firmware blob has a different format than older firmware 3881 * releases as given below. The main difference is we have fragmented 3882 * data to be written to non-contiguous locations. 3883 * 3884 * In the beginning we have a firmware header identical to other 3885 * firmware which consists of version, base addr and length. The length 3886 * here is unused and set to 0xffffffff. 3887 * 3888 * This is followed by a series of firmware fragments which are 3889 * individually identical to previous firmware. i.e. they have the 3890 * firmware header and followed by data for that fragment. The version 3891 * field of the individual fragment header is unused. 
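* The resulting layout is:
*
*   main header       (length field unused, set to 0xffffffff)
*   fragment 0 header (length = header + data bytes) | fragment 0 data
*   fragment 1 header                                | fragment 1 data
*   ...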
3892 */ 3893 3894 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; 3895 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR) 3896 return; 3897 3898 if (tg3_rxcpu_pause(tp)) 3899 return; 3900 3901 /* tg3_load_firmware_cpu() will always succeed for the 57766 */ 3902 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr); 3903 3904 tg3_rxcpu_resume(tp); 3905 } 3906 3907 /* tp->lock is held. */ 3908 static int tg3_load_tso_firmware(struct tg3 *tp) 3909 { 3910 const struct tg3_firmware_hdr *fw_hdr; 3911 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size; 3912 int err; 3913 3914 if (!tg3_flag(tp, FW_TSO)) 3915 return 0; 3916 3917 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; 3918 3919 /* Firmware blob starts with version numbers, followed by 3920 start address and length. We are setting complete length. 3921 length = end_address_of_bss - start_address_of_text. 3922 Remainder is the blob to be loaded contiguously 3923 from start address. */ 3924 3925 cpu_scratch_size = tp->fw_len; 3926 3927 if (tg3_asic_rev(tp) == ASIC_REV_5705) { 3928 cpu_base = RX_CPU_BASE; 3929 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705; 3930 } else { 3931 cpu_base = TX_CPU_BASE; 3932 cpu_scratch_base = TX_CPU_SCRATCH_BASE; 3933 cpu_scratch_size = TX_CPU_SCRATCH_SIZE; 3934 } 3935 3936 err = tg3_load_firmware_cpu(tp, cpu_base, 3937 cpu_scratch_base, cpu_scratch_size, 3938 fw_hdr); 3939 if (err) 3940 return err; 3941 3942 /* Now startup the cpu. */ 3943 err = tg3_pause_cpu_and_set_pc(tp, cpu_base, 3944 be32_to_cpu(fw_hdr->base_addr)); 3945 if (err) { 3946 netdev_err(tp->dev, 3947 "%s fails to set CPU PC, is %08x should be %08x\n", 3948 __func__, tr32(cpu_base + CPU_PC), 3949 be32_to_cpu(fw_hdr->base_addr)); 3950 return -ENODEV; 3951 } 3952 3953 tg3_resume_cpu(tp, cpu_base); 3954 return 0; 3955 } 3956 3957 /* tp->lock is held. */ 3958 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index) 3959 { 3960 u32 addr_high, addr_low; 3961 3962 addr_high = ((mac_addr[0] << 8) | mac_addr[1]); 3963 addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) | 3964 (mac_addr[4] << 8) | mac_addr[5]); 3965 3966 if (index < 4) { 3967 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high); 3968 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low); 3969 } else { 3970 index -= 4; 3971 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high); 3972 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low); 3973 } 3974 } 3975 3976 /* tp->lock is held. */ 3977 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1) 3978 { 3979 u32 addr_high; 3980 int i; 3981 3982 for (i = 0; i < 4; i++) { 3983 if (i == 1 && skip_mac_1) 3984 continue; 3985 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i); 3986 } 3987 3988 if (tg3_asic_rev(tp) == ASIC_REV_5703 || 3989 tg3_asic_rev(tp) == ASIC_REV_5704) { 3990 for (i = 4; i < 16; i++) 3991 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i); 3992 } 3993 3994 addr_high = (tp->dev->dev_addr[0] + 3995 tp->dev->dev_addr[1] + 3996 tp->dev->dev_addr[2] + 3997 tp->dev->dev_addr[3] + 3998 tp->dev->dev_addr[4] + 3999 tp->dev->dev_addr[5]) & 4000 TX_BACKOFF_SEED_MASK; 4001 tw32(MAC_TX_BACKOFF_SEED, addr_high); 4002 } 4003 4004 static void tg3_enable_register_access(struct tg3 *tp) 4005 { 4006 /* 4007 * Make sure register accesses (indirect or otherwise) will function 4008 * correctly. 
4009 */ 4010 pci_write_config_dword(tp->pdev, 4011 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl); 4012 } 4013 4014 static int tg3_power_up(struct tg3 *tp) 4015 { 4016 int err; 4017 4018 tg3_enable_register_access(tp); 4019 4020 err = pci_set_power_state(tp->pdev, PCI_D0); 4021 if (!err) { 4022 /* Switch out of Vaux if it is a NIC */ 4023 tg3_pwrsrc_switch_to_vmain(tp); 4024 } else { 4025 netdev_err(tp->dev, "Transition to D0 failed\n"); 4026 } 4027 4028 return err; 4029 } 4030 4031 static int tg3_setup_phy(struct tg3 *, bool); 4032 4033 static int tg3_power_down_prepare(struct tg3 *tp) 4034 { 4035 u32 misc_host_ctrl; 4036 bool device_should_wake, do_low_power; 4037 4038 tg3_enable_register_access(tp); 4039 4040 /* Restore the CLKREQ setting. */ 4041 if (tg3_flag(tp, CLKREQ_BUG)) 4042 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL, 4043 PCI_EXP_LNKCTL_CLKREQ_EN); 4044 4045 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); 4046 tw32(TG3PCI_MISC_HOST_CTRL, 4047 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT); 4048 4049 device_should_wake = device_may_wakeup(&tp->pdev->dev) && 4050 tg3_flag(tp, WOL_ENABLE); 4051 4052 if (tg3_flag(tp, USE_PHYLIB)) { 4053 do_low_power = false; 4054 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) && 4055 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { 4056 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, }; 4057 struct phy_device *phydev; 4058 u32 phyid; 4059 4060 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 4061 4062 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; 4063 4064 tp->link_config.speed = phydev->speed; 4065 tp->link_config.duplex = phydev->duplex; 4066 tp->link_config.autoneg = phydev->autoneg; 4067 ethtool_convert_link_mode_to_legacy_u32( 4068 &tp->link_config.advertising, 4069 phydev->advertising); 4070 4071 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising); 4072 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, 4073 advertising); 4074 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, 4075 advertising); 4076 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, 4077 advertising); 4078 4079 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) { 4080 if (tg3_flag(tp, WOL_SPEED_100MB)) { 4081 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, 4082 advertising); 4083 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, 4084 advertising); 4085 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, 4086 advertising); 4087 } else { 4088 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, 4089 advertising); 4090 } 4091 } 4092 4093 linkmode_copy(phydev->advertising, advertising); 4094 phy_start_aneg(phydev); 4095 4096 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask; 4097 if (phyid != PHY_ID_BCMAC131) { 4098 phyid &= PHY_BCM_OUI_MASK; 4099 if (phyid == PHY_BCM_OUI_1 || 4100 phyid == PHY_BCM_OUI_2 || 4101 phyid == PHY_BCM_OUI_3) 4102 do_low_power = true; 4103 } 4104 } 4105 } else { 4106 do_low_power = true; 4107 4108 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) 4109 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; 4110 4111 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 4112 tg3_setup_phy(tp, false); 4113 } 4114 4115 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 4116 u32 val; 4117 4118 val = tr32(GRC_VCPU_EXT_CTRL); 4119 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL); 4120 } else if (!tg3_flag(tp, ENABLE_ASF)) { 4121 int i; 4122 u32 val; 4123 4124 for (i = 0; i < 200; i++) { 4125 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val); 4126 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) 4127 break; 4128 msleep(1); 4129 } 4130 } 4131 if (tg3_flag(tp, WOL_CAP)) 
4132 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE | 4133 WOL_DRV_STATE_SHUTDOWN | 4134 WOL_DRV_WOL | 4135 WOL_SET_MAGIC_PKT); 4136 4137 if (device_should_wake) { 4138 u32 mac_mode; 4139 4140 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { 4141 if (do_low_power && 4142 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) { 4143 tg3_phy_auxctl_write(tp, 4144 MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 4145 MII_TG3_AUXCTL_PCTL_WOL_EN | 4146 MII_TG3_AUXCTL_PCTL_100TX_LPWR | 4147 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC); 4148 udelay(40); 4149 } 4150 4151 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 4152 mac_mode = MAC_MODE_PORT_MODE_GMII; 4153 else if (tp->phy_flags & 4154 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) { 4155 if (tp->link_config.active_speed == SPEED_1000) 4156 mac_mode = MAC_MODE_PORT_MODE_GMII; 4157 else 4158 mac_mode = MAC_MODE_PORT_MODE_MII; 4159 } else 4160 mac_mode = MAC_MODE_PORT_MODE_MII; 4161 4162 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY; 4163 if (tg3_asic_rev(tp) == ASIC_REV_5700) { 4164 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ? 4165 SPEED_100 : SPEED_10; 4166 if (tg3_5700_link_polarity(tp, speed)) 4167 mac_mode |= MAC_MODE_LINK_POLARITY; 4168 else 4169 mac_mode &= ~MAC_MODE_LINK_POLARITY; 4170 } 4171 } else { 4172 mac_mode = MAC_MODE_PORT_MODE_TBI; 4173 } 4174 4175 if (!tg3_flag(tp, 5750_PLUS)) 4176 tw32(MAC_LED_CTRL, tp->led_ctrl); 4177 4178 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE; 4179 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) && 4180 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE))) 4181 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL; 4182 4183 if (tg3_flag(tp, ENABLE_APE)) 4184 mac_mode |= MAC_MODE_APE_TX_EN | 4185 MAC_MODE_APE_RX_EN | 4186 MAC_MODE_TDE_ENABLE; 4187 4188 tw32_f(MAC_MODE, mac_mode); 4189 udelay(100); 4190 4191 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE); 4192 udelay(10); 4193 } 4194 4195 if (!tg3_flag(tp, WOL_SPEED_100MB) && 4196 (tg3_asic_rev(tp) == ASIC_REV_5700 || 4197 tg3_asic_rev(tp) == ASIC_REV_5701)) { 4198 u32 base_val; 4199 4200 base_val = tp->pci_clock_ctrl; 4201 base_val |= (CLOCK_CTRL_RXCLK_DISABLE | 4202 CLOCK_CTRL_TXCLK_DISABLE); 4203 4204 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK | 4205 CLOCK_CTRL_PWRDOWN_PLL133, 40); 4206 } else if (tg3_flag(tp, 5780_CLASS) || 4207 tg3_flag(tp, CPMU_PRESENT) || 4208 tg3_asic_rev(tp) == ASIC_REV_5906) { 4209 /* do nothing */ 4210 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) { 4211 u32 newbits1, newbits2; 4212 4213 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 4214 tg3_asic_rev(tp) == ASIC_REV_5701) { 4215 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE | 4216 CLOCK_CTRL_TXCLK_DISABLE | 4217 CLOCK_CTRL_ALTCLK); 4218 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; 4219 } else if (tg3_flag(tp, 5705_PLUS)) { 4220 newbits1 = CLOCK_CTRL_625_CORE; 4221 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK; 4222 } else { 4223 newbits1 = CLOCK_CTRL_ALTCLK; 4224 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; 4225 } 4226 4227 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1, 4228 40); 4229 4230 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2, 4231 40); 4232 4233 if (!tg3_flag(tp, 5705_PLUS)) { 4234 u32 newbits3; 4235 4236 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 4237 tg3_asic_rev(tp) == ASIC_REV_5701) { 4238 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE | 4239 CLOCK_CTRL_TXCLK_DISABLE | 4240 CLOCK_CTRL_44MHZ_CORE); 4241 } else { 4242 newbits3 = CLOCK_CTRL_44MHZ_CORE; 4243 } 4244 4245 tw32_wait_f(TG3PCI_CLOCK_CTRL, 4246 tp->pci_clock_ctrl | newbits3, 40); 4247 } 4248 } 4249 4250 if (!(device_should_wake) && !tg3_flag(tp, 
ENABLE_ASF)) 4251 tg3_power_down_phy(tp, do_low_power); 4252 4253 tg3_frob_aux_power(tp, true); 4254 4255 /* Workaround for unstable PLL clock */ 4256 if ((!tg3_flag(tp, IS_SSB_CORE)) && 4257 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) || 4258 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) { 4259 u32 val = tr32(0x7d00); 4260 4261 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1); 4262 tw32(0x7d00, val); 4263 if (!tg3_flag(tp, ENABLE_ASF)) { 4264 int err; 4265 4266 err = tg3_nvram_lock(tp); 4267 tg3_halt_cpu(tp, RX_CPU_BASE); 4268 if (!err) 4269 tg3_nvram_unlock(tp); 4270 } 4271 } 4272 4273 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN); 4274 4275 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN); 4276 4277 return 0; 4278 } 4279 4280 static void tg3_power_down(struct tg3 *tp) 4281 { 4282 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE)); 4283 pci_set_power_state(tp->pdev, PCI_D3hot); 4284 } 4285 4286 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex) 4287 { 4288 switch (val & MII_TG3_AUX_STAT_SPDMASK) { 4289 case MII_TG3_AUX_STAT_10HALF: 4290 *speed = SPEED_10; 4291 *duplex = DUPLEX_HALF; 4292 break; 4293 4294 case MII_TG3_AUX_STAT_10FULL: 4295 *speed = SPEED_10; 4296 *duplex = DUPLEX_FULL; 4297 break; 4298 4299 case MII_TG3_AUX_STAT_100HALF: 4300 *speed = SPEED_100; 4301 *duplex = DUPLEX_HALF; 4302 break; 4303 4304 case MII_TG3_AUX_STAT_100FULL: 4305 *speed = SPEED_100; 4306 *duplex = DUPLEX_FULL; 4307 break; 4308 4309 case MII_TG3_AUX_STAT_1000HALF: 4310 *speed = SPEED_1000; 4311 *duplex = DUPLEX_HALF; 4312 break; 4313 4314 case MII_TG3_AUX_STAT_1000FULL: 4315 *speed = SPEED_1000; 4316 *duplex = DUPLEX_FULL; 4317 break; 4318 4319 default: 4320 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 4321 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 : 4322 SPEED_10; 4323 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? 
DUPLEX_FULL : 4324 DUPLEX_HALF; 4325 break; 4326 } 4327 *speed = SPEED_UNKNOWN; 4328 *duplex = DUPLEX_UNKNOWN; 4329 break; 4330 } 4331 } 4332 4333 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl) 4334 { 4335 int err = 0; 4336 u32 val, new_adv; 4337 4338 new_adv = ADVERTISE_CSMA; 4339 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL; 4340 new_adv |= mii_advertise_flowctrl(flowctrl); 4341 4342 err = tg3_writephy(tp, MII_ADVERTISE, new_adv); 4343 if (err) 4344 goto done; 4345 4346 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4347 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise); 4348 4349 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || 4350 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) 4351 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER; 4352 4353 err = tg3_writephy(tp, MII_CTRL1000, new_adv); 4354 if (err) 4355 goto done; 4356 } 4357 4358 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) 4359 goto done; 4360 4361 tw32(TG3_CPMU_EEE_MODE, 4362 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE); 4363 4364 err = tg3_phy_toggle_auxctl_smdsp(tp, true); 4365 if (!err) { 4366 u32 err2; 4367 4368 val = 0; 4369 /* Advertise 100-BaseTX EEE ability */ 4370 if (advertise & ADVERTISED_100baseT_Full) 4371 val |= MDIO_AN_EEE_ADV_100TX; 4372 /* Advertise 1000-BaseT EEE ability */ 4373 if (advertise & ADVERTISED_1000baseT_Full) 4374 val |= MDIO_AN_EEE_ADV_1000T; 4375 4376 if (!tp->eee.eee_enabled) { 4377 val = 0; 4378 tp->eee.advertised = 0; 4379 } else { 4380 tp->eee.advertised = advertise & 4381 (ADVERTISED_100baseT_Full | 4382 ADVERTISED_1000baseT_Full); 4383 } 4384 4385 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val); 4386 if (err) 4387 val = 0; 4388 4389 switch (tg3_asic_rev(tp)) { 4390 case ASIC_REV_5717: 4391 case ASIC_REV_57765: 4392 case ASIC_REV_57766: 4393 case ASIC_REV_5719: 4394 /* If we advertised any eee advertisements above... 
*/ 4395 if (val) 4396 val = MII_TG3_DSP_TAP26_ALNOKO | 4397 MII_TG3_DSP_TAP26_RMRXSTO | 4398 MII_TG3_DSP_TAP26_OPCSINPT; 4399 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); 4400 /* Fall through */ 4401 case ASIC_REV_5720: 4402 case ASIC_REV_5762: 4403 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val)) 4404 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val | 4405 MII_TG3_DSP_CH34TP2_HIBW01); 4406 } 4407 4408 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false); 4409 if (!err) 4410 err = err2; 4411 } 4412 4413 done: 4414 return err; 4415 } 4416 4417 static void tg3_phy_copper_begin(struct tg3 *tp) 4418 { 4419 if (tp->link_config.autoneg == AUTONEG_ENABLE || 4420 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { 4421 u32 adv, fc; 4422 4423 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) && 4424 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) { 4425 adv = ADVERTISED_10baseT_Half | 4426 ADVERTISED_10baseT_Full; 4427 if (tg3_flag(tp, WOL_SPEED_100MB)) 4428 adv |= ADVERTISED_100baseT_Half | 4429 ADVERTISED_100baseT_Full; 4430 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) { 4431 if (!(tp->phy_flags & 4432 TG3_PHYFLG_DISABLE_1G_HD_ADV)) 4433 adv |= ADVERTISED_1000baseT_Half; 4434 adv |= ADVERTISED_1000baseT_Full; 4435 } 4436 4437 fc = FLOW_CTRL_TX | FLOW_CTRL_RX; 4438 } else { 4439 adv = tp->link_config.advertising; 4440 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) 4441 adv &= ~(ADVERTISED_1000baseT_Half | 4442 ADVERTISED_1000baseT_Full); 4443 4444 fc = tp->link_config.flowctrl; 4445 } 4446 4447 tg3_phy_autoneg_cfg(tp, adv, fc); 4448 4449 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) && 4450 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) { 4451 /* Normally during power down we want to autonegotiate 4452 * the lowest possible speed for WOL. However, to avoid 4453 * link flap, we leave it untouched. 4454 */ 4455 return; 4456 } 4457 4458 tg3_writephy(tp, MII_BMCR, 4459 BMCR_ANENABLE | BMCR_ANRESTART); 4460 } else { 4461 int i; 4462 u32 bmcr, orig_bmcr; 4463 4464 tp->link_config.active_speed = tp->link_config.speed; 4465 tp->link_config.active_duplex = tp->link_config.duplex; 4466 4467 if (tg3_asic_rev(tp) == ASIC_REV_5714) { 4468 /* With autoneg disabled, 5715 only links up when the 4469 * advertisement register has the configured speed 4470 * enabled. 
4471 */ 4472 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL); 4473 } 4474 4475 bmcr = 0; 4476 switch (tp->link_config.speed) { 4477 default: 4478 case SPEED_10: 4479 break; 4480 4481 case SPEED_100: 4482 bmcr |= BMCR_SPEED100; 4483 break; 4484 4485 case SPEED_1000: 4486 bmcr |= BMCR_SPEED1000; 4487 break; 4488 } 4489 4490 if (tp->link_config.duplex == DUPLEX_FULL) 4491 bmcr |= BMCR_FULLDPLX; 4492 4493 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) && 4494 (bmcr != orig_bmcr)) { 4495 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK); 4496 for (i = 0; i < 1500; i++) { 4497 u32 tmp; 4498 4499 udelay(10); 4500 if (tg3_readphy(tp, MII_BMSR, &tmp) || 4501 tg3_readphy(tp, MII_BMSR, &tmp)) 4502 continue; 4503 if (!(tmp & BMSR_LSTATUS)) { 4504 udelay(40); 4505 break; 4506 } 4507 } 4508 tg3_writephy(tp, MII_BMCR, bmcr); 4509 udelay(40); 4510 } 4511 } 4512 } 4513 4514 static int tg3_phy_pull_config(struct tg3 *tp) 4515 { 4516 int err; 4517 u32 val; 4518 4519 err = tg3_readphy(tp, MII_BMCR, &val); 4520 if (err) 4521 goto done; 4522 4523 if (!(val & BMCR_ANENABLE)) { 4524 tp->link_config.autoneg = AUTONEG_DISABLE; 4525 tp->link_config.advertising = 0; 4526 tg3_flag_clear(tp, PAUSE_AUTONEG); 4527 4528 err = -EIO; 4529 4530 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) { 4531 case 0: 4532 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) 4533 goto done; 4534 4535 tp->link_config.speed = SPEED_10; 4536 break; 4537 case BMCR_SPEED100: 4538 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) 4539 goto done; 4540 4541 tp->link_config.speed = SPEED_100; 4542 break; 4543 case BMCR_SPEED1000: 4544 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4545 tp->link_config.speed = SPEED_1000; 4546 break; 4547 } 4548 /* Fall through */ 4549 default: 4550 goto done; 4551 } 4552 4553 if (val & BMCR_FULLDPLX) 4554 tp->link_config.duplex = DUPLEX_FULL; 4555 else 4556 tp->link_config.duplex = DUPLEX_HALF; 4557 4558 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX; 4559 4560 err = 0; 4561 goto done; 4562 } 4563 4564 tp->link_config.autoneg = AUTONEG_ENABLE; 4565 tp->link_config.advertising = ADVERTISED_Autoneg; 4566 tg3_flag_set(tp, PAUSE_AUTONEG); 4567 4568 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { 4569 u32 adv; 4570 4571 err = tg3_readphy(tp, MII_ADVERTISE, &val); 4572 if (err) 4573 goto done; 4574 4575 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL); 4576 tp->link_config.advertising |= adv | ADVERTISED_TP; 4577 4578 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val); 4579 } else { 4580 tp->link_config.advertising |= ADVERTISED_FIBRE; 4581 } 4582 4583 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4584 u32 adv; 4585 4586 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { 4587 err = tg3_readphy(tp, MII_CTRL1000, &val); 4588 if (err) 4589 goto done; 4590 4591 adv = mii_ctrl1000_to_ethtool_adv_t(val); 4592 } else { 4593 err = tg3_readphy(tp, MII_ADVERTISE, &val); 4594 if (err) 4595 goto done; 4596 4597 adv = tg3_decode_flowctrl_1000X(val); 4598 tp->link_config.flowctrl = adv; 4599 4600 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL); 4601 adv = mii_adv_to_ethtool_adv_x(val); 4602 } 4603 4604 tp->link_config.advertising |= adv; 4605 } 4606 4607 done: 4608 return err; 4609 } 4610 4611 static int tg3_init_5401phy_dsp(struct tg3 *tp) 4612 { 4613 int err; 4614 4615 /* Turn off tap power management. 
*/ 4616 /* Set Extended packet length bit */ 4617 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20); 4618 4619 err |= tg3_phydsp_write(tp, 0x0012, 0x1804); 4620 err |= tg3_phydsp_write(tp, 0x0013, 0x1204); 4621 err |= tg3_phydsp_write(tp, 0x8006, 0x0132); 4622 err |= tg3_phydsp_write(tp, 0x8006, 0x0232); 4623 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20); 4624 4625 udelay(40); 4626 4627 return err; 4628 } 4629 4630 static bool tg3_phy_eee_config_ok(struct tg3 *tp) 4631 { 4632 struct ethtool_eee eee; 4633 4634 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) 4635 return true; 4636 4637 tg3_eee_pull_config(tp, &eee); 4638 4639 if (tp->eee.eee_enabled) { 4640 if (tp->eee.advertised != eee.advertised || 4641 tp->eee.tx_lpi_timer != eee.tx_lpi_timer || 4642 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled) 4643 return false; 4644 } else { 4645 /* EEE is disabled but we're advertising */ 4646 if (eee.advertised) 4647 return false; 4648 } 4649 4650 return true; 4651 } 4652 4653 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv) 4654 { 4655 u32 advmsk, tgtadv, advertising; 4656 4657 advertising = tp->link_config.advertising; 4658 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL; 4659 4660 advmsk = ADVERTISE_ALL; 4661 if (tp->link_config.active_duplex == DUPLEX_FULL) { 4662 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl); 4663 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 4664 } 4665 4666 if (tg3_readphy(tp, MII_ADVERTISE, lcladv)) 4667 return false; 4668 4669 if ((*lcladv & advmsk) != tgtadv) 4670 return false; 4671 4672 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4673 u32 tg3_ctrl; 4674 4675 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising); 4676 4677 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl)) 4678 return false; 4679 4680 if (tgtadv && 4681 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || 4682 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) { 4683 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER; 4684 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL | 4685 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER); 4686 } else { 4687 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL); 4688 } 4689 4690 if (tg3_ctrl != tgtadv) 4691 return false; 4692 } 4693 4694 return true; 4695 } 4696 4697 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv) 4698 { 4699 u32 lpeth = 0; 4700 4701 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4702 u32 val; 4703 4704 if (tg3_readphy(tp, MII_STAT1000, &val)) 4705 return false; 4706 4707 lpeth = mii_stat1000_to_ethtool_lpa_t(val); 4708 } 4709 4710 if (tg3_readphy(tp, MII_LPA, rmtadv)) 4711 return false; 4712 4713 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv); 4714 tp->link_config.rmt_adv = lpeth; 4715 4716 return true; 4717 } 4718 4719 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up) 4720 { 4721 if (curr_link_up != tp->link_up) { 4722 if (curr_link_up) { 4723 netif_carrier_on(tp->dev); 4724 } else { 4725 netif_carrier_off(tp->dev); 4726 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 4727 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 4728 } 4729 4730 tg3_link_report(tp); 4731 return true; 4732 } 4733 4734 return false; 4735 } 4736 4737 static void tg3_clear_mac_status(struct tg3 *tp) 4738 { 4739 tw32(MAC_EVENT, 0); 4740 4741 tw32_f(MAC_STATUS, 4742 MAC_STATUS_SYNC_CHANGED | 4743 MAC_STATUS_CFG_CHANGED | 4744 MAC_STATUS_MI_COMPLETION | 4745 MAC_STATUS_LNKSTATE_CHANGED); 4746 udelay(40); 4747 } 4748 4749 static void tg3_setup_eee(struct tg3 *tp) 4750 { 4751 u32 val; 4752 4753 
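        /*
         * Worked example (illustrative): with tp->eee.eee_enabled = 1 and
         * tp->eee.tx_lpi_enabled = 1 on a non-5717 chip without APE, the
         * EEE mode word computed below ends up as ERLY_L1_XIT_DET |
         * LPI_IN_TX | LPI_IN_RX | EEE_ENABLE | SND_IDX_DET_EN; with
         * eee_enabled = 0 the register is simply written as 0, i.e. EEE
         * is switched off regardless of the other bits.
         */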
val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 | 4754 TG3_CPMU_EEE_LNKIDL_UART_IDL; 4755 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) 4756 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT; 4757 4758 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val); 4759 4760 tw32_f(TG3_CPMU_EEE_CTRL, 4761 TG3_CPMU_EEE_CTRL_EXIT_20_1_US); 4762 4763 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET | 4764 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) | 4765 TG3_CPMU_EEEMD_LPI_IN_RX | 4766 TG3_CPMU_EEEMD_EEE_ENABLE; 4767 4768 if (tg3_asic_rev(tp) != ASIC_REV_5717) 4769 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN; 4770 4771 if (tg3_flag(tp, ENABLE_APE)) 4772 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN; 4773 4774 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0); 4775 4776 tw32_f(TG3_CPMU_EEE_DBTMR1, 4777 TG3_CPMU_DBTMR1_PCIEXIT_2047US | 4778 (tp->eee.tx_lpi_timer & 0xffff)); 4779 4780 tw32_f(TG3_CPMU_EEE_DBTMR2, 4781 TG3_CPMU_DBTMR2_APE_TX_2047US | 4782 TG3_CPMU_DBTMR2_TXIDXEQ_2047US); 4783 } 4784 4785 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset) 4786 { 4787 bool current_link_up; 4788 u32 bmsr, val; 4789 u32 lcl_adv, rmt_adv; 4790 u16 current_speed; 4791 u8 current_duplex; 4792 int i, err; 4793 4794 tg3_clear_mac_status(tp); 4795 4796 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { 4797 tw32_f(MAC_MI_MODE, 4798 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); 4799 udelay(80); 4800 } 4801 4802 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0); 4803 4804 /* Some third-party PHYs need to be reset on link going 4805 * down. 4806 */ 4807 if ((tg3_asic_rev(tp) == ASIC_REV_5703 || 4808 tg3_asic_rev(tp) == ASIC_REV_5704 || 4809 tg3_asic_rev(tp) == ASIC_REV_5705) && 4810 tp->link_up) { 4811 tg3_readphy(tp, MII_BMSR, &bmsr); 4812 if (!tg3_readphy(tp, MII_BMSR, &bmsr) && 4813 !(bmsr & BMSR_LSTATUS)) 4814 force_reset = true; 4815 } 4816 if (force_reset) 4817 tg3_phy_reset(tp); 4818 4819 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { 4820 tg3_readphy(tp, MII_BMSR, &bmsr); 4821 if (tg3_readphy(tp, MII_BMSR, &bmsr) || 4822 !tg3_flag(tp, INIT_COMPLETE)) 4823 bmsr = 0; 4824 4825 if (!(bmsr & BMSR_LSTATUS)) { 4826 err = tg3_init_5401phy_dsp(tp); 4827 if (err) 4828 return err; 4829 4830 tg3_readphy(tp, MII_BMSR, &bmsr); 4831 for (i = 0; i < 1000; i++) { 4832 udelay(10); 4833 if (!tg3_readphy(tp, MII_BMSR, &bmsr) && 4834 (bmsr & BMSR_LSTATUS)) { 4835 udelay(40); 4836 break; 4837 } 4838 } 4839 4840 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) == 4841 TG3_PHY_REV_BCM5401_B0 && 4842 !(bmsr & BMSR_LSTATUS) && 4843 tp->link_config.active_speed == SPEED_1000) { 4844 err = tg3_phy_reset(tp); 4845 if (!err) 4846 err = tg3_init_5401phy_dsp(tp); 4847 if (err) 4848 return err; 4849 } 4850 } 4851 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || 4852 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) { 4853 /* 5701 {A0,B0} CRC bug workaround */ 4854 tg3_writephy(tp, 0x15, 0x0a75); 4855 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68); 4856 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); 4857 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68); 4858 } 4859 4860 /* Clear pending interrupts... 
 */
        tg3_readphy(tp, MII_TG3_ISTAT, &val);
        tg3_readphy(tp, MII_TG3_ISTAT, &val);

        if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
                tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
        else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
                tg3_writephy(tp, MII_TG3_IMASK, ~0);

        if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
            tg3_asic_rev(tp) == ASIC_REV_5701) {
                if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
                        tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
                else
                        tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
        }

        current_link_up = false;
        current_speed = SPEED_UNKNOWN;
        current_duplex = DUPLEX_UNKNOWN;
        tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
        tp->link_config.rmt_adv = 0;

        if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
                err = tg3_phy_auxctl_read(tp,
                                          MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
                                          &val);
                if (!err && !(val & (1 << 10))) {
                        tg3_phy_auxctl_write(tp,
                                             MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
                                             val | (1 << 10));
                        goto relink;
                }
        }

        bmsr = 0;
        for (i = 0; i < 100; i++) {
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                    (bmsr & BMSR_LSTATUS))
                        break;
                udelay(40);
        }

        if (bmsr & BMSR_LSTATUS) {
                u32 aux_stat, bmcr;

                tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
                for (i = 0; i < 2000; i++) {
                        udelay(10);
                        if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
                            aux_stat)
                                break;
                }

                tg3_aux_stat_to_speed_duplex(tp, aux_stat,
                                             &current_speed,
                                             &current_duplex);

                bmcr = 0;
                for (i = 0; i < 200; i++) {
                        tg3_readphy(tp, MII_BMCR, &bmcr);
                        if (tg3_readphy(tp, MII_BMCR, &bmcr))
                                continue;
                        if (bmcr && bmcr != 0x7fff)
                                break;
                        udelay(10);
                }

                lcl_adv = 0;
                rmt_adv = 0;

                tp->link_config.active_speed = current_speed;
                tp->link_config.active_duplex = current_duplex;

                if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                        bool eee_config_ok = tg3_phy_eee_config_ok(tp);

                        if ((bmcr & BMCR_ANENABLE) &&
                            eee_config_ok &&
                            tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
                            tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
                                current_link_up = true;
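                        /*
                         * Put differently (illustrative restatement): under
                         * autoneg the copper link is only declared up when
                         * all four checks pass -- BMCR still has autoneg
                         * enabled, the EEE advertisement matches what was
                         * programmed, MII_ADVERTISE/MII_CTRL1000 match
                         * tp->link_config, and the partner's abilities were
                         * fetched successfully.
                         */

                        /* EEE settings changes take effect only after a phy
                         * reset. If we have skipped a reset due to Link Flap
                         * Avoidance being enabled, do it now.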
4948 */ 4949 if (!eee_config_ok && 4950 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && 4951 !force_reset) { 4952 tg3_setup_eee(tp); 4953 tg3_phy_reset(tp); 4954 } 4955 } else { 4956 if (!(bmcr & BMCR_ANENABLE) && 4957 tp->link_config.speed == current_speed && 4958 tp->link_config.duplex == current_duplex) { 4959 current_link_up = true; 4960 } 4961 } 4962 4963 if (current_link_up && 4964 tp->link_config.active_duplex == DUPLEX_FULL) { 4965 u32 reg, bit; 4966 4967 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 4968 reg = MII_TG3_FET_GEN_STAT; 4969 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT; 4970 } else { 4971 reg = MII_TG3_EXT_STAT; 4972 bit = MII_TG3_EXT_STAT_MDIX; 4973 } 4974 4975 if (!tg3_readphy(tp, reg, &val) && (val & bit)) 4976 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE; 4977 4978 tg3_setup_flow_control(tp, lcl_adv, rmt_adv); 4979 } 4980 } 4981 4982 relink: 4983 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { 4984 tg3_phy_copper_begin(tp); 4985 4986 if (tg3_flag(tp, ROBOSWITCH)) { 4987 current_link_up = true; 4988 /* FIXME: when BCM5325 switch is used use 100 MBit/s */ 4989 current_speed = SPEED_1000; 4990 current_duplex = DUPLEX_FULL; 4991 tp->link_config.active_speed = current_speed; 4992 tp->link_config.active_duplex = current_duplex; 4993 } 4994 4995 tg3_readphy(tp, MII_BMSR, &bmsr); 4996 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) || 4997 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)) 4998 current_link_up = true; 4999 } 5000 5001 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK; 5002 if (current_link_up) { 5003 if (tp->link_config.active_speed == SPEED_100 || 5004 tp->link_config.active_speed == SPEED_10) 5005 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 5006 else 5007 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 5008 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) 5009 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 5010 else 5011 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 5012 5013 /* In order for the 5750 core in BCM4785 chip to work properly 5014 * in RGMII mode, the Led Control Register must be set up. 5015 */ 5016 if (tg3_flag(tp, RGMII_MODE)) { 5017 u32 led_ctrl = tr32(MAC_LED_CTRL); 5018 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON); 5019 5020 if (tp->link_config.active_speed == SPEED_10) 5021 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE; 5022 else if (tp->link_config.active_speed == SPEED_100) 5023 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE | 5024 LED_CTRL_100MBPS_ON); 5025 else if (tp->link_config.active_speed == SPEED_1000) 5026 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE | 5027 LED_CTRL_1000MBPS_ON); 5028 5029 tw32(MAC_LED_CTRL, led_ctrl); 5030 udelay(40); 5031 } 5032 5033 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; 5034 if (tp->link_config.active_duplex == DUPLEX_HALF) 5035 tp->mac_mode |= MAC_MODE_HALF_DUPLEX; 5036 5037 if (tg3_asic_rev(tp) == ASIC_REV_5700) { 5038 if (current_link_up && 5039 tg3_5700_link_polarity(tp, tp->link_config.active_speed)) 5040 tp->mac_mode |= MAC_MODE_LINK_POLARITY; 5041 else 5042 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; 5043 } 5044 5045 /* ??? Without this setting Netgear GA302T PHY does not 5046 * ??? send/receive packets... 5047 */ 5048 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 && 5049 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) { 5050 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL; 5051 tw32_f(MAC_MI_MODE, tp->mi_mode); 5052 udelay(80); 5053 } 5054 5055 tw32_f(MAC_MODE, tp->mac_mode); 5056 udelay(40); 5057 5058 tg3_phy_eee_adjust(tp, current_link_up); 5059 5060 if (tg3_flag(tp, USE_LINKCHG_REG)) { 5061 /* Polled via timer. 
*/ 5062 tw32_f(MAC_EVENT, 0); 5063 } else { 5064 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); 5065 } 5066 udelay(40); 5067 5068 if (tg3_asic_rev(tp) == ASIC_REV_5700 && 5069 current_link_up && 5070 tp->link_config.active_speed == SPEED_1000 && 5071 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) { 5072 udelay(120); 5073 tw32_f(MAC_STATUS, 5074 (MAC_STATUS_SYNC_CHANGED | 5075 MAC_STATUS_CFG_CHANGED)); 5076 udelay(40); 5077 tg3_write_mem(tp, 5078 NIC_SRAM_FIRMWARE_MBOX, 5079 NIC_SRAM_FIRMWARE_MBOX_MAGIC2); 5080 } 5081 5082 /* Prevent send BD corruption. */ 5083 if (tg3_flag(tp, CLKREQ_BUG)) { 5084 if (tp->link_config.active_speed == SPEED_100 || 5085 tp->link_config.active_speed == SPEED_10) 5086 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL, 5087 PCI_EXP_LNKCTL_CLKREQ_EN); 5088 else 5089 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL, 5090 PCI_EXP_LNKCTL_CLKREQ_EN); 5091 } 5092 5093 tg3_test_and_report_link_chg(tp, current_link_up); 5094 5095 return 0; 5096 } 5097 5098 struct tg3_fiber_aneginfo { 5099 int state; 5100 #define ANEG_STATE_UNKNOWN 0 5101 #define ANEG_STATE_AN_ENABLE 1 5102 #define ANEG_STATE_RESTART_INIT 2 5103 #define ANEG_STATE_RESTART 3 5104 #define ANEG_STATE_DISABLE_LINK_OK 4 5105 #define ANEG_STATE_ABILITY_DETECT_INIT 5 5106 #define ANEG_STATE_ABILITY_DETECT 6 5107 #define ANEG_STATE_ACK_DETECT_INIT 7 5108 #define ANEG_STATE_ACK_DETECT 8 5109 #define ANEG_STATE_COMPLETE_ACK_INIT 9 5110 #define ANEG_STATE_COMPLETE_ACK 10 5111 #define ANEG_STATE_IDLE_DETECT_INIT 11 5112 #define ANEG_STATE_IDLE_DETECT 12 5113 #define ANEG_STATE_LINK_OK 13 5114 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14 5115 #define ANEG_STATE_NEXT_PAGE_WAIT 15 5116 5117 u32 flags; 5118 #define MR_AN_ENABLE 0x00000001 5119 #define MR_RESTART_AN 0x00000002 5120 #define MR_AN_COMPLETE 0x00000004 5121 #define MR_PAGE_RX 0x00000008 5122 #define MR_NP_LOADED 0x00000010 5123 #define MR_TOGGLE_TX 0x00000020 5124 #define MR_LP_ADV_FULL_DUPLEX 0x00000040 5125 #define MR_LP_ADV_HALF_DUPLEX 0x00000080 5126 #define MR_LP_ADV_SYM_PAUSE 0x00000100 5127 #define MR_LP_ADV_ASYM_PAUSE 0x00000200 5128 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400 5129 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800 5130 #define MR_LP_ADV_NEXT_PAGE 0x00001000 5131 #define MR_TOGGLE_RX 0x00002000 5132 #define MR_NP_RX 0x00004000 5133 5134 #define MR_LINK_OK 0x80000000 5135 5136 unsigned long link_time, cur_time; 5137 5138 u32 ability_match_cfg; 5139 int ability_match_count; 5140 5141 char ability_match, idle_match, ack_match; 5142 5143 u32 txconfig, rxconfig; 5144 #define ANEG_CFG_NP 0x00000080 5145 #define ANEG_CFG_ACK 0x00000040 5146 #define ANEG_CFG_RF2 0x00000020 5147 #define ANEG_CFG_RF1 0x00000010 5148 #define ANEG_CFG_PS2 0x00000001 5149 #define ANEG_CFG_PS1 0x00008000 5150 #define ANEG_CFG_HD 0x00004000 5151 #define ANEG_CFG_FD 0x00002000 5152 #define ANEG_CFG_INVAL 0x00001f06 5153 5154 }; 5155 #define ANEG_OK 0 5156 #define ANEG_DONE 1 5157 #define ANEG_TIMER_ENAB 2 5158 #define ANEG_FAILED -1 5159 5160 #define ANEG_STATE_SETTLE_TIME 10000 5161 5162 static int tg3_fiber_aneg_smachine(struct tg3 *tp, 5163 struct tg3_fiber_aneginfo *ap) 5164 { 5165 u16 flowctrl; 5166 unsigned long delta; 5167 u32 rx_cfg_reg; 5168 int ret; 5169 5170 if (ap->state == ANEG_STATE_UNKNOWN) { 5171 ap->rxconfig = 0; 5172 ap->link_time = 0; 5173 ap->cur_time = 0; 5174 ap->ability_match_cfg = 0; 5175 ap->ability_match_count = 0; 5176 ap->ability_match = 0; 5177 ap->idle_match = 0; 5178 ap->ack_match = 0; 5179 } 5180 ap->cur_time++; 5181 5182 if 
(tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) { 5183 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG); 5184 5185 if (rx_cfg_reg != ap->ability_match_cfg) { 5186 ap->ability_match_cfg = rx_cfg_reg; 5187 ap->ability_match = 0; 5188 ap->ability_match_count = 0; 5189 } else { 5190 if (++ap->ability_match_count > 1) { 5191 ap->ability_match = 1; 5192 ap->ability_match_cfg = rx_cfg_reg; 5193 } 5194 } 5195 if (rx_cfg_reg & ANEG_CFG_ACK) 5196 ap->ack_match = 1; 5197 else 5198 ap->ack_match = 0; 5199 5200 ap->idle_match = 0; 5201 } else { 5202 ap->idle_match = 1; 5203 ap->ability_match_cfg = 0; 5204 ap->ability_match_count = 0; 5205 ap->ability_match = 0; 5206 ap->ack_match = 0; 5207 5208 rx_cfg_reg = 0; 5209 } 5210 5211 ap->rxconfig = rx_cfg_reg; 5212 ret = ANEG_OK; 5213 5214 switch (ap->state) { 5215 case ANEG_STATE_UNKNOWN: 5216 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN)) 5217 ap->state = ANEG_STATE_AN_ENABLE; 5218 5219 /* fall through */ 5220 case ANEG_STATE_AN_ENABLE: 5221 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX); 5222 if (ap->flags & MR_AN_ENABLE) { 5223 ap->link_time = 0; 5224 ap->cur_time = 0; 5225 ap->ability_match_cfg = 0; 5226 ap->ability_match_count = 0; 5227 ap->ability_match = 0; 5228 ap->idle_match = 0; 5229 ap->ack_match = 0; 5230 5231 ap->state = ANEG_STATE_RESTART_INIT; 5232 } else { 5233 ap->state = ANEG_STATE_DISABLE_LINK_OK; 5234 } 5235 break; 5236 5237 case ANEG_STATE_RESTART_INIT: 5238 ap->link_time = ap->cur_time; 5239 ap->flags &= ~(MR_NP_LOADED); 5240 ap->txconfig = 0; 5241 tw32(MAC_TX_AUTO_NEG, 0); 5242 tp->mac_mode |= MAC_MODE_SEND_CONFIGS; 5243 tw32_f(MAC_MODE, tp->mac_mode); 5244 udelay(40); 5245 5246 ret = ANEG_TIMER_ENAB; 5247 ap->state = ANEG_STATE_RESTART; 5248 5249 /* fall through */ 5250 case ANEG_STATE_RESTART: 5251 delta = ap->cur_time - ap->link_time; 5252 if (delta > ANEG_STATE_SETTLE_TIME) 5253 ap->state = ANEG_STATE_ABILITY_DETECT_INIT; 5254 else 5255 ret = ANEG_TIMER_ENAB; 5256 break; 5257 5258 case ANEG_STATE_DISABLE_LINK_OK: 5259 ret = ANEG_DONE; 5260 break; 5261 5262 case ANEG_STATE_ABILITY_DETECT_INIT: 5263 ap->flags &= ~(MR_TOGGLE_TX); 5264 ap->txconfig = ANEG_CFG_FD; 5265 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); 5266 if (flowctrl & ADVERTISE_1000XPAUSE) 5267 ap->txconfig |= ANEG_CFG_PS1; 5268 if (flowctrl & ADVERTISE_1000XPSE_ASYM) 5269 ap->txconfig |= ANEG_CFG_PS2; 5270 tw32(MAC_TX_AUTO_NEG, ap->txconfig); 5271 tp->mac_mode |= MAC_MODE_SEND_CONFIGS; 5272 tw32_f(MAC_MODE, tp->mac_mode); 5273 udelay(40); 5274 5275 ap->state = ANEG_STATE_ABILITY_DETECT; 5276 break; 5277 5278 case ANEG_STATE_ABILITY_DETECT: 5279 if (ap->ability_match != 0 && ap->rxconfig != 0) 5280 ap->state = ANEG_STATE_ACK_DETECT_INIT; 5281 break; 5282 5283 case ANEG_STATE_ACK_DETECT_INIT: 5284 ap->txconfig |= ANEG_CFG_ACK; 5285 tw32(MAC_TX_AUTO_NEG, ap->txconfig); 5286 tp->mac_mode |= MAC_MODE_SEND_CONFIGS; 5287 tw32_f(MAC_MODE, tp->mac_mode); 5288 udelay(40); 5289 5290 ap->state = ANEG_STATE_ACK_DETECT; 5291 5292 /* fall through */ 5293 case ANEG_STATE_ACK_DETECT: 5294 if (ap->ack_match != 0) { 5295 if ((ap->rxconfig & ~ANEG_CFG_ACK) == 5296 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) { 5297 ap->state = ANEG_STATE_COMPLETE_ACK_INIT; 5298 } else { 5299 ap->state = ANEG_STATE_AN_ENABLE; 5300 } 5301 } else if (ap->ability_match != 0 && 5302 ap->rxconfig == 0) { 5303 ap->state = ANEG_STATE_AN_ENABLE; 5304 } 5305 break; 5306 5307 case ANEG_STATE_COMPLETE_ACK_INIT: 5308 if (ap->rxconfig & ANEG_CFG_INVAL) { 5309 ret = ANEG_FAILED; 5310 break; 5311 } 5312 ap->flags &= 
~(MR_LP_ADV_FULL_DUPLEX | 5313 MR_LP_ADV_HALF_DUPLEX | 5314 MR_LP_ADV_SYM_PAUSE | 5315 MR_LP_ADV_ASYM_PAUSE | 5316 MR_LP_ADV_REMOTE_FAULT1 | 5317 MR_LP_ADV_REMOTE_FAULT2 | 5318 MR_LP_ADV_NEXT_PAGE | 5319 MR_TOGGLE_RX | 5320 MR_NP_RX); 5321 if (ap->rxconfig & ANEG_CFG_FD) 5322 ap->flags |= MR_LP_ADV_FULL_DUPLEX; 5323 if (ap->rxconfig & ANEG_CFG_HD) 5324 ap->flags |= MR_LP_ADV_HALF_DUPLEX; 5325 if (ap->rxconfig & ANEG_CFG_PS1) 5326 ap->flags |= MR_LP_ADV_SYM_PAUSE; 5327 if (ap->rxconfig & ANEG_CFG_PS2) 5328 ap->flags |= MR_LP_ADV_ASYM_PAUSE; 5329 if (ap->rxconfig & ANEG_CFG_RF1) 5330 ap->flags |= MR_LP_ADV_REMOTE_FAULT1; 5331 if (ap->rxconfig & ANEG_CFG_RF2) 5332 ap->flags |= MR_LP_ADV_REMOTE_FAULT2; 5333 if (ap->rxconfig & ANEG_CFG_NP) 5334 ap->flags |= MR_LP_ADV_NEXT_PAGE; 5335 5336 ap->link_time = ap->cur_time; 5337 5338 ap->flags ^= (MR_TOGGLE_TX); 5339 if (ap->rxconfig & 0x0008) 5340 ap->flags |= MR_TOGGLE_RX; 5341 if (ap->rxconfig & ANEG_CFG_NP) 5342 ap->flags |= MR_NP_RX; 5343 ap->flags |= MR_PAGE_RX; 5344 5345 ap->state = ANEG_STATE_COMPLETE_ACK; 5346 ret = ANEG_TIMER_ENAB; 5347 break; 5348 5349 case ANEG_STATE_COMPLETE_ACK: 5350 if (ap->ability_match != 0 && 5351 ap->rxconfig == 0) { 5352 ap->state = ANEG_STATE_AN_ENABLE; 5353 break; 5354 } 5355 delta = ap->cur_time - ap->link_time; 5356 if (delta > ANEG_STATE_SETTLE_TIME) { 5357 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) { 5358 ap->state = ANEG_STATE_IDLE_DETECT_INIT; 5359 } else { 5360 if ((ap->txconfig & ANEG_CFG_NP) == 0 && 5361 !(ap->flags & MR_NP_RX)) { 5362 ap->state = ANEG_STATE_IDLE_DETECT_INIT; 5363 } else { 5364 ret = ANEG_FAILED; 5365 } 5366 } 5367 } 5368 break; 5369 5370 case ANEG_STATE_IDLE_DETECT_INIT: 5371 ap->link_time = ap->cur_time; 5372 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; 5373 tw32_f(MAC_MODE, tp->mac_mode); 5374 udelay(40); 5375 5376 ap->state = ANEG_STATE_IDLE_DETECT; 5377 ret = ANEG_TIMER_ENAB; 5378 break; 5379 5380 case ANEG_STATE_IDLE_DETECT: 5381 if (ap->ability_match != 0 && 5382 ap->rxconfig == 0) { 5383 ap->state = ANEG_STATE_AN_ENABLE; 5384 break; 5385 } 5386 delta = ap->cur_time - ap->link_time; 5387 if (delta > ANEG_STATE_SETTLE_TIME) { 5388 /* XXX another gem from the Broadcom driver :( */ 5389 ap->state = ANEG_STATE_LINK_OK; 5390 } 5391 break; 5392 5393 case ANEG_STATE_LINK_OK: 5394 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK); 5395 ret = ANEG_DONE; 5396 break; 5397 5398 case ANEG_STATE_NEXT_PAGE_WAIT_INIT: 5399 /* ??? unimplemented */ 5400 break; 5401 5402 case ANEG_STATE_NEXT_PAGE_WAIT: 5403 /* ??? 
unimplemented */ 5404 break; 5405 5406 default: 5407 ret = ANEG_FAILED; 5408 break; 5409 } 5410 5411 return ret; 5412 } 5413 5414 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags) 5415 { 5416 int res = 0; 5417 struct tg3_fiber_aneginfo aninfo; 5418 int status = ANEG_FAILED; 5419 unsigned int tick; 5420 u32 tmp; 5421 5422 tw32_f(MAC_TX_AUTO_NEG, 0); 5423 5424 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; 5425 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII); 5426 udelay(40); 5427 5428 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS); 5429 udelay(40); 5430 5431 memset(&aninfo, 0, sizeof(aninfo)); 5432 aninfo.flags |= MR_AN_ENABLE; 5433 aninfo.state = ANEG_STATE_UNKNOWN; 5434 aninfo.cur_time = 0; 5435 tick = 0; 5436 while (++tick < 195000) { 5437 status = tg3_fiber_aneg_smachine(tp, &aninfo); 5438 if (status == ANEG_DONE || status == ANEG_FAILED) 5439 break; 5440 5441 udelay(1); 5442 } 5443 5444 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; 5445 tw32_f(MAC_MODE, tp->mac_mode); 5446 udelay(40); 5447 5448 *txflags = aninfo.txconfig; 5449 *rxflags = aninfo.flags; 5450 5451 if (status == ANEG_DONE && 5452 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK | 5453 MR_LP_ADV_FULL_DUPLEX))) 5454 res = 1; 5455 5456 return res; 5457 } 5458 5459 static void tg3_init_bcm8002(struct tg3 *tp) 5460 { 5461 u32 mac_status = tr32(MAC_STATUS); 5462 int i; 5463 5464 /* Reset when initting first time or we have a link. */ 5465 if (tg3_flag(tp, INIT_COMPLETE) && 5466 !(mac_status & MAC_STATUS_PCS_SYNCED)) 5467 return; 5468 5469 /* Set PLL lock range. */ 5470 tg3_writephy(tp, 0x16, 0x8007); 5471 5472 /* SW reset */ 5473 tg3_writephy(tp, MII_BMCR, BMCR_RESET); 5474 5475 /* Wait for reset to complete. */ 5476 /* XXX schedule_timeout() ... */ 5477 for (i = 0; i < 500; i++) 5478 udelay(10); 5479 5480 /* Config mode; select PMA/Ch 1 regs. */ 5481 tg3_writephy(tp, 0x10, 0x8411); 5482 5483 /* Enable auto-lock and comdet, select txclk for tx. */ 5484 tg3_writephy(tp, 0x11, 0x0a10); 5485 5486 tg3_writephy(tp, 0x18, 0x00a0); 5487 tg3_writephy(tp, 0x16, 0x41ff); 5488 5489 /* Assert and deassert POR. */ 5490 tg3_writephy(tp, 0x13, 0x0400); 5491 udelay(40); 5492 tg3_writephy(tp, 0x13, 0x0000); 5493 5494 tg3_writephy(tp, 0x11, 0x0a50); 5495 udelay(40); 5496 tg3_writephy(tp, 0x11, 0x0a10); 5497 5498 /* Wait for signal to stabilize */ 5499 /* XXX schedule_timeout() ... */ 5500 for (i = 0; i < 15000; i++) 5501 udelay(10); 5502 5503 /* Deselect the channel register so we can read the PHYID 5504 * later. 
5505 */ 5506 tg3_writephy(tp, 0x10, 0x8011); 5507 } 5508 5509 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status) 5510 { 5511 u16 flowctrl; 5512 bool current_link_up; 5513 u32 sg_dig_ctrl, sg_dig_status; 5514 u32 serdes_cfg, expected_sg_dig_ctrl; 5515 int workaround, port_a; 5516 5517 serdes_cfg = 0; 5518 expected_sg_dig_ctrl = 0; 5519 workaround = 0; 5520 port_a = 1; 5521 current_link_up = false; 5522 5523 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 && 5524 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) { 5525 workaround = 1; 5526 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) 5527 port_a = 0; 5528 5529 /* preserve bits 0-11,13,14 for signal pre-emphasis */ 5530 /* preserve bits 20-23 for voltage regulator */ 5531 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff; 5532 } 5533 5534 sg_dig_ctrl = tr32(SG_DIG_CTRL); 5535 5536 if (tp->link_config.autoneg != AUTONEG_ENABLE) { 5537 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) { 5538 if (workaround) { 5539 u32 val = serdes_cfg; 5540 5541 if (port_a) 5542 val |= 0xc010000; 5543 else 5544 val |= 0x4010000; 5545 tw32_f(MAC_SERDES_CFG, val); 5546 } 5547 5548 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP); 5549 } 5550 if (mac_status & MAC_STATUS_PCS_SYNCED) { 5551 tg3_setup_flow_control(tp, 0, 0); 5552 current_link_up = true; 5553 } 5554 goto out; 5555 } 5556 5557 /* Want auto-negotiation. */ 5558 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP; 5559 5560 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); 5561 if (flowctrl & ADVERTISE_1000XPAUSE) 5562 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP; 5563 if (flowctrl & ADVERTISE_1000XPSE_ASYM) 5564 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE; 5565 5566 if (sg_dig_ctrl != expected_sg_dig_ctrl) { 5567 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) && 5568 tp->serdes_counter && 5569 ((mac_status & (MAC_STATUS_PCS_SYNCED | 5570 MAC_STATUS_RCVD_CFG)) == 5571 MAC_STATUS_PCS_SYNCED)) { 5572 tp->serdes_counter--; 5573 current_link_up = true; 5574 goto out; 5575 } 5576 restart_autoneg: 5577 if (workaround) 5578 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000); 5579 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET); 5580 udelay(5); 5581 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl); 5582 5583 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; 5584 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5585 } else if (mac_status & (MAC_STATUS_PCS_SYNCED | 5586 MAC_STATUS_SIGNAL_DET)) { 5587 sg_dig_status = tr32(SG_DIG_STATUS); 5588 mac_status = tr32(MAC_STATUS); 5589 5590 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) && 5591 (mac_status & MAC_STATUS_PCS_SYNCED)) { 5592 u32 local_adv = 0, remote_adv = 0; 5593 5594 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP) 5595 local_adv |= ADVERTISE_1000XPAUSE; 5596 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE) 5597 local_adv |= ADVERTISE_1000XPSE_ASYM; 5598 5599 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE) 5600 remote_adv |= LPA_1000XPAUSE; 5601 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE) 5602 remote_adv |= LPA_1000XPAUSE_ASYM; 5603 5604 tp->link_config.rmt_adv = 5605 mii_adv_to_ethtool_adv_x(remote_adv); 5606 5607 tg3_setup_flow_control(tp, local_adv, remote_adv); 5608 current_link_up = true; 5609 tp->serdes_counter = 0; 5610 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5611 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) { 5612 if (tp->serdes_counter) 5613 tp->serdes_counter--; 5614 else { 5615 if (workaround) { 5616 u32 val = serdes_cfg; 5617 5618 if (port_a) 5619 val |= 0xc010000; 5620 else 5621 val |= 0x4010000; 5622 5623 
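                                        /* Equivalent one-liner
                                         * (illustrative): the branches above
                                         * compute
                                         *   val = serdes_cfg |
                                         *         (port_a ? 0xc010000
                                         *                 : 0x4010000);
                                         * i.e. the preserved pre-emphasis/
                                         * regulator bits plus a per-port
                                         * magic value.
                                         */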
tw32_f(MAC_SERDES_CFG, val); 5624 } 5625 5626 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP); 5627 udelay(40); 5628 5629 /* Link parallel detection - link is up */ 5630 /* only if we have PCS_SYNC and not */ 5631 /* receiving config code words */ 5632 mac_status = tr32(MAC_STATUS); 5633 if ((mac_status & MAC_STATUS_PCS_SYNCED) && 5634 !(mac_status & MAC_STATUS_RCVD_CFG)) { 5635 tg3_setup_flow_control(tp, 0, 0); 5636 current_link_up = true; 5637 tp->phy_flags |= 5638 TG3_PHYFLG_PARALLEL_DETECT; 5639 tp->serdes_counter = 5640 SERDES_PARALLEL_DET_TIMEOUT; 5641 } else 5642 goto restart_autoneg; 5643 } 5644 } 5645 } else { 5646 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; 5647 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5648 } 5649 5650 out: 5651 return current_link_up; 5652 } 5653 5654 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status) 5655 { 5656 bool current_link_up = false; 5657 5658 if (!(mac_status & MAC_STATUS_PCS_SYNCED)) 5659 goto out; 5660 5661 if (tp->link_config.autoneg == AUTONEG_ENABLE) { 5662 u32 txflags, rxflags; 5663 int i; 5664 5665 if (fiber_autoneg(tp, &txflags, &rxflags)) { 5666 u32 local_adv = 0, remote_adv = 0; 5667 5668 if (txflags & ANEG_CFG_PS1) 5669 local_adv |= ADVERTISE_1000XPAUSE; 5670 if (txflags & ANEG_CFG_PS2) 5671 local_adv |= ADVERTISE_1000XPSE_ASYM; 5672 5673 if (rxflags & MR_LP_ADV_SYM_PAUSE) 5674 remote_adv |= LPA_1000XPAUSE; 5675 if (rxflags & MR_LP_ADV_ASYM_PAUSE) 5676 remote_adv |= LPA_1000XPAUSE_ASYM; 5677 5678 tp->link_config.rmt_adv = 5679 mii_adv_to_ethtool_adv_x(remote_adv); 5680 5681 tg3_setup_flow_control(tp, local_adv, remote_adv); 5682 5683 current_link_up = true; 5684 } 5685 for (i = 0; i < 30; i++) { 5686 udelay(20); 5687 tw32_f(MAC_STATUS, 5688 (MAC_STATUS_SYNC_CHANGED | 5689 MAC_STATUS_CFG_CHANGED)); 5690 udelay(40); 5691 if ((tr32(MAC_STATUS) & 5692 (MAC_STATUS_SYNC_CHANGED | 5693 MAC_STATUS_CFG_CHANGED)) == 0) 5694 break; 5695 } 5696 5697 mac_status = tr32(MAC_STATUS); 5698 if (!current_link_up && 5699 (mac_status & MAC_STATUS_PCS_SYNCED) && 5700 !(mac_status & MAC_STATUS_RCVD_CFG)) 5701 current_link_up = true; 5702 } else { 5703 tg3_setup_flow_control(tp, 0, 0); 5704 5705 /* Forcing 1000FD link up. 
*/ 5706 current_link_up = true; 5707 5708 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS)); 5709 udelay(40); 5710 5711 tw32_f(MAC_MODE, tp->mac_mode); 5712 udelay(40); 5713 } 5714 5715 out: 5716 return current_link_up; 5717 } 5718 5719 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset) 5720 { 5721 u32 orig_pause_cfg; 5722 u16 orig_active_speed; 5723 u8 orig_active_duplex; 5724 u32 mac_status; 5725 bool current_link_up; 5726 int i; 5727 5728 orig_pause_cfg = tp->link_config.active_flowctrl; 5729 orig_active_speed = tp->link_config.active_speed; 5730 orig_active_duplex = tp->link_config.active_duplex; 5731 5732 if (!tg3_flag(tp, HW_AUTONEG) && 5733 tp->link_up && 5734 tg3_flag(tp, INIT_COMPLETE)) { 5735 mac_status = tr32(MAC_STATUS); 5736 mac_status &= (MAC_STATUS_PCS_SYNCED | 5737 MAC_STATUS_SIGNAL_DET | 5738 MAC_STATUS_CFG_CHANGED | 5739 MAC_STATUS_RCVD_CFG); 5740 if (mac_status == (MAC_STATUS_PCS_SYNCED | 5741 MAC_STATUS_SIGNAL_DET)) { 5742 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | 5743 MAC_STATUS_CFG_CHANGED)); 5744 return 0; 5745 } 5746 } 5747 5748 tw32_f(MAC_TX_AUTO_NEG, 0); 5749 5750 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); 5751 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI; 5752 tw32_f(MAC_MODE, tp->mac_mode); 5753 udelay(40); 5754 5755 if (tp->phy_id == TG3_PHY_ID_BCM8002) 5756 tg3_init_bcm8002(tp); 5757 5758 /* Enable link change event even when serdes polling. */ 5759 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); 5760 udelay(40); 5761 5762 current_link_up = false; 5763 tp->link_config.rmt_adv = 0; 5764 mac_status = tr32(MAC_STATUS); 5765 5766 if (tg3_flag(tp, HW_AUTONEG)) 5767 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status); 5768 else 5769 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status); 5770 5771 tp->napi[0].hw_status->status = 5772 (SD_STATUS_UPDATED | 5773 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG)); 5774 5775 for (i = 0; i < 100; i++) { 5776 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | 5777 MAC_STATUS_CFG_CHANGED)); 5778 udelay(5); 5779 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED | 5780 MAC_STATUS_CFG_CHANGED | 5781 MAC_STATUS_LNKSTATE_CHANGED)) == 0) 5782 break; 5783 } 5784 5785 mac_status = tr32(MAC_STATUS); 5786 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) { 5787 current_link_up = false; 5788 if (tp->link_config.autoneg == AUTONEG_ENABLE && 5789 tp->serdes_counter == 0) { 5790 tw32_f(MAC_MODE, (tp->mac_mode | 5791 MAC_MODE_SEND_CONFIGS)); 5792 udelay(1); 5793 tw32_f(MAC_MODE, tp->mac_mode); 5794 } 5795 } 5796 5797 if (current_link_up) { 5798 tp->link_config.active_speed = SPEED_1000; 5799 tp->link_config.active_duplex = DUPLEX_FULL; 5800 tw32(MAC_LED_CTRL, (tp->led_ctrl | 5801 LED_CTRL_LNKLED_OVERRIDE | 5802 LED_CTRL_1000MBPS_ON)); 5803 } else { 5804 tp->link_config.active_speed = SPEED_UNKNOWN; 5805 tp->link_config.active_duplex = DUPLEX_UNKNOWN; 5806 tw32(MAC_LED_CTRL, (tp->led_ctrl | 5807 LED_CTRL_LNKLED_OVERRIDE | 5808 LED_CTRL_TRAFFIC_OVERRIDE)); 5809 } 5810 5811 if (!tg3_test_and_report_link_chg(tp, current_link_up)) { 5812 u32 now_pause_cfg = tp->link_config.active_flowctrl; 5813 if (orig_pause_cfg != now_pause_cfg || 5814 orig_active_speed != tp->link_config.active_speed || 5815 orig_active_duplex != tp->link_config.active_duplex) 5816 tg3_link_report(tp); 5817 } 5818 5819 return 0; 5820 } 5821 5822 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset) 5823 { 5824 int err = 0; 5825 u32 bmsr, bmcr; 5826 u16 current_speed = SPEED_UNKNOWN; 5827 u8 current_duplex = 
DUPLEX_UNKNOWN; 5828 bool current_link_up = false; 5829 u32 local_adv, remote_adv, sgsr; 5830 5831 if ((tg3_asic_rev(tp) == ASIC_REV_5719 || 5832 tg3_asic_rev(tp) == ASIC_REV_5720) && 5833 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) && 5834 (sgsr & SERDES_TG3_SGMII_MODE)) { 5835 5836 if (force_reset) 5837 tg3_phy_reset(tp); 5838 5839 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK; 5840 5841 if (!(sgsr & SERDES_TG3_LINK_UP)) { 5842 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 5843 } else { 5844 current_link_up = true; 5845 if (sgsr & SERDES_TG3_SPEED_1000) { 5846 current_speed = SPEED_1000; 5847 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 5848 } else if (sgsr & SERDES_TG3_SPEED_100) { 5849 current_speed = SPEED_100; 5850 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 5851 } else { 5852 current_speed = SPEED_10; 5853 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 5854 } 5855 5856 if (sgsr & SERDES_TG3_FULL_DUPLEX) 5857 current_duplex = DUPLEX_FULL; 5858 else 5859 current_duplex = DUPLEX_HALF; 5860 } 5861 5862 tw32_f(MAC_MODE, tp->mac_mode); 5863 udelay(40); 5864 5865 tg3_clear_mac_status(tp); 5866 5867 goto fiber_setup_done; 5868 } 5869 5870 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 5871 tw32_f(MAC_MODE, tp->mac_mode); 5872 udelay(40); 5873 5874 tg3_clear_mac_status(tp); 5875 5876 if (force_reset) 5877 tg3_phy_reset(tp); 5878 5879 tp->link_config.rmt_adv = 0; 5880 5881 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 5882 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 5883 if (tg3_asic_rev(tp) == ASIC_REV_5714) { 5884 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) 5885 bmsr |= BMSR_LSTATUS; 5886 else 5887 bmsr &= ~BMSR_LSTATUS; 5888 } 5889 5890 err |= tg3_readphy(tp, MII_BMCR, &bmcr); 5891 5892 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset && 5893 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { 5894 /* do nothing, just check for link up at the end */ 5895 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) { 5896 u32 adv, newadv; 5897 5898 err |= tg3_readphy(tp, MII_ADVERTISE, &adv); 5899 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF | 5900 ADVERTISE_1000XPAUSE | 5901 ADVERTISE_1000XPSE_ASYM | 5902 ADVERTISE_SLCT); 5903 5904 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); 5905 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising); 5906 5907 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) { 5908 tg3_writephy(tp, MII_ADVERTISE, newadv); 5909 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART; 5910 tg3_writephy(tp, MII_BMCR, bmcr); 5911 5912 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); 5913 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S; 5914 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5915 5916 return err; 5917 } 5918 } else { 5919 u32 new_bmcr; 5920 5921 bmcr &= ~BMCR_SPEED1000; 5922 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX); 5923 5924 if (tp->link_config.duplex == DUPLEX_FULL) 5925 new_bmcr |= BMCR_FULLDPLX; 5926 5927 if (new_bmcr != bmcr) { 5928 /* BMCR_SPEED1000 is a reserved bit that needs 5929 * to be set on write. 
5930 */ 5931 new_bmcr |= BMCR_SPEED1000; 5932 5933 /* Force a linkdown */ 5934 if (tp->link_up) { 5935 u32 adv; 5936 5937 err |= tg3_readphy(tp, MII_ADVERTISE, &adv); 5938 adv &= ~(ADVERTISE_1000XFULL | 5939 ADVERTISE_1000XHALF | 5940 ADVERTISE_SLCT); 5941 tg3_writephy(tp, MII_ADVERTISE, adv); 5942 tg3_writephy(tp, MII_BMCR, bmcr | 5943 BMCR_ANRESTART | 5944 BMCR_ANENABLE); 5945 udelay(10); 5946 tg3_carrier_off(tp); 5947 } 5948 tg3_writephy(tp, MII_BMCR, new_bmcr); 5949 bmcr = new_bmcr; 5950 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 5951 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 5952 if (tg3_asic_rev(tp) == ASIC_REV_5714) { 5953 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) 5954 bmsr |= BMSR_LSTATUS; 5955 else 5956 bmsr &= ~BMSR_LSTATUS; 5957 } 5958 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5959 } 5960 } 5961 5962 if (bmsr & BMSR_LSTATUS) { 5963 current_speed = SPEED_1000; 5964 current_link_up = true; 5965 if (bmcr & BMCR_FULLDPLX) 5966 current_duplex = DUPLEX_FULL; 5967 else 5968 current_duplex = DUPLEX_HALF; 5969 5970 local_adv = 0; 5971 remote_adv = 0; 5972 5973 if (bmcr & BMCR_ANENABLE) { 5974 u32 common; 5975 5976 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv); 5977 err |= tg3_readphy(tp, MII_LPA, &remote_adv); 5978 common = local_adv & remote_adv; 5979 if (common & (ADVERTISE_1000XHALF | 5980 ADVERTISE_1000XFULL)) { 5981 if (common & ADVERTISE_1000XFULL) 5982 current_duplex = DUPLEX_FULL; 5983 else 5984 current_duplex = DUPLEX_HALF; 5985 5986 tp->link_config.rmt_adv = 5987 mii_adv_to_ethtool_adv_x(remote_adv); 5988 } else if (!tg3_flag(tp, 5780_CLASS)) { 5989 /* Link is up via parallel detect */ 5990 } else { 5991 current_link_up = false; 5992 } 5993 } 5994 } 5995 5996 fiber_setup_done: 5997 if (current_link_up && current_duplex == DUPLEX_FULL) 5998 tg3_setup_flow_control(tp, local_adv, remote_adv); 5999 6000 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; 6001 if (tp->link_config.active_duplex == DUPLEX_HALF) 6002 tp->mac_mode |= MAC_MODE_HALF_DUPLEX; 6003 6004 tw32_f(MAC_MODE, tp->mac_mode); 6005 udelay(40); 6006 6007 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); 6008 6009 tp->link_config.active_speed = current_speed; 6010 tp->link_config.active_duplex = current_duplex; 6011 6012 tg3_test_and_report_link_chg(tp, current_link_up); 6013 return err; 6014 } 6015 6016 static void tg3_serdes_parallel_detect(struct tg3 *tp) 6017 { 6018 if (tp->serdes_counter) { 6019 /* Give autoneg time to complete. */ 6020 tp->serdes_counter--; 6021 return; 6022 } 6023 6024 if (!tp->link_up && 6025 (tp->link_config.autoneg == AUTONEG_ENABLE)) { 6026 u32 bmcr; 6027 6028 tg3_readphy(tp, MII_BMCR, &bmcr); 6029 if (bmcr & BMCR_ANENABLE) { 6030 u32 phy1, phy2; 6031 6032 /* Select shadow register 0x1f */ 6033 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00); 6034 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1); 6035 6036 /* Select expansion interrupt status register */ 6037 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 6038 MII_TG3_DSP_EXP1_INT_STAT); 6039 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); 6040 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); 6041 6042 if ((phy1 & 0x10) && !(phy2 & 0x20)) { 6043 /* We have signal detect and not receiving 6044 * config code words, link is up by parallel 6045 * detection. 
6046 */ 6047 6048 bmcr &= ~BMCR_ANENABLE; 6049 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX; 6050 tg3_writephy(tp, MII_BMCR, bmcr); 6051 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT; 6052 } 6053 } 6054 } else if (tp->link_up && 6055 (tp->link_config.autoneg == AUTONEG_ENABLE) && 6056 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { 6057 u32 phy2; 6058 6059 /* Select expansion interrupt status register */ 6060 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 6061 MII_TG3_DSP_EXP1_INT_STAT); 6062 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); 6063 if (phy2 & 0x20) { 6064 u32 bmcr; 6065 6066 /* Config code words received, turn on autoneg. */ 6067 tg3_readphy(tp, MII_BMCR, &bmcr); 6068 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE); 6069 6070 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 6071 6072 } 6073 } 6074 } 6075 6076 static int tg3_setup_phy(struct tg3 *tp, bool force_reset) 6077 { 6078 u32 val; 6079 int err; 6080 6081 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 6082 err = tg3_setup_fiber_phy(tp, force_reset); 6083 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 6084 err = tg3_setup_fiber_mii_phy(tp, force_reset); 6085 else 6086 err = tg3_setup_copper_phy(tp, force_reset); 6087 6088 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) { 6089 u32 scale; 6090 6091 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK; 6092 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5) 6093 scale = 65; 6094 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25) 6095 scale = 6; 6096 else 6097 scale = 12; 6098 6099 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK; 6100 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT); 6101 tw32(GRC_MISC_CFG, val); 6102 } 6103 6104 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) | 6105 (6 << TX_LENGTHS_IPG_SHIFT); 6106 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 6107 tg3_asic_rev(tp) == ASIC_REV_5762) 6108 val |= tr32(MAC_TX_LENGTHS) & 6109 (TX_LENGTHS_JMB_FRM_LEN_MSK | 6110 TX_LENGTHS_CNT_DWN_VAL_MSK); 6111 6112 if (tp->link_config.active_speed == SPEED_1000 && 6113 tp->link_config.active_duplex == DUPLEX_HALF) 6114 tw32(MAC_TX_LENGTHS, val | 6115 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)); 6116 else 6117 tw32(MAC_TX_LENGTHS, val | 6118 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)); 6119 6120 if (!tg3_flag(tp, 5705_PLUS)) { 6121 if (tp->link_up) { 6122 tw32(HOSTCC_STAT_COAL_TICKS, 6123 tp->coal.stats_block_coalesce_usecs); 6124 } else { 6125 tw32(HOSTCC_STAT_COAL_TICKS, 0); 6126 } 6127 } 6128 6129 if (tg3_flag(tp, ASPM_WORKAROUND)) { 6130 val = tr32(PCIE_PWR_MGMT_THRESH); 6131 if (!tp->link_up) 6132 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) | 6133 tp->pwrmgmt_thresh; 6134 else 6135 val |= PCIE_PWR_MGMT_L1_THRESH_MSK; 6136 tw32(PCIE_PWR_MGMT_THRESH, val); 6137 } 6138 6139 return err; 6140 } 6141 6142 /* tp->lock must be held */ 6143 static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts) 6144 { 6145 u64 stamp; 6146 6147 ptp_read_system_prets(sts); 6148 stamp = tr32(TG3_EAV_REF_CLCK_LSB); 6149 ptp_read_system_postts(sts); 6150 stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32; 6151 6152 return stamp; 6153 } 6154 6155 /* tp->lock must be held */ 6156 static void tg3_refclk_write(struct tg3 *tp, u64 newval) 6157 { 6158 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL); 6159 6160 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP); 6161 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff); 6162 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32); 6163 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME); 6164 } 6165 6166 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync); 6167 static inline void 
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
static inline void tg3_full_unlock(struct tg3 *tp);

static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
{
	struct tg3 *tp = netdev_priv(dev);

	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE;

	if (tg3_flag(tp, PTP_CAPABLE)) {
		info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
					SOF_TIMESTAMPING_RX_HARDWARE |
					SOF_TIMESTAMPING_RAW_HARDWARE;
	}

	if (tp->ptp_clock)
		info->phc_index = ptp_clock_index(tp->ptp_clock);
	else
		info->phc_index = -1;

	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
	return 0;
}

static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
	bool neg_adj = false;
	u32 correction = 0;

	if (ppb < 0) {
		neg_adj = true;
		ppb = -ppb;
	}

	/* Frequency adjustment is performed using hardware with a 24 bit
	 * accumulator and a programmable correction value. On each clk, the
	 * correction value gets added to the accumulator and when it
	 * overflows, the time counter is incremented/decremented.
	 *
	 * So conversion from ppb to correction value is
	 *		ppb * (1 << 24) / 1000000000
	 */
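	/* For example, a requested adjustment of ppb = 1000 (1 ppm) maps to
	 * correction = 1000 * 16777216 / 1000000000 = 16 (rounded down), so
	 * the accumulator gains an extra 16/2^24 of a tick on every clock.
	 */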
	correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
		     TG3_EAV_REF_CLK_CORRECT_MASK;

	tg3_full_lock(tp, 0);

	if (correction)
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
		     TG3_EAV_REF_CLK_CORRECT_EN |
		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
	else
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);

	tg3_full_unlock(tp);

	return 0;
}

static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	tp->ptp_adjust += delta;
	tg3_full_unlock(tp);

	return 0;
}

static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
			    struct ptp_system_timestamp *sts)
{
	u64 ns;
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	ns = tg3_refclk_read(tp, sts);
	ns += tp->ptp_adjust;
	tg3_full_unlock(tp);

	*ts = ns_to_timespec64(ns);

	return 0;
}

static int tg3_ptp_settime(struct ptp_clock_info *ptp,
			   const struct timespec64 *ts)
{
	u64 ns;
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	ns = timespec64_to_ns(ts);

	tg3_full_lock(tp, 0);
	tg3_refclk_write(tp, ns);
	tp->ptp_adjust = 0;
	tg3_full_unlock(tp);

	return 0;
}

static int tg3_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
	u32 clock_ctl;
	int rval = 0;

	switch (rq->type) {
	case PTP_CLK_REQ_PEROUT:
		if (rq->perout.index != 0)
			return -EINVAL;

		tg3_full_lock(tp, 0);
		clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
		clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;

		if (on) {
			u64 nsec;

			nsec = rq->perout.start.sec * 1000000000ULL +
			       rq->perout.start.nsec;

			if (rq->perout.period.sec || rq->perout.period.nsec) {
				netdev_warn(tp->dev,
					    "Device supports only a one-shot timesync output, period must be 0\n");
				rval = -EINVAL;
				goto err_out;
			}

			if (nsec & (1ULL << 63)) {
				netdev_warn(tp->dev,
					    "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
				rval = -EINVAL;
				goto err_out;
			}

			tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
			tw32(TG3_EAV_WATCHDOG0_MSB,
			     TG3_EAV_WATCHDOG0_EN |
			     ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));

			tw32(TG3_EAV_REF_CLCK_CTL,
			     clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
		} else {
			tw32(TG3_EAV_WATCHDOG0_MSB, 0);
			tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
		}

err_out:
		tg3_full_unlock(tp);
		return rval;

	default:
		break;
	}

	return -EOPNOTSUPP;
}

static const struct ptp_clock_info tg3_ptp_caps = {
	.owner		= THIS_MODULE,
	.name		= "tg3 clock",
	.max_adj	= 250000000,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 1,
	.n_pins		= 0,
	.pps		= 0,
	.adjfreq	= tg3_ptp_adjfreq,
	.adjtime	= tg3_ptp_adjtime,
	.gettimex64	= tg3_ptp_gettimex,
	.settime64	= tg3_ptp_settime,
	.enable		= tg3_ptp_enable,
};

static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
				     struct skb_shared_hwtstamps *timestamp)
{
	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
	timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
					  tp->ptp_adjust);
}
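/* Usage note: since the watchdog output programmed in tg3_ptp_enable() is
 * one-shot only, a PTP_PEROUT_REQUEST from user space must carry a zero
 * period (period.sec == 0 && period.nsec == 0) and a start time that fits
 * in 63 bits, exactly as validated above.
 */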
/* tp->lock must be held */
static void tg3_ptp_init(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE))
		return;

	/* Initialize the hardware clock to the system time. */
	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
	tp->ptp_adjust = 0;
	tp->ptp_info = tg3_ptp_caps;
}

/* tp->lock must be held */
static void tg3_ptp_resume(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE))
		return;

	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
	tp->ptp_adjust = 0;
}

static void tg3_ptp_fini(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
		return;

	ptp_clock_unregister(tp->ptp_clock);
	tp->ptp_clock = NULL;
	tp->ptp_adjust = 0;
}

static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}

static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
{
	int i;

	dst = (u32 *)((u8 *)dst + off);
	for (i = 0; i < len; i += sizeof(u32))
		*dst++ = tr32(off + i);
}

static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}

static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs)
		return;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);
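	/* Print the register snapshot four words per row, skipping rows
	 * that are entirely zero to keep the log readable.
	 */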
	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			   "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
			   "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}

/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	tg3_flag_set(tp, TX_RECOVERY_PENDING);
}

static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();
	return tnapi->tx_pending -
	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
}
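/* A worked example of the availability math above: with a 512-entry ring,
 * tx_prod = 5 and tx_cons = 510, the masked difference (5 - 510) & 511 = 7
 * descriptors are still in flight, so tx_pending - 7 slots remain; the
 * mask keeps the count correct across index wrap-around.
 */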
/* Tigon3 never reports partial packet sends. So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
			struct skb_shared_hwtstamps timestamp;
			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;

			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);

			skb_tstamp_tx(skb, &timestamp);
		}

		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		dev_consume_skb_any(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}

static void tg3_frag_free(bool is_frag, void *data)
{
	if (is_frag)
		skb_free_frag(data);
	else
		kfree(data);
}

static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
{
	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	if (!ri->data)
		return;

	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
			 map_sz, PCI_DMA_FROMDEVICE);
	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
	ri->data = NULL;
}


/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses.
 * For posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address). Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			     u32 opaque_key, u32 dest_idx_unmasked,
			     unsigned int *frag_size)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	if (skb_size <= PAGE_SIZE) {
		data = netdev_alloc_frag(skb_size);
		*frag_size = skb_size;
	} else {
		data = kmalloc(skb_size, GFP_ATOMIC);
		*frag_size = 0;
	}
	if (!data)
		return -ENOMEM;

	mapping = pci_map_single(tp->pdev,
				 data + TG3_RX_OFFSET(tp),
				 data_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
		tg3_frag_free(skb_size <= PAGE_SIZE, data);
		return -EIO;
	}

	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return data_size;
}
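/* Sizing note for the allocation above: a standard-ring buffer (the mapped
 * data size plus the receive offset and the aligned skb_shared_info)
 * normally fits within a 4K page, so the cheap netdev_alloc_frag() path is
 * taken, while jumbo buffers exceed PAGE_SIZE and fall back to kmalloc()
 * with *frag_size left at 0 so build_skb() treats the area as slab memory.
 */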
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant. See notes above
 * tg3_alloc_rx_data for full details.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->data = NULL;
}

/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host. The chip does not write into the original descriptor the
 * RX buffer was obtained from. The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
 * it is first placed into the on-chip ram. When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective. If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;
		u8 *data;
		u64 tstamp = 0;

		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		if (desc->err_vlan & RXD_ERR_MASK) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		prefetch(data + TG3_RX_OFFSET(tp));
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV1 ||
		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV2) {
			tstamp = tr32(TG3_RX_TSTAMP_LSB);
			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
		}

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;
			unsigned int frag_size;

			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
						     *post_ptr, &frag_size);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			/* Ensure that the update to the data happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->data = NULL;

			skb = build_skb(data, frag_size);
			if (!skb) {
				tg3_frag_free(frag_size != 0, data);
				goto drop_it_no_recycle;
			}
			skb_reserve(skb, TG3_RX_OFFSET(tp));
		} else {
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			skb = netdev_alloc_skb(tp->dev,
					       len + TG3_RAW_IP_ALIGN);
			if (skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len,
						    PCI_DMA_FROMDEVICE);
			memcpy(skb->data,
			       data + TG3_RX_OFFSET(tp),
			       len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr,
						       len,
						       PCI_DMA_FROMDEVICE);
		}

		skb_put(skb, len);
		if (tstamp)
			tg3_hwclock_to_timestamp(tp, tstamp,
						 skb_hwtstamps(skb));

		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q) &&
		    skb->protocol != htons(ETH_P_8021AD)) {
			dev_kfree_skb_any(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		/* Sync BD data before updating mailbox */
		wmb();

		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		if (tnapi != &tp->napi[1]) {
			tp->rx_refill = true;
			napi_schedule(&tp->napi[1].napi);
		}
	}

	return received;
}

static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				tw32_f(MAC_STATUS,
				       (MAC_STATUS_SYNC_CHANGED |
					MAC_STATUS_CFG_CHANGED |
					MAC_STATUS_MI_COMPLETION |
					MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, false);
			spin_unlock(&tp->lock);
		}
	}
}

static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}
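	/* Worked example of the copy-count logic above: with a 512-entry
	 * ring (mask 511), cons_idx = 510 and prod_idx = 4, the consumer is
	 * numerically ahead, so cpycnt = 512 - 510 = 2 and only the tail of
	 * the ring is copied on this pass; the next loop iteration wraps
	 * around for the rest. The jumbo ring below is drained the same way.
	 */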
	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}

static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	if (!tnapi->rx_rcb_prod_idx)
		return work_done;

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		tp->rx_refill = false;
		for (i = 1; i <= tp->rxq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}

static inline void tg3_reset_task_schedule(struct tg3 *tp)
{
	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
		schedule_work(&tp->reset_task);
}

static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}

static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {

			/* This test here is not race free, but will reduce
			 * the number of interrupts by looping again.
			 */
			if (tnapi == &tp->napi[1] && tp->rx_refill)
				continue;

			napi_complete_done(napi, work_done);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

			/* This test here is synchronized by napi_schedule()
			 * and napi_complete() to close the race condition.
			 */
			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
				tw32(HOSTCC_MODE, tp->coalesce_mode |
						  HOSTCC_MODE_ENABLE |
						  tnapi->coal_now);
			}
			mmiowb();
			break;
		}
	}

	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
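/* Note on the interrupt mailbox write above: the status tag shifted into
 * the upper bits re-enables the vector and, at the same time, reports how
 * far the driver has processed the status block, so a tag that is already
 * stale results in a fresh interrupt rather than lost work.
 */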
static void tg3_process_error(struct tg3 *tp)
{
	u32 val;
	bool real_error = false;

	if (tg3_flag(tp, ERROR_PROCESSED))
		return;

	/* Check Flow Attention register */
	val = tr32(HOSTCC_FLOW_ATTN);
	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
		netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
		real_error = true;
	}

	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
		netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
		real_error = true;
	}

	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
		netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
		real_error = true;
	}

	if (!real_error)
		return;

	tg3_dump_state(tp);

	tg3_flag_set(tp, ERROR_PROCESSED);
	tg3_reset_task_schedule(tp);
}

static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete_done(napi, work_done);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget.
	 */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}

static void tg3_napi_disable(struct tg3 *tp)
{
	int i;

	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
}

static void tg3_napi_enable(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
}

static void tg3_napi_init(struct tg3 *tp)
{
	int i;

	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
	for (i = 1; i < tp->irq_cnt; i++)
		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
}

static void tg3_napi_fini(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		netif_napi_del(&tp->napi[i].napi);
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
	netif_trans_update(tp->dev);	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_carrier_off(tp->dev);
	netif_tx_disable(tp->dev);
}

/* tp->lock must be held */
static inline void tg3_netif_start(struct tg3 *tp)
{
	tg3_ptp_resume(tp);

	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	if (tp->link_up)
		netif_carrier_on(tp->dev);

	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}

static void tg3_irq_quiesce(struct tg3 *tp)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	spin_unlock_bh(&tp->lock);

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	spin_lock_bh(&tp->lock);
}

/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well. Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}

static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}

/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}

/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}

static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts. The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps? re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}

static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.
	 * The flush impacts performance but excessive spurious interrupts
	 * can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream. We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled. Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}

/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_irq_sync(tp))
		return;

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif

static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	tg3_reset_task_schedule(tp);
}

/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
	u32 base = (u32) mapping & 0xffffffff;

	return base + len + 8 < base;
}

/* Test for TSO DMA buffers that cross into regions which are within MSS bytes
 * of any 4GB boundaries: 4G, 8G, etc
 */
static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					   u32 len, u32 mss)
{
	if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
		u32 base = (u32) mapping & 0xffffffff;

		return ((base + len + (mss & 0x3fff)) < base);
	}
	return 0;
}

/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	return 0;
#endif
}
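/* Example of the 4GB boundary test above: base = 0xfffffff0 and len = 0x20
 * give base + len + 8 = 0x100000018, which truncates to 0x18 in 32 bits and
 * is therefore less than base, flagging a buffer that straddles a 4GB
 * boundary and must take the workaround path.
 */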
static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
				 dma_addr_t mapping, u32 len, u32 flags,
				 u32 mss, u32 vlan)
{
	txbd->addr_hi = ((u64) mapping >> 32);
	txbd->addr_lo = ((u64) mapping & 0xffffffff);
	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
}

static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tp->dma_limit) {
		u32 prvidx = *entry;
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}

static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}

/* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (tg3_asic_rev(tp) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear.
		 */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb_any(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb_any(new_skb);
				ret = -1;
			}
		}
	}

	dev_consume_skb_any(skb);
	*pskb = new_skb;
	return ret;
}

static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
{
	/* Check if we will never have enough descriptors,
	 * as gso_segs can be more than current ring size
	 */
	return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
}

static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);

/* Use GSO to workaround all TSO packets that meet HW bug conditions
 * indicated in tg3_tx_frag_set()
 */
static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
		       struct netdev_queue *txq, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_tx_wake_queue(txq);
	}

	segs = skb_gso_segment(skb, tp->dev->features &
				    ~(NETIF_F_TSO | NETIF_F_TSO6));
	if (IS_ERR(segs) || !segs)
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_consume_skb_any(skb);

	return NETDEV_TX_OK;
}
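/* On the worst-case estimate above: each GSO segment is budgeted roughly
 * three descriptors (linear header data plus payload that may be split),
 * hence gso_segs * 3; this is a conservative heuristic rather than a
 * hardware-documented bound.
 */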
7942 */ 7943 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) { 7944 if (!netif_tx_queue_stopped(txq)) { 7945 netif_tx_stop_queue(txq); 7946 7947 /* This is a hard error, log it. */ 7948 netdev_err(dev, 7949 "BUG! Tx Ring full when queue awake!\n"); 7950 } 7951 return NETDEV_TX_BUSY; 7952 } 7953 7954 entry = tnapi->tx_prod; 7955 base_flags = 0; 7956 7957 mss = skb_shinfo(skb)->gso_size; 7958 if (mss) { 7959 u32 tcp_opt_len, hdr_len; 7960 7961 if (skb_cow_head(skb, 0)) 7962 goto drop; 7963 7964 iph = ip_hdr(skb); 7965 tcp_opt_len = tcp_optlen(skb); 7966 7967 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN; 7968 7969 /* HW/FW can not correctly segment packets that have been 7970 * vlan encapsulated. 7971 */ 7972 if (skb->protocol == htons(ETH_P_8021Q) || 7973 skb->protocol == htons(ETH_P_8021AD)) { 7974 if (tg3_tso_bug_gso_check(tnapi, skb)) 7975 return tg3_tso_bug(tp, tnapi, txq, skb); 7976 goto drop; 7977 } 7978 7979 if (!skb_is_gso_v6(skb)) { 7980 if (unlikely((ETH_HLEN + hdr_len) > 80) && 7981 tg3_flag(tp, TSO_BUG)) { 7982 if (tg3_tso_bug_gso_check(tnapi, skb)) 7983 return tg3_tso_bug(tp, tnapi, txq, skb); 7984 goto drop; 7985 } 7986 ip_csum = iph->check; 7987 ip_tot_len = iph->tot_len; 7988 iph->check = 0; 7989 iph->tot_len = htons(mss + hdr_len); 7990 } 7991 7992 base_flags |= (TXD_FLAG_CPU_PRE_DMA | 7993 TXD_FLAG_CPU_POST_DMA); 7994 7995 tcph = tcp_hdr(skb); 7996 tcp_csum = tcph->check; 7997 7998 if (tg3_flag(tp, HW_TSO_1) || 7999 tg3_flag(tp, HW_TSO_2) || 8000 tg3_flag(tp, HW_TSO_3)) { 8001 tcph->check = 0; 8002 base_flags &= ~TXD_FLAG_TCPUDP_CSUM; 8003 } else { 8004 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 8005 0, IPPROTO_TCP, 0); 8006 } 8007 8008 if (tg3_flag(tp, HW_TSO_3)) { 8009 mss |= (hdr_len & 0xc) << 12; 8010 if (hdr_len & 0x10) 8011 base_flags |= 0x00000010; 8012 base_flags |= (hdr_len & 0x3e0) << 5; 8013 } else if (tg3_flag(tp, HW_TSO_2)) 8014 mss |= hdr_len << 9; 8015 else if (tg3_flag(tp, HW_TSO_1) || 8016 tg3_asic_rev(tp) == ASIC_REV_5705) { 8017 if (tcp_opt_len || iph->ihl > 5) { 8018 int tsflags; 8019 8020 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); 8021 mss |= (tsflags << 11); 8022 } 8023 } else { 8024 if (tcp_opt_len || iph->ihl > 5) { 8025 int tsflags; 8026 8027 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); 8028 base_flags |= tsflags << 12; 8029 } 8030 } 8031 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 8032 /* HW/FW can not correctly checksum packets that have been 8033 * vlan encapsulated. 
		 */
		if (skb->protocol == htons(ETH_P_8021Q) ||
		    skb->protocol == htons(ETH_P_8021AD)) {
			if (skb_checksum_help(skb))
				goto drop;
		} else {
			base_flags |= TXD_FLAG_TCPUDP_CSUM;
		}
	}

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	if (skb_vlan_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = skb_vlan_tag_get(skb);
	}

	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
	    tg3_flag(tp, TX_TSTAMP_EN)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		base_flags |= TXD_FLAG_HWTSTAMP;
	}

	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;

	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			    ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		/* Now loop through additional data
		 * fragments, and queue them.
		 */
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
			/* If it's a TSO packet, do GSO instead of
			 * allocating and copying to a large linear SKB
			 */
			if (ip_tot_len) {
				iph->check = ip_csum;
				iph->tot_len = ip_tot_len;
			}
			tcph->check = tcp_csum;
			return tg3_tso_bug(tp, tnapi, txq, skb);
		}

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);
	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating mailbox */
	wmb();

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().

	/* Sync BD data before updating mailbox */
	wmb();

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	if (!skb->xmit_more || netif_xmit_stopped(txq)) {
		/* Packets are ready, update Tx producer idx on card. */
		tw32_tx_mbox(tnapi->prodmbox, entry);
		mmiowb();
	}

	return NETDEV_TX_OK;

dma_error:
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb_any(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}

static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
	if (enable) {
		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
				  MAC_MODE_PORT_MODE_MASK);

		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

		if (!tg3_flag(tp, 5705_PLUS))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else {
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

		if (tg3_flag(tp, 5705_PLUS) ||
		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
		    tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	tw32(MAC_MODE, tp->mac_mode);
	udelay(40);
}

static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, false);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}
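
/* Editorial note (not in the original source): internal MAC loopback is
 * exposed through the NETIF_F_LOOPBACK feature, so it can typically be
 * toggled from userspace with something like "ethtool -K <dev> loopback on",
 * which lands in tg3_set_features()/tg3_set_loopback() below.
 */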

static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (features & NETIF_F_LOOPBACK) {
		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, true);
		netif_carrier_on(tp->dev);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
	} else {
		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, false);
		/* Force link status check */
		tg3_setup_phy(tp, true);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
	}
}

static netdev_features_t tg3_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
		features &= ~NETIF_F_ALL_TSO;

	return features;
}

static int tg3_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
		tg3_set_loopback(dev, features);

	return 0;
}

static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					 tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						 TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				 tp->rx_pkt_map_sz);

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					 TG3_RX_JMB_MAP_SZ);
	}
}
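
/* Editorial note (not in the original source): only tp->napi[0].prodring is
 * the ring the hardware actually consumes; the per-vector prodrings act as
 * staging areas, which is why the branch above frees just the
 * cons_idx..prod_idx window for them but walks every slot of the real ring.
 */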

/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}

static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	kfree(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
				  tpr->rx_std, tpr->rx_std_mapping);
		tpr->rx_std = NULL;
	}
	if (tpr->rx_jmb) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
				  tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
	}
}

static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
{
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
				      GFP_KERNEL);
	if (!tpr->rx_std_buffers)
		return -ENOMEM;

	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
	if (!tpr->rx_std)
		goto err_out;

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
			goto err_out;

		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
						 TG3_RX_JMB_RING_BYTES(tp),
						 &tpr->rx_jmb_mapping,
						 GFP_KERNEL);
		if (!tpr->rx_jmb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_rx_prodring_fini(tp, tpr);
	return -ENOMEM;
}

/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
			struct sk_buff *skb = tnapi->tx_buffers[i].skb;

			if (!skb)
				continue;

			tg3_tx_skb_unmap(tnapi, i,
					 skb_shinfo(skb)->nr_frags - 1);

			dev_consume_skb_any(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
	}
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		if (tnapi->prodring.rx_std &&
		    tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}

static void tg3_mem_tx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->tx_ring) {
			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
					  tnapi->tx_ring, tnapi->tx_desc_mapping);
			tnapi->tx_ring = NULL;
		}

		kfree(tnapi->tx_buffers);
		tnapi->tx_buffers = NULL;
	}
}

static int tg3_mem_tx_acquire(struct tg3 *tp)
{
	int i;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* If multivector TSS is enabled, vector 0 does not handle
	 * tx interrupts.  Don't allocate any resources for it.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
		tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
					    sizeof(struct tg3_tx_ring_info),
					    GFP_KERNEL);
		if (!tnapi->tx_buffers)
			goto err_out;

		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
						    TG3_TX_RING_BYTES,
						    &tnapi->tx_desc_mapping,
						    GFP_KERNEL);
		if (!tnapi->tx_ring)
			goto err_out;
	}

	return 0;

err_out:
	tg3_mem_tx_release(tp);
	return -ENOMEM;
}

static void tg3_mem_rx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tg3_rx_prodring_fini(tp, &tnapi->prodring);

		if (!tnapi->rx_rcb)
			continue;

		dma_free_coherent(&tp->pdev->dev,
				  TG3_RX_RCB_RING_BYTES(tp),
				  tnapi->rx_rcb,
				  tnapi->rx_rcb_mapping);
		tnapi->rx_rcb = NULL;
	}
}

static int tg3_mem_rx_acquire(struct tg3 *tp)
{
	unsigned int i, limit;

	limit = tp->rxq_cnt;

	/* If RSS is enabled, we need a (dummy) producer ring
	 * set on vector zero.  This is the true hw prodring.
	 */
	if (tg3_flag(tp, ENABLE_RSS))
		limit++;

	for (i = 0; i < limit; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector RSS is enabled, vector 0
		 * does not handle rx or tx interrupts.
		 * Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
						    TG3_RX_RCB_RING_BYTES(tp),
						    &tnapi->rx_rcb_mapping,
						    GFP_KERNEL);
		if (!tnapi->rx_rcb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_mem_rx_release(tp);
	return -ENOMEM;
}
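
/* Editorial note (not in the original source): dma_zalloc_coherent() is the
 * zeroing variant of dma_alloc_coherent() that existed in kernels of this
 * vintage; later kernels dropped it once dma_alloc_coherent() itself started
 * returning zeroed memory.
 */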

/*
 * Must only be invoked once interrupt sources have been disabled and
 * the hardware has been shut down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	tg3_mem_rx_release(tp);
	tg3_mem_tx_release(tp);

	/* tp->hw_stats can be referenced safely:
	 * 1. under rtnl_lock
	 * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
	 */
	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}

/*
 * Must only be invoked once interrupt sources have been disabled and
 * the hardware has been shut down.  Can sleep.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
					   sizeof(struct tg3_hw_stats),
					   &tp->stats_mapping, GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
						       TG3_HW_STATUS_SIZE,
						       &tnapi->status_mapping,
						       GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		sblk = tnapi->hw_status;

		if (tg3_flag(tp, ENABLE_RSS)) {
			u16 *prodptr = NULL;

			/*
			 * When RSS is enabled, the status block format changes
			 * slightly.  The "rx_jumbo_consumer", "reserved",
			 * and "rx_mini_consumer" members get mapped to the
			 * other three rx return ring producer indexes.
			 */
			switch (i) {
			case 1:
				prodptr = &sblk->idx[0].rx_producer;
				break;
			case 2:
				prodptr = &sblk->rx_jumbo_consumer;
				break;
			case 3:
				prodptr = &sblk->reserved;
				break;
			case 4:
				prodptr = &sblk->rx_mini_consumer;
				break;
			}
			tnapi->rx_rcb_prod_idx = prodptr;
		} else {
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
		}
	}

	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
		goto err_out;

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
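
/* Editorial note (not in the original source): rx_rcb_prod_idx set up above
 * is what the per-vector poll loop dereferences to learn how far the
 * hardware has advanced, along the lines of:
 *
 *	hw_idx = *(tnapi->rx_rcb_prod_idx);
 *	while (tnapi->rx_rcb_ptr != hw_idx) {
 *		... process one return-ring descriptor ...
 *	}
 *
 * (a sketch of the consumer side; the real loop lives in tg3_rx()).
 */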

#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		if (pci_channel_offline(tp->pdev)) {
			dev_err(&tp->pdev->dev,
				"tg3_stop_block device offline, "
				"ofs=%lx enable_bit=%x\n",
				ofs, enable_bit);
			return -ENODEV;
		}

		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}

/* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, bool silent)
{
	int i, err;

	tg3_disable_ints(tp);

	if (pci_channel_offline(tp->pdev)) {
		tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
		tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
		err = -ENODEV;
		goto err_no_dev;
	}

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

err_no_dev:
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	return err;
}

/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}

/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (!tg3_flag(tp, PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}

static void tg3_override_clk(struct tg3 *tp)
{
	u32 val;

	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5717:
		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
		     TG3_CPMU_MAC_ORIDE_ENABLE);
		break;

	case ASIC_REV_5719:
	case ASIC_REV_5720:
		tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
		break;

	default:
		return;
	}
}

static void tg3_restore_clk(struct tg3 *tp)
{
	u32 val;

	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5717:
		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
		     val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
		break;

	case ASIC_REV_5719:
	case ASIC_REV_5720:
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
		break;

	default:
		return;
	}
}

/* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	if (!pci_device_is_present(tp->pdev))
		return -ENODEV;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupts
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	smp_mb();

	tg3_full_unlock(tp);

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	tg3_full_lock(tp, 0);

	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Set the clock to the highest frequency to avoid timeouts.  With link
	 * aware mode, the clock speed could be slow and bootcode does not
	 * complete within the expected time.  Override the clock to allow the
	 * bootcode to finish sooner and then restore it.
	 */
	tg3_override_clk(tp);

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips will not even respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to do this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
		u16 val16;

		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
			int j;
			u32 cfg_val;

			/* Wait for link training to complete. */
			for (j = 0; j < 5000; j++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);

		/* Clear error status */
		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
					   PCI_EXP_DEVSTA_CED |
					   PCI_EXP_DEVSTA_NFED |
					   PCI_EXP_DEVSTA_FED |
					   PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/*
		 * BCM4785: In order to avoid repercussions from using the
		 * potentially defective internal ROM, stop the Rx RISC CPU,
		 * which is not required for normal operation.
		 */
		tg3_stop_fw(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
	}

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tw32(GRC_MODE, tp->grc_mode);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	tg3_restore_clk(tp);

	/* Increase the core clock speed to fix tx timeout issue for 5762
	 * with 100Mbps link speed.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
		     TG3_CPMU_MAC_ORIDE_ENABLE);
	}

	/* Reprobe ASF enable state. */
	tg3_flag_clear(tp, ENABLE_ASF);
	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
			   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
			if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
			if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
		}
	}

	return 0;
}

static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
static void __tg3_set_rx_mode(struct net_device *);

/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, bool silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	__tg3_set_mac_addr(tp, false);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (tp->hw_stats) {
		/* Save the stats across chip resets... */
		tg3_get_nstats(tp, &tp->net_stats_prev);
		tg3_get_estats(tp, &tp->estats_prev);

		/* And make sure the next sample is new data */
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
	}

	return err;
}

static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err = 0;
	bool skip_mac_1 = false;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (!netif_running(dev))
		return 0;

	if (tg3_flag(tp, ENABLE_ASF)) {
		u32 addr0_high, addr0_low, addr1_high, addr1_low;

		addr0_high = tr32(MAC_ADDR_0_HIGH);
		addr0_low = tr32(MAC_ADDR_0_LOW);
		addr1_high = tr32(MAC_ADDR_1_HIGH);
		addr1_low = tr32(MAC_ADDR_1_LOW);

		/* Skip MAC addr 1 if ASF is using it. */
		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
		    !(addr1_high == 0 && addr1_low == 0))
			skip_mac_1 = true;
	}
	spin_lock_bh(&tp->lock);
	__tg3_set_mac_addr(tp, skip_mac_1);
	__tg3_set_rx_mode(dev);
	spin_unlock_bh(&tp->lock);

	return err;
}
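
/* Editorial note (not in the original source): each BDINFO block in NIC
 * SRAM holds a 64-bit host DMA address (TG3_BDINFO_HOST_ADDR, written as
 * high/low halves), a packed length/flags word (TG3_BDINFO_MAXLEN_FLAGS,
 * max buffer length in the upper 16 bits), and, on pre-5705 parts, the
 * descriptor location in NIC SRAM (TG3_BDINFO_NIC_ADDR) -- exactly the
 * fields tg3_set_bdinfo() below programs.
 */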

/* tp->lock is held. */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		      maxlen_flags);

	if (!tg3_flag(tp, 5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}
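
/* Editorial note (not in the original source): in the two coalescing
 * helpers below, vector 0 uses the base HOSTCC_{TX,RX}* registers while
 * vectors 1..n use the *_VEC1 registers, spaced 0x18 bytes apart per
 * vector; that is what the "i * 0x18" stride is doing.
 */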

static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;

	if (!tg3_flag(tp, ENABLE_TSS)) {
		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_TXCOL_TICKS, 0);
		tw32(HOSTCC_TXMAX_FRAMES, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);

		for (; i < tp->txq_cnt; i++) {
			u32 reg;

			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
			tw32(reg, ec->tx_coalesce_usecs);
			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames);
			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames_irq);
		}
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}

static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;
	u32 limit = tp->rxq_cnt;

	if (!tg3_flag(tp, ENABLE_RSS)) {
		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
		limit--;
	} else {
		tw32(HOSTCC_RXCOL_TICKS, 0);
		tw32(HOSTCC_RXMAX_FRAMES, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
	}

	for (; i < limit; i++) {
		u32 reg;

		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->rx_coalesce_usecs);
		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames);
		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames_irq);
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}

static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	tg3_coal_tx_init(tp, ec);
	tg3_coal_rx_init(tp, ec);

	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 val = ec->stats_block_coalesce_usecs;

		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);

		if (!tp->link_up)
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}
}

/* tp->lock is held. */
static void tg3_tx_rcbs_disable(struct tg3 *tp)
{
	u32 txrcb, limit;

	/* Disable all transmit rings but the first. */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5762)
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);
}

/* tp->lock is held. */
static void tg3_tx_rcbs_init(struct tg3 *tp)
{
	int i = 0;
	u32 txrcb = NIC_SRAM_SEND_RCB;

	if (tg3_flag(tp, ENABLE_TSS))
		i++;

	for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (!tnapi->tx_ring)
			continue;

		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
	}
}

/* tp->lock is held. */
static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
{
	u32 rxrcb, limit;

	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
		 tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);
}

/* tp->lock is held. */
static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
{
	int i = 0;
	u32 rxrcb = NIC_SRAM_RCV_RET_RCB;

	if (tg3_flag(tp, ENABLE_RSS))
		i++;

	for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (!tnapi->rx_rcb)
			continue;

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
			       BDINFO_FLAGS_MAXLEN_SHIFT, 0);
	}
}

/* tp->lock is held. */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk;
	struct tg3_napi *tnapi = &tp->napi[0];

	tg3_tx_rcbs_disable(tp);

	tg3_rx_ret_rcbs_disable(tp);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
		stblk += 8;

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	tg3_tx_rcbs_init(tp);
	tg3_rx_ret_rcbs_init(tp);
}

static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 57765_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}

static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg;
	u32 tmp;
	int j, k;

	reg = 0xffffffff;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;

			reg >>= 1;

			if (tmp)
				reg ^= CRC32_POLY_LE;
		}
	}

	return ~reg;
}
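
/* Editorial note (not in the original source): calc_crc() above is a
 * bit-serial little-endian CRC-32 over the buffer, i.e. it should be
 * equivalent to ~crc32_le(~0, buf, len) from <linux/crc32.h>.  The
 * multicast filter in __tg3_set_rx_mode() below then takes 7 bits of the
 * inverted CRC of each multicast address to pick one of the 128 hash bits
 * spread across the four 32-bit MAC_HASH_REG_* registers.
 */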

static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
	/* accept or reject all multicast frames */
	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}

static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
		rx_mode |= RX_MODE_PROMISC;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into the mac addr filter list */
		int i = 0;
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, dev) {
			__tg3_set_one_mac_addr(tp, ha->addr,
					       i + TG3_UCAST_ADDR_IDX(tp));
			i++;
		}
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}

static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
{
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
}

static void tg3_rss_check_indir_tbl(struct tg3 *tp)
{
	int i;

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return;

	if (tp->rxq_cnt == 1) {
		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
		return;
	}

	/* Validate table against current IRQ count */
	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
			break;
	}

	if (i != TG3_RSS_INDIR_TBL_SIZE)
		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
}
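
/* Editorial note (not in the original source): the indirection table is
 * written out as packed nibbles -- tg3_rss_write_indir_tbl() below folds
 * eight 4-bit queue indices into each 32-bit MAC_RSS_INDIR_TBL_* register,
 * which is why the loop shifts val left by 4 for every entry.
 */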

static void tg3_rss_write_indir_tbl(struct tg3 *tp)
{
	int i = 0;
	u32 reg = MAC_RSS_INDIR_TBL_0;

	while (i < TG3_RSS_INDIR_TBL_SIZE) {
		u32 val = tp->rss_ind_tbl[i];
		i++;
		for (; i % 8; i++) {
			val <<= 4;
			val |= tp->rss_ind_tbl[i];
		}
		tw32(reg, val);
		reg += 4;
	}
}

static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
{
	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
	else
		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
}

/* tp->lock is held. */
static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
{
	u32 val, rdmac_mode;
	int i, err, limit;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tg3_disable_ints(tp);

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);

	if (tg3_flag(tp, INIT_COMPLETE))
		tg3_abort_hw(tp, 1);

	if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
		tg3_phy_pull_config(tp);
		tg3_eee_pull_config(tp, NULL);
		tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
	}

	/* Enable MAC control of LPI */
	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
		tg3_setup_eee(tp);

	if (reset_phy)
		tg3_phy_reset(tp);

	err = tg3_chip_reset(tp);
	if (err)
		return err;

	tg3_write_sig_legacy(tp, RESET_KIND_INIT);

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		val = tr32(TG3_CPMU_CTRL);
		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
		tw32(TG3_CPMU_CTRL, val);

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);

		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
		val |= CPMU_LNK_AWARE_MACCLK_6_25;
		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);

		val = tr32(TG3_CPMU_HST_ACC);
		val &= ~CPMU_HST_ACC_MACCLK_MASK;
		val |= CPMU_HST_ACC_MACCLK_6_25;
		tw32(TG3_CPMU_HST_ACC, val);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
		       PCIE_PWR_MGMT_L1_THRESH_4MS;
		tw32(PCIE_PWR_MGMT_THRESH, val);

		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);

		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);

		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}
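
	/* Editorial note (not in the original source): the blocks that
	 * follow all use the same access pattern for the hidden PCIe PHY
	 * registers -- select a PL or DL register window through the
	 * GRC_MODE_PCIE_PORT_MASK field, poke a TG3_PCIE_TLDLPL_PORT
	 * register, then restore the saved GRC_MODE value.
	 */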

	if (tg3_flag(tp, L1PLLPD_EN)) {
		u32 grc_mode = tr32(GRC_MODE);

		/* Access the lower 1K of PL PCIE block registers. */
		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);

		tw32(GRC_MODE, grc_mode);
	}

	if (tg3_flag(tp, 57765_CLASS)) {
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of PL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_PL_LO_PHYCTL5);
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);

			tw32(GRC_MODE, grc_mode);
		}

		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
			u32 grc_mode;

			/* Fix transmit hangs */
			val = tr32(TG3_CPMU_PADRNG_CTL);
			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
			tw32(TG3_CPMU_PADRNG_CTL, val);

			grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of DL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_DL_LO_FTSMAX);
			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);

			tw32(GRC_MODE, grc_mode);
		}

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
	}

	/* This works around an issue with Athlon chipsets on
	 * B3 tigon3 silicon.  This bit has no effect on any
	 * other revision.  But do not set this on PCI Express
	 * chips and don't even touch the clocks if the CPMU is present.
	 */
	if (!tg3_flag(tp, CPMU_PRESENT)) {
		if (!tg3_flag(tp, PCI_EXPRESS))
			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE)) {
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_RETRY_SAME_DMA;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
		/* Enable some hw fixes. */
		val = tr32(TG3PCI_MSI_DATA);
		val |= (1 << 26) | (1 << 28) | (1 << 29);
		tw32(TG3PCI_MSI_DATA, val);
	}

	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to setup the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
	err = tg3_init_rings(tp);
	if (err)
		return err;

	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3PCI_DMA_RW_CTRL) &
		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
		if (!tg3_flag(tp, 57765_CLASS) &&
		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
		    tg3_asic_rev(tp) != ASIC_REV_5762)
			val |= DMA_RWCTRL_TAGGED_STAT_WA;
		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
		   tg3_asic_rev(tp) != ASIC_REV_5761) {
		/* This value is determined during the probe time DMA
		 * engine test, tg3_test_dma.
		 */
		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
			  GRC_MODE_4X_NIC_SEND_RINGS |
			  GRC_MODE_NO_TX_PHDR_CSUM |
			  GRC_MODE_NO_RX_PHDR_CSUM);
	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;

	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processors, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;

	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
	if (tp->rxptpctl)
		tw32(TG3_RX_PTP_CTL,
		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

	if (tg3_flag(tp, PTP_CAPABLE))
		val |= GRC_MODE_TIME_SYNC_ENABLE;

	tw32(GRC_MODE, tp->grc_mode | val);

	/* On one of the AMD platforms, MRRS is restricted to 4000 because of
	 * a south bridge limitation.  As a workaround, the driver sets the
	 * MRRS to 2048 instead of the default 4096.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
		val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
		tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
	}

	/* Setup the timer prescaler register.  Clock is always 66 MHz. */
	val = tr32(GRC_MISC_CFG);
	val &= ~0xff;
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
	tw32(GRC_MISC_CFG, val);

	/* Initialize MBUF/DESC pool. */
	if (tg3_flag(tp, 5750_PLUS)) {
		/* Do nothing. */
	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
		if (tg3_asic_rev(tp) == ASIC_REV_5704)
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
		else
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	} else if (tg3_flag(tp, TSO_CAPABLE)) {
		int fw_len;

		fw_len = tp->fw_len;
		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
		tw32(BUFMGR_MB_POOL_ADDR,
		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
		tw32(BUFMGR_MB_POOL_SIZE,
		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
	}

	if (tp->dev->mtu <= ETH_DATA_LEN) {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water);
	} else {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water_jumbo);
	}
	tw32(BUFMGR_DMA_LOW_WATER,
	     tp->bufmgr_config.dma_low_water);
	tw32(BUFMGR_DMA_HIGH_WATER,
	     tp->bufmgr_config.dma_high_water);

	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
	tw32(BUFMGR_MODE, val);
	for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
			break;
		udelay(10);
	}
	if (i >= 2000) {
		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
		return -ENODEV;
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);

	tg3_setup_rxbd_thresholds(tp);
RCVDBDI_JUMBO_BD: jumbo frame rx ring 10176 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work) 10177 * 10178 * like so: 10179 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring 10180 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) | 10181 * ring attribute flags 10182 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM 10183 * 10184 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries. 10185 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries. 10186 * 10187 * The size of each ring is fixed in the firmware, but the location is 10188 * configurable. 10189 */ 10190 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, 10191 ((u64) tpr->rx_std_mapping >> 32)); 10192 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, 10193 ((u64) tpr->rx_std_mapping & 0xffffffff)); 10194 if (!tg3_flag(tp, 5717_PLUS)) 10195 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, 10196 NIC_SRAM_RX_BUFFER_DESC); 10197 10198 /* Disable the mini ring */ 10199 if (!tg3_flag(tp, 5705_PLUS)) 10200 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS, 10201 BDINFO_FLAGS_DISABLED); 10202 10203 /* Program the jumbo buffer descriptor ring control 10204 * blocks on those devices that have them. 10205 */ 10206 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || 10207 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) { 10208 10209 if (tg3_flag(tp, JUMBO_RING_ENABLE)) { 10210 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, 10211 ((u64) tpr->rx_jmb_mapping >> 32)); 10212 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, 10213 ((u64) tpr->rx_jmb_mapping & 0xffffffff)); 10214 val = TG3_RX_JMB_RING_SIZE(tp) << 10215 BDINFO_FLAGS_MAXLEN_SHIFT; 10216 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, 10217 val | BDINFO_FLAGS_USE_EXT_RECV); 10218 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) || 10219 tg3_flag(tp, 57765_CLASS) || 10220 tg3_asic_rev(tp) == ASIC_REV_5762) 10221 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, 10222 NIC_SRAM_RX_JUMBO_BUFFER_DESC); 10223 } else { 10224 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, 10225 BDINFO_FLAGS_DISABLED); 10226 } 10227 10228 if (tg3_flag(tp, 57765_PLUS)) { 10229 val = TG3_RX_STD_RING_SIZE(tp); 10230 val <<= BDINFO_FLAGS_MAXLEN_SHIFT; 10231 val |= (TG3_RX_STD_DMA_SZ << 2); 10232 } else 10233 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT; 10234 } else 10235 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT; 10236 10237 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val); 10238 10239 tpr->rx_std_prod_idx = tp->rx_pending; 10240 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx); 10241 10242 tpr->rx_jmb_prod_idx = 10243 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0; 10244 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx); 10245 10246 tg3_rings_reset(tp); 10247 10248 /* Initialize MAC address and backoff seed. */ 10249 __tg3_set_mac_addr(tp, false); 10250 10251 /* MTU + ethernet header + FCS + optional VLAN tag */ 10252 tw32(MAC_RX_MTU_SIZE, 10253 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); 10254 10255 /* The slot time is changed by tg3_setup_phy if we 10256 * run at gigabit with half duplex. 
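 * The values programmed below (IPG CRS 2, IPG 6, slot time 32) appear
 * to be the IEEE 802.3 defaults expressed in the MAC's internal
 * units; only the slot time needs to be widened for that case.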
10257 */ 10258 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) | 10259 (6 << TX_LENGTHS_IPG_SHIFT) | 10260 (32 << TX_LENGTHS_SLOT_TIME_SHIFT); 10261 10262 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 10263 tg3_asic_rev(tp) == ASIC_REV_5762) 10264 val |= tr32(MAC_TX_LENGTHS) & 10265 (TX_LENGTHS_JMB_FRM_LEN_MSK | 10266 TX_LENGTHS_CNT_DWN_VAL_MSK); 10267 10268 tw32(MAC_TX_LENGTHS, val); 10269 10270 /* Receive rules. */ 10271 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS); 10272 tw32(RCVLPC_CONFIG, 0x0181); 10273 10274 /* Calculate RDMAC_MODE setting early, we need it to determine 10275 * the RCVLPC_STATE_ENABLE mask. 10276 */ 10277 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB | 10278 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB | 10279 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB | 10280 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB | 10281 RDMAC_MODE_LNGREAD_ENAB); 10282 10283 if (tg3_asic_rev(tp) == ASIC_REV_5717) 10284 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS; 10285 10286 if (tg3_asic_rev(tp) == ASIC_REV_5784 || 10287 tg3_asic_rev(tp) == ASIC_REV_5785 || 10288 tg3_asic_rev(tp) == ASIC_REV_57780) 10289 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB | 10290 RDMAC_MODE_MBUF_RBD_CRPT_ENAB | 10291 RDMAC_MODE_MBUF_SBD_CRPT_ENAB; 10292 10293 if (tg3_asic_rev(tp) == ASIC_REV_5705 && 10294 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) { 10295 if (tg3_flag(tp, TSO_CAPABLE) && 10296 tg3_asic_rev(tp) == ASIC_REV_5705) { 10297 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128; 10298 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && 10299 !tg3_flag(tp, IS_5788)) { 10300 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; 10301 } 10302 } 10303 10304 if (tg3_flag(tp, PCI_EXPRESS)) 10305 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; 10306 10307 if (tg3_asic_rev(tp) == ASIC_REV_57766) { 10308 tp->dma_limit = 0; 10309 if (tp->dev->mtu <= ETH_DATA_LEN) { 10310 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR; 10311 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K; 10312 } 10313 } 10314 10315 if (tg3_flag(tp, HW_TSO_1) || 10316 tg3_flag(tp, HW_TSO_2) || 10317 tg3_flag(tp, HW_TSO_3)) 10318 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN; 10319 10320 if (tg3_flag(tp, 57765_PLUS) || 10321 tg3_asic_rev(tp) == ASIC_REV_5785 || 10322 tg3_asic_rev(tp) == ASIC_REV_57780) 10323 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN; 10324 10325 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 10326 tg3_asic_rev(tp) == ASIC_REV_5762) 10327 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET; 10328 10329 if (tg3_asic_rev(tp) == ASIC_REV_5761 || 10330 tg3_asic_rev(tp) == ASIC_REV_5784 || 10331 tg3_asic_rev(tp) == ASIC_REV_5785 || 10332 tg3_asic_rev(tp) == ASIC_REV_57780 || 10333 tg3_flag(tp, 57765_PLUS)) { 10334 u32 tgtreg; 10335 10336 if (tg3_asic_rev(tp) == ASIC_REV_5762) 10337 tgtreg = TG3_RDMA_RSRVCTRL_REG2; 10338 else 10339 tgtreg = TG3_RDMA_RSRVCTRL_REG; 10340 10341 val = tr32(tgtreg); 10342 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || 10343 tg3_asic_rev(tp) == ASIC_REV_5762) { 10344 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK | 10345 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK | 10346 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK); 10347 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B | 10348 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K | 10349 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K; 10350 } 10351 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX); 10352 } 10353 10354 if (tg3_asic_rev(tp) == ASIC_REV_5719 || 10355 tg3_asic_rev(tp) == ASIC_REV_5720 || 10356 tg3_asic_rev(tp) == ASIC_REV_5762) { 10357 u32 tgtreg; 10358 10359 if (tg3_asic_rev(tp) == ASIC_REV_5762) 10360 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2; 10361 else 
10362 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL; 10363 10364 val = tr32(tgtreg); 10365 tw32(tgtreg, val | 10366 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K | 10367 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K); 10368 } 10369 10370 /* Receive/send statistics. */ 10371 if (tg3_flag(tp, 5750_PLUS)) { 10372 val = tr32(RCVLPC_STATS_ENABLE); 10373 val &= ~RCVLPC_STATSENAB_DACK_FIX; 10374 tw32(RCVLPC_STATS_ENABLE, val); 10375 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) && 10376 tg3_flag(tp, TSO_CAPABLE)) { 10377 val = tr32(RCVLPC_STATS_ENABLE); 10378 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX; 10379 tw32(RCVLPC_STATS_ENABLE, val); 10380 } else { 10381 tw32(RCVLPC_STATS_ENABLE, 0xffffff); 10382 } 10383 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE); 10384 tw32(SNDDATAI_STATSENAB, 0xffffff); 10385 tw32(SNDDATAI_STATSCTRL, 10386 (SNDDATAI_SCTRL_ENABLE | 10387 SNDDATAI_SCTRL_FASTUPD)); 10388 10389 /* Setup host coalescing engine. */ 10390 tw32(HOSTCC_MODE, 0); 10391 for (i = 0; i < 2000; i++) { 10392 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE)) 10393 break; 10394 udelay(10); 10395 } 10396 10397 __tg3_set_coalesce(tp, &tp->coal); 10398 10399 if (!tg3_flag(tp, 5705_PLUS)) { 10400 /* Status/statistics block address. See tg3_timer, 10401 * the tg3_periodic_fetch_stats call there, and 10402 * tg3_get_stats to see how this works for 5705/5750 chips. 10403 */ 10404 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, 10405 ((u64) tp->stats_mapping >> 32)); 10406 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, 10407 ((u64) tp->stats_mapping & 0xffffffff)); 10408 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK); 10409 10410 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK); 10411 10412 /* Clear statistics and status block memory areas */ 10413 for (i = NIC_SRAM_STATS_BLK; 10414 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE; 10415 i += sizeof(u32)) { 10416 tg3_write_mem(tp, i, 0); 10417 udelay(40); 10418 } 10419 } 10420 10421 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode); 10422 10423 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE); 10424 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE); 10425 if (!tg3_flag(tp, 5705_PLUS)) 10426 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE); 10427 10428 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { 10429 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 10430 /* reset to prevent losing 1st rx packet intermittently */ 10431 tw32_f(MAC_RX_MODE, RX_MODE_RESET); 10432 udelay(10); 10433 } 10434 10435 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | 10436 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | 10437 MAC_MODE_FHDE_ENABLE; 10438 if (tg3_flag(tp, ENABLE_APE)) 10439 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; 10440 if (!tg3_flag(tp, 5705_PLUS) && 10441 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 10442 tg3_asic_rev(tp) != ASIC_REV_5700) 10443 tp->mac_mode |= MAC_MODE_LINK_POLARITY; 10444 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR); 10445 udelay(40); 10446 10447 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants(). 10448 * If TG3_FLAG_IS_NIC is zero, we should read the 10449 * register to preserve the GPIO settings for LOMs. The GPIOs, 10450 * whether used as inputs or outputs, are set by boot code after 10451 * reset. 
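 * The read-modify-write below folds the GPIO bits currently in
 * GRC_LOCAL_CTRL into tp->grc_local_ctrl (under gpio_mask), so GPIO
 * state programmed by a LOM's boot code survives this initialization.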
10452 */ 10453 if (!tg3_flag(tp, IS_NIC)) { 10454 u32 gpio_mask; 10455 10456 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 | 10457 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 | 10458 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2; 10459 10460 if (tg3_asic_rev(tp) == ASIC_REV_5752) 10461 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 | 10462 GRC_LCLCTRL_GPIO_OUTPUT3; 10463 10464 if (tg3_asic_rev(tp) == ASIC_REV_5755) 10465 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL; 10466 10467 tp->grc_local_ctrl &= ~gpio_mask; 10468 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask; 10469 10470 /* GPIO1 must be driven high for eeprom write protect */ 10471 if (tg3_flag(tp, EEPROM_WRITE_PROT)) 10472 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | 10473 GRC_LCLCTRL_GPIO_OUTPUT1); 10474 } 10475 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 10476 udelay(100); 10477 10478 if (tg3_flag(tp, USING_MSIX)) { 10479 val = tr32(MSGINT_MODE); 10480 val |= MSGINT_MODE_ENABLE; 10481 if (tp->irq_cnt > 1) 10482 val |= MSGINT_MODE_MULTIVEC_EN; 10483 if (!tg3_flag(tp, 1SHOT_MSI)) 10484 val |= MSGINT_MODE_ONE_SHOT_DISABLE; 10485 tw32(MSGINT_MODE, val); 10486 } 10487 10488 if (!tg3_flag(tp, 5705_PLUS)) { 10489 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE); 10490 udelay(40); 10491 } 10492 10493 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB | 10494 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB | 10495 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB | 10496 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB | 10497 WDMAC_MODE_LNGREAD_ENAB); 10498 10499 if (tg3_asic_rev(tp) == ASIC_REV_5705 && 10500 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) { 10501 if (tg3_flag(tp, TSO_CAPABLE) && 10502 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 || 10503 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) { 10504 /* nothing */ 10505 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && 10506 !tg3_flag(tp, IS_5788)) { 10507 val |= WDMAC_MODE_RX_ACCEL; 10508 } 10509 } 10510 10511 /* Enable host coalescing bug fix */ 10512 if (tg3_flag(tp, 5755_PLUS)) 10513 val |= WDMAC_MODE_STATUS_TAG_FIX; 10514 10515 if (tg3_asic_rev(tp) == ASIC_REV_5785) 10516 val |= WDMAC_MODE_BURST_ALL_DATA; 10517 10518 tw32_f(WDMAC_MODE, val); 10519 udelay(40); 10520 10521 if (tg3_flag(tp, PCIX_MODE)) { 10522 u16 pcix_cmd; 10523 10524 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 10525 &pcix_cmd); 10526 if (tg3_asic_rev(tp) == ASIC_REV_5703) { 10527 pcix_cmd &= ~PCI_X_CMD_MAX_READ; 10528 pcix_cmd |= PCI_X_CMD_READ_2K; 10529 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) { 10530 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ); 10531 pcix_cmd |= PCI_X_CMD_READ_2K; 10532 } 10533 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 10534 pcix_cmd); 10535 } 10536 10537 tw32_f(RDMAC_MODE, rdmac_mode); 10538 udelay(40); 10539 10540 if (tg3_asic_rev(tp) == ASIC_REV_5719 || 10541 tg3_asic_rev(tp) == ASIC_REV_5720) { 10542 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) { 10543 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp)) 10544 break; 10545 } 10546 if (i < TG3_NUM_RDMA_CHANNELS) { 10547 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); 10548 val |= tg3_lso_rd_dma_workaround_bit(tp); 10549 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val); 10550 tg3_flag_set(tp, 5719_5720_RDMA_BUG); 10551 } 10552 } 10553 10554 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE); 10555 if (!tg3_flag(tp, 5705_PLUS)) 10556 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE); 10557 10558 if (tg3_asic_rev(tp) == ASIC_REV_5761) 10559 tw32(SNDDATAC_MODE, 10560 SNDDATAC_MODE_ENABLE | 
SNDDATAC_MODE_CDELAY); 10561 else 10562 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE); 10563 10564 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE); 10565 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB); 10566 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ; 10567 if (tg3_flag(tp, LRG_PROD_RING_CAP)) 10568 val |= RCVDBDI_MODE_LRG_RING_SZ; 10569 tw32(RCVDBDI_MODE, val); 10570 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE); 10571 if (tg3_flag(tp, HW_TSO_1) || 10572 tg3_flag(tp, HW_TSO_2) || 10573 tg3_flag(tp, HW_TSO_3)) 10574 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8); 10575 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE; 10576 if (tg3_flag(tp, ENABLE_TSS)) 10577 val |= SNDBDI_MODE_MULTI_TXQ_EN; 10578 tw32(SNDBDI_MODE, val); 10579 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE); 10580 10581 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) { 10582 err = tg3_load_5701_a0_firmware_fix(tp); 10583 if (err) 10584 return err; 10585 } 10586 10587 if (tg3_asic_rev(tp) == ASIC_REV_57766) { 10588 /* Ignore any errors for the firmware download. If download 10589 * fails, the device will operate with EEE disabled 10590 */ 10591 tg3_load_57766_firmware(tp); 10592 } 10593 10594 if (tg3_flag(tp, TSO_CAPABLE)) { 10595 err = tg3_load_tso_firmware(tp); 10596 if (err) 10597 return err; 10598 } 10599 10600 tp->tx_mode = TX_MODE_ENABLE; 10601 10602 if (tg3_flag(tp, 5755_PLUS) || 10603 tg3_asic_rev(tp) == ASIC_REV_5906) 10604 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX; 10605 10606 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 10607 tg3_asic_rev(tp) == ASIC_REV_5762) { 10608 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE; 10609 tp->tx_mode &= ~val; 10610 tp->tx_mode |= tr32(MAC_TX_MODE) & val; 10611 } 10612 10613 tw32_f(MAC_TX_MODE, tp->tx_mode); 10614 udelay(100); 10615 10616 if (tg3_flag(tp, ENABLE_RSS)) { 10617 u32 rss_key[10]; 10618 10619 tg3_rss_write_indir_tbl(tp); 10620 10621 netdev_rss_key_fill(rss_key, 10 * sizeof(u32)); 10622 10623 for (i = 0; i < 10 ; i++) 10624 tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]); 10625 } 10626 10627 tp->rx_mode = RX_MODE_ENABLE; 10628 if (tg3_flag(tp, 5755_PLUS)) 10629 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE; 10630 10631 if (tg3_asic_rev(tp) == ASIC_REV_5762) 10632 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX; 10633 10634 if (tg3_flag(tp, ENABLE_RSS)) 10635 tp->rx_mode |= RX_MODE_RSS_ENABLE | 10636 RX_MODE_RSS_ITBL_HASH_BITS_7 | 10637 RX_MODE_RSS_IPV6_HASH_EN | 10638 RX_MODE_RSS_TCP_IPV6_HASH_EN | 10639 RX_MODE_RSS_IPV4_HASH_EN | 10640 RX_MODE_RSS_TCP_IPV4_HASH_EN; 10641 10642 tw32_f(MAC_RX_MODE, tp->rx_mode); 10643 udelay(10); 10644 10645 tw32(MAC_LED_CTRL, tp->led_ctrl); 10646 10647 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); 10648 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 10649 tw32_f(MAC_RX_MODE, RX_MODE_RESET); 10650 udelay(10); 10651 } 10652 tw32_f(MAC_RX_MODE, tp->rx_mode); 10653 udelay(10); 10654 10655 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 10656 if ((tg3_asic_rev(tp) == ASIC_REV_5704) && 10657 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) { 10658 /* Set drive transmission level to 1.2V */ 10659 /* only if the signal pre-emphasis bit is not set */ 10660 val = tr32(MAC_SERDES_CFG); 10661 val &= 0xfffff000; 10662 val |= 0x880; 10663 tw32(MAC_SERDES_CFG, val); 10664 } 10665 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) 10666 tw32(MAC_SERDES_CFG, 0x616000); 10667 } 10668 10669 /* Prevent chip from dropping frames when flow control 10670 * is enabled. 
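 * 57765-class parts get a low-water mark of one frame and all other
 * chips two; presumably this tracks the smaller MAC receive FIFO on
 * those devices.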
10671 */ 10672 if (tg3_flag(tp, 57765_CLASS)) 10673 val = 1; 10674 else 10675 val = 2; 10676 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val); 10677 10678 if (tg3_asic_rev(tp) == ASIC_REV_5704 && 10679 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { 10680 /* Use hardware link auto-negotiation */ 10681 tg3_flag_set(tp, HW_AUTONEG); 10682 } 10683 10684 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && 10685 tg3_asic_rev(tp) == ASIC_REV_5714) { 10686 u32 tmp; 10687 10688 tmp = tr32(SERDES_RX_CTRL); 10689 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT); 10690 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT; 10691 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT; 10692 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 10693 } 10694 10695 if (!tg3_flag(tp, USE_PHYLIB)) { 10696 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 10697 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; 10698 10699 err = tg3_setup_phy(tp, false); 10700 if (err) 10701 return err; 10702 10703 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 10704 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) { 10705 u32 tmp; 10706 10707 /* Clear CRC stats. */ 10708 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) { 10709 tg3_writephy(tp, MII_TG3_TEST1, 10710 tmp | MII_TG3_TEST1_CRC_EN); 10711 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp); 10712 } 10713 } 10714 } 10715 10716 __tg3_set_rx_mode(tp->dev); 10717 10718 /* Initialize receive rules. */ 10719 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK); 10720 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK); 10721 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK); 10722 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK); 10723 10724 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) 10725 limit = 8; 10726 else 10727 limit = 16; 10728 if (tg3_flag(tp, ENABLE_ASF)) 10729 limit -= 4; 10730 switch (limit) { 10731 case 16: 10732 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0); 10733 /* fall through */ 10734 case 15: 10735 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0); 10736 /* fall through */ 10737 case 14: 10738 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0); 10739 /* fall through */ 10740 case 13: 10741 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0); 10742 /* fall through */ 10743 case 12: 10744 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0); 10745 /* fall through */ 10746 case 11: 10747 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0); 10748 /* fall through */ 10749 case 10: 10750 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0); 10751 /* fall through */ 10752 case 9: 10753 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0); 10754 /* fall through */ 10755 case 8: 10756 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0); 10757 /* fall through */ 10758 case 7: 10759 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0); 10760 /* fall through */ 10761 case 6: 10762 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0); 10763 /* fall through */ 10764 case 5: 10765 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0); 10766 /* fall through */ 10767 case 4: 10768 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */ 10769 case 3: 10770 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */ 10771 case 2: 10772 case 1: 10773 10774 default: 10775 break; 10776 } 10777 10778 if (tg3_flag(tp, ENABLE_APE)) 10779 /* Write our heartbeat update interval to APE. 
*/ 10780 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS, 10781 APE_HOST_HEARTBEAT_INT_5SEC); 10782 10783 tg3_write_sig_post_reset(tp, RESET_KIND_INIT); 10784 10785 return 0; 10786 } 10787 10788 /* Called at device open time to get the chip ready for 10789 * packet processing. Invoked with tp->lock held. 10790 */ 10791 static int tg3_init_hw(struct tg3 *tp, bool reset_phy) 10792 { 10793 /* Chip may have been just powered on. If so, the boot code may still 10794 * be running initialization. Wait for it to finish to avoid races in 10795 * accessing the hardware. 10796 */ 10797 tg3_enable_register_access(tp); 10798 tg3_poll_fw(tp); 10799 10800 tg3_switch_clocks(tp); 10801 10802 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); 10803 10804 return tg3_reset_hw(tp, reset_phy); 10805 } 10806 10807 #ifdef CONFIG_TIGON3_HWMON 10808 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir) 10809 { 10810 int i; 10811 10812 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) { 10813 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN; 10814 10815 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len); 10816 off += len; 10817 10818 if (ocir->signature != TG3_OCIR_SIG_MAGIC || 10819 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE)) 10820 memset(ocir, 0, TG3_OCIR_LEN); 10821 } 10822 } 10823 10824 /* sysfs attributes for hwmon */ 10825 static ssize_t tg3_show_temp(struct device *dev, 10826 struct device_attribute *devattr, char *buf) 10827 { 10828 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 10829 struct tg3 *tp = dev_get_drvdata(dev); 10830 u32 temperature; 10831 10832 spin_lock_bh(&tp->lock); 10833 tg3_ape_scratchpad_read(tp, &temperature, attr->index, 10834 sizeof(temperature)); 10835 spin_unlock_bh(&tp->lock); 10836 return sprintf(buf, "%u\n", temperature * 1000); 10837 } 10838 10839 10840 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL, 10841 TG3_TEMP_SENSOR_OFFSET); 10842 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL, 10843 TG3_TEMP_CAUTION_OFFSET); 10844 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL, 10845 TG3_TEMP_MAX_OFFSET); 10846 10847 static struct attribute *tg3_attrs[] = { 10848 &sensor_dev_attr_temp1_input.dev_attr.attr, 10849 &sensor_dev_attr_temp1_crit.dev_attr.attr, 10850 &sensor_dev_attr_temp1_max.dev_attr.attr, 10851 NULL 10852 }; 10853 ATTRIBUTE_GROUPS(tg3); 10854 10855 static void tg3_hwmon_close(struct tg3 *tp) 10856 { 10857 if (tp->hwmon_dev) { 10858 hwmon_device_unregister(tp->hwmon_dev); 10859 tp->hwmon_dev = NULL; 10860 } 10861 } 10862 10863 static void tg3_hwmon_open(struct tg3 *tp) 10864 { 10865 int i; 10866 u32 size = 0; 10867 struct pci_dev *pdev = tp->pdev; 10868 struct tg3_ocir ocirs[TG3_SD_NUM_RECS]; 10869 10870 tg3_sd_scan_scratchpad(tp, ocirs); 10871 10872 for (i = 0; i < TG3_SD_NUM_RECS; i++) { 10873 if (!ocirs[i].src_data_length) 10874 continue; 10875 10876 size += ocirs[i].src_hdr_length; 10877 size += ocirs[i].src_data_length; 10878 } 10879 10880 if (!size) 10881 return; 10882 10883 tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3", 10884 tp, tg3_groups); 10885 if (IS_ERR(tp->hwmon_dev)) { 10886 tp->hwmon_dev = NULL; 10887 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n"); 10888 } 10889 } 10890 #else 10891 static inline void tg3_hwmon_close(struct tg3 *tp) { } 10892 static inline void tg3_hwmon_open(struct tg3 *tp) { } 10893 #endif /* CONFIG_TIGON3_HWMON */ 10894 10895 10896 #define TG3_STAT_ADD32(PSTAT, REG) \ 10897 do { u32 __val = tr32(REG); \ 10898 (PSTAT)->low += __val; 
\ 10899 if ((PSTAT)->low < __val) \ 10900 (PSTAT)->high += 1; \ 10901 } while (0) 10902 10903 static void tg3_periodic_fetch_stats(struct tg3 *tp) 10904 { 10905 struct tg3_hw_stats *sp = tp->hw_stats; 10906 10907 if (!tp->link_up) 10908 return; 10909 10910 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS); 10911 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS); 10912 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT); 10913 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT); 10914 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS); 10915 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS); 10916 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS); 10917 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED); 10918 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL); 10919 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL); 10920 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST); 10921 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST); 10922 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST); 10923 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) && 10924 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low + 10925 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) { 10926 u32 val; 10927 10928 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); 10929 val &= ~tg3_lso_rd_dma_workaround_bit(tp); 10930 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val); 10931 tg3_flag_clear(tp, 5719_5720_RDMA_BUG); 10932 } 10933 10934 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS); 10935 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS); 10936 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST); 10937 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST); 10938 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST); 10939 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS); 10940 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS); 10941 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD); 10942 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD); 10943 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD); 10944 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED); 10945 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG); 10946 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS); 10947 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE); 10948 10949 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT); 10950 if (tg3_asic_rev(tp) != ASIC_REV_5717 && 10951 tg3_asic_rev(tp) != ASIC_REV_5762 && 10952 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 && 10953 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) { 10954 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT); 10955 } else { 10956 u32 val = tr32(HOSTCC_FLOW_ATTN); 10957 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 
1 : 0; 10958 if (val) { 10959 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM); 10960 sp->rx_discards.low += val; 10961 if (sp->rx_discards.low < val) 10962 sp->rx_discards.high += 1; 10963 } 10964 sp->mbuf_lwm_thresh_hit = sp->rx_discards; 10965 } 10966 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT); 10967 } 10968 10969 static void tg3_chk_missed_msi(struct tg3 *tp) 10970 { 10971 u32 i; 10972 10973 for (i = 0; i < tp->irq_cnt; i++) { 10974 struct tg3_napi *tnapi = &tp->napi[i]; 10975 10976 if (tg3_has_work(tnapi)) { 10977 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr && 10978 tnapi->last_tx_cons == tnapi->tx_cons) { 10979 if (tnapi->chk_msi_cnt < 1) { 10980 tnapi->chk_msi_cnt++; 10981 return; 10982 } 10983 tg3_msi(0, tnapi); 10984 } 10985 } 10986 tnapi->chk_msi_cnt = 0; 10987 tnapi->last_rx_cons = tnapi->rx_rcb_ptr; 10988 tnapi->last_tx_cons = tnapi->tx_cons; 10989 } 10990 } 10991 10992 static void tg3_timer(struct timer_list *t) 10993 { 10994 struct tg3 *tp = from_timer(tp, t, timer); 10995 10996 spin_lock(&tp->lock); 10997 10998 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) { 10999 spin_unlock(&tp->lock); 11000 goto restart_timer; 11001 } 11002 11003 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 11004 tg3_flag(tp, 57765_CLASS)) 11005 tg3_chk_missed_msi(tp); 11006 11007 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) { 11008 /* BCM4785: Flush posted writes from GbE to host memory. */ 11009 tr32(HOSTCC_MODE); 11010 } 11011 11012 if (!tg3_flag(tp, TAGGED_STATUS)) { 11013 /* All of this garbage is because when using non-tagged 11014 * IRQ status the mailbox/status_block protocol the chip 11015 * uses with the cpu is race prone. 11016 */ 11017 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) { 11018 tw32(GRC_LOCAL_CTRL, 11019 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); 11020 } else { 11021 tw32(HOSTCC_MODE, tp->coalesce_mode | 11022 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW); 11023 } 11024 11025 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { 11026 spin_unlock(&tp->lock); 11027 tg3_reset_task_schedule(tp); 11028 goto restart_timer; 11029 } 11030 } 11031 11032 /* This part only runs once per second. 
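 * The base timer fires every tp->timer_offset jiffies (HZ / 10 on
 * most chips, see tg3_timer_init()) and tp->timer_counter divides
 * that back down: with timer_offset = HZ / 10, timer_multiplier is
 * 10, so the block below executes on every tenth invocation.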
*/ 11033 if (!--tp->timer_counter) { 11034 if (tg3_flag(tp, 5705_PLUS)) 11035 tg3_periodic_fetch_stats(tp); 11036 11037 if (tp->setlpicnt && !--tp->setlpicnt) 11038 tg3_phy_eee_enable(tp); 11039 11040 if (tg3_flag(tp, USE_LINKCHG_REG)) { 11041 u32 mac_stat; 11042 int phy_event; 11043 11044 mac_stat = tr32(MAC_STATUS); 11045 11046 phy_event = 0; 11047 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) { 11048 if (mac_stat & MAC_STATUS_MI_INTERRUPT) 11049 phy_event = 1; 11050 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED) 11051 phy_event = 1; 11052 11053 if (phy_event) 11054 tg3_setup_phy(tp, false); 11055 } else if (tg3_flag(tp, POLL_SERDES)) { 11056 u32 mac_stat = tr32(MAC_STATUS); 11057 int need_setup = 0; 11058 11059 if (tp->link_up && 11060 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) { 11061 need_setup = 1; 11062 } 11063 if (!tp->link_up && 11064 (mac_stat & (MAC_STATUS_PCS_SYNCED | 11065 MAC_STATUS_SIGNAL_DET))) { 11066 need_setup = 1; 11067 } 11068 if (need_setup) { 11069 if (!tp->serdes_counter) { 11070 tw32_f(MAC_MODE, 11071 (tp->mac_mode & 11072 ~MAC_MODE_PORT_MODE_MASK)); 11073 udelay(40); 11074 tw32_f(MAC_MODE, tp->mac_mode); 11075 udelay(40); 11076 } 11077 tg3_setup_phy(tp, false); 11078 } 11079 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && 11080 tg3_flag(tp, 5780_CLASS)) { 11081 tg3_serdes_parallel_detect(tp); 11082 } else if (tg3_flag(tp, POLL_CPMU_LINK)) { 11083 u32 cpmu = tr32(TG3_CPMU_STATUS); 11084 bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) == 11085 TG3_CPMU_STATUS_LINK_MASK); 11086 11087 if (link_up != tp->link_up) 11088 tg3_setup_phy(tp, false); 11089 } 11090 11091 tp->timer_counter = tp->timer_multiplier; 11092 } 11093 11094 /* Heartbeat is only sent once every 2 seconds. 11095 * 11096 * The heartbeat is to tell the ASF firmware that the host 11097 * driver is still alive. In the event that the OS crashes, 11098 * ASF needs to reset the hardware to free up the FIFO space 11099 * that may be filled with rx packets destined for the host. 11100 * If the FIFO is full, ASF will no longer function properly. 11101 * 11102 * Unintended resets have been reported on real-time kernels 11103 * where the timer doesn't run on time. Netpoll will also have 11104 * the same problem. 11105 * 11106 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware 11107 * to check the ring condition when the heartbeat is expiring 11108 * before doing the reset. This will prevent most unintended 11109 * resets.
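 * tp->asf_counter below divides the base timer in the same way as
 * timer_counter does; asf_multiplier is derived from
 * TG3_FW_UPDATE_FREQ_SEC in tg3_timer_init().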
11110 */ 11111 if (!--tp->asf_counter) { 11112 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) { 11113 tg3_wait_for_event_ack(tp); 11114 11115 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, 11116 FWCMD_NICDRV_ALIVE3); 11117 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4); 11118 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 11119 TG3_FW_UPDATE_TIMEOUT_SEC); 11120 11121 tg3_generate_fw_event(tp); 11122 } 11123 tp->asf_counter = tp->asf_multiplier; 11124 } 11125 11126 /* Update the APE heartbeat every 5 seconds.*/ 11127 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL); 11128 11129 spin_unlock(&tp->lock); 11130 11131 restart_timer: 11132 tp->timer.expires = jiffies + tp->timer_offset; 11133 add_timer(&tp->timer); 11134 } 11135 11136 static void tg3_timer_init(struct tg3 *tp) 11137 { 11138 if (tg3_flag(tp, TAGGED_STATUS) && 11139 tg3_asic_rev(tp) != ASIC_REV_5717 && 11140 !tg3_flag(tp, 57765_CLASS)) 11141 tp->timer_offset = HZ; 11142 else 11143 tp->timer_offset = HZ / 10; 11144 11145 BUG_ON(tp->timer_offset > HZ); 11146 11147 tp->timer_multiplier = (HZ / tp->timer_offset); 11148 tp->asf_multiplier = (HZ / tp->timer_offset) * 11149 TG3_FW_UPDATE_FREQ_SEC; 11150 11151 timer_setup(&tp->timer, tg3_timer, 0); 11152 } 11153 11154 static void tg3_timer_start(struct tg3 *tp) 11155 { 11156 tp->asf_counter = tp->asf_multiplier; 11157 tp->timer_counter = tp->timer_multiplier; 11158 11159 tp->timer.expires = jiffies + tp->timer_offset; 11160 add_timer(&tp->timer); 11161 } 11162 11163 static void tg3_timer_stop(struct tg3 *tp) 11164 { 11165 del_timer_sync(&tp->timer); 11166 } 11167 11168 /* Restart hardware after configuration changes, self-test, etc. 11169 * Invoked with tp->lock held. 11170 */ 11171 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy) 11172 __releases(tp->lock) 11173 __acquires(tp->lock) 11174 { 11175 int err; 11176 11177 err = tg3_init_hw(tp, reset_phy); 11178 if (err) { 11179 netdev_err(tp->dev, 11180 "Failed to re-initialize device, aborting\n"); 11181 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11182 tg3_full_unlock(tp); 11183 tg3_timer_stop(tp); 11184 tp->irq_sync = 0; 11185 tg3_napi_enable(tp); 11186 dev_close(tp->dev); 11187 tg3_full_lock(tp, 0); 11188 } 11189 return err; 11190 } 11191 11192 static void tg3_reset_task(struct work_struct *work) 11193 { 11194 struct tg3 *tp = container_of(work, struct tg3, reset_task); 11195 int err; 11196 11197 rtnl_lock(); 11198 tg3_full_lock(tp, 0); 11199 11200 if (!netif_running(tp->dev)) { 11201 tg3_flag_clear(tp, RESET_TASK_PENDING); 11202 tg3_full_unlock(tp); 11203 rtnl_unlock(); 11204 return; 11205 } 11206 11207 tg3_full_unlock(tp); 11208 11209 tg3_phy_stop(tp); 11210 11211 tg3_netif_stop(tp); 11212 11213 tg3_full_lock(tp, 1); 11214 11215 if (tg3_flag(tp, TX_RECOVERY_PENDING)) { 11216 tp->write32_tx_mbox = tg3_write32_tx_mbox; 11217 tp->write32_rx_mbox = tg3_write_flush_reg32; 11218 tg3_flag_set(tp, MBOX_WRITE_REORDER); 11219 tg3_flag_clear(tp, TX_RECOVERY_PENDING); 11220 } 11221 11222 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); 11223 err = tg3_init_hw(tp, true); 11224 if (err) 11225 goto out; 11226 11227 tg3_netif_start(tp); 11228 11229 out: 11230 tg3_full_unlock(tp); 11231 11232 if (!err) 11233 tg3_phy_start(tp); 11234 11235 tg3_flag_clear(tp, RESET_TASK_PENDING); 11236 rtnl_unlock(); 11237 } 11238 11239 static int tg3_request_irq(struct tg3 *tp, int irq_num) 11240 { 11241 irq_handler_t fn; 11242 unsigned long flags; 11243 char *name; 11244 struct tg3_napi *tnapi = &tp->napi[irq_num]; 11245 11246 if (tp->irq_cnt == 1) 11247 name = tp->dev->name; 
11248 else { 11249 name = &tnapi->irq_lbl[0]; 11250 if (tnapi->tx_buffers && tnapi->rx_rcb) 11251 snprintf(name, IFNAMSIZ, 11252 "%s-txrx-%d", tp->dev->name, irq_num); 11253 else if (tnapi->tx_buffers) 11254 snprintf(name, IFNAMSIZ, 11255 "%s-tx-%d", tp->dev->name, irq_num); 11256 else if (tnapi->rx_rcb) 11257 snprintf(name, IFNAMSIZ, 11258 "%s-rx-%d", tp->dev->name, irq_num); 11259 else 11260 snprintf(name, IFNAMSIZ, 11261 "%s-%d", tp->dev->name, irq_num); 11262 name[IFNAMSIZ-1] = 0; 11263 } 11264 11265 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) { 11266 fn = tg3_msi; 11267 if (tg3_flag(tp, 1SHOT_MSI)) 11268 fn = tg3_msi_1shot; 11269 flags = 0; 11270 } else { 11271 fn = tg3_interrupt; 11272 if (tg3_flag(tp, TAGGED_STATUS)) 11273 fn = tg3_interrupt_tagged; 11274 flags = IRQF_SHARED; 11275 } 11276 11277 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi); 11278 } 11279 11280 static int tg3_test_interrupt(struct tg3 *tp) 11281 { 11282 struct tg3_napi *tnapi = &tp->napi[0]; 11283 struct net_device *dev = tp->dev; 11284 int err, i, intr_ok = 0; 11285 u32 val; 11286 11287 if (!netif_running(dev)) 11288 return -ENODEV; 11289 11290 tg3_disable_ints(tp); 11291 11292 free_irq(tnapi->irq_vec, tnapi); 11293 11294 /* 11295 * Turn off MSI one shot mode. Otherwise this test has no 11296 * observable way to know whether the interrupt was delivered. 11297 */ 11298 if (tg3_flag(tp, 57765_PLUS)) { 11299 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE; 11300 tw32(MSGINT_MODE, val); 11301 } 11302 11303 err = request_irq(tnapi->irq_vec, tg3_test_isr, 11304 IRQF_SHARED, dev->name, tnapi); 11305 if (err) 11306 return err; 11307 11308 tnapi->hw_status->status &= ~SD_STATUS_UPDATED; 11309 tg3_enable_ints(tp); 11310 11311 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 11312 tnapi->coal_now); 11313 11314 for (i = 0; i < 5; i++) { 11315 u32 int_mbox, misc_host_ctrl; 11316 11317 int_mbox = tr32_mailbox(tnapi->int_mbox); 11318 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); 11319 11320 if ((int_mbox != 0) || 11321 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) { 11322 intr_ok = 1; 11323 break; 11324 } 11325 11326 if (tg3_flag(tp, 57765_PLUS) && 11327 tnapi->hw_status->status_tag != tnapi->last_tag) 11328 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); 11329 11330 msleep(10); 11331 } 11332 11333 tg3_disable_ints(tp); 11334 11335 free_irq(tnapi->irq_vec, tnapi); 11336 11337 err = tg3_request_irq(tp, 0); 11338 11339 if (err) 11340 return err; 11341 11342 if (intr_ok) { 11343 /* Reenable MSI one shot mode. */ 11344 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) { 11345 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE; 11346 tw32(MSGINT_MODE, val); 11347 } 11348 return 0; 11349 } 11350 11351 return -EIO; 11352 } 11353 11354 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is 11355 * successfully restored 11356 */ 11357 static int tg3_test_msi(struct tg3 *tp) 11358 { 11359 int err; 11360 u16 pci_cmd; 11361 11362 if (!tg3_flag(tp, USING_MSI)) 11363 return 0; 11364 11365 /* Turn off SERR reporting in case MSI terminates with Master 11366 * Abort. 
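 * A Master Abort on the test MSI write could otherwise be escalated
 * to SERR (and, on some systems, an NMI); the saved PCI_COMMAND value
 * is restored as soon as the test completes.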
11367 */ 11368 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); 11369 pci_write_config_word(tp->pdev, PCI_COMMAND, 11370 pci_cmd & ~PCI_COMMAND_SERR); 11371 11372 err = tg3_test_interrupt(tp); 11373 11374 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); 11375 11376 if (!err) 11377 return 0; 11378 11379 /* other failures */ 11380 if (err != -EIO) 11381 return err; 11382 11383 /* MSI test failed, go back to INTx mode */ 11384 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching " 11385 "to INTx mode. Please report this failure to the PCI " 11386 "maintainer and include system chipset information\n"); 11387 11388 free_irq(tp->napi[0].irq_vec, &tp->napi[0]); 11389 11390 pci_disable_msi(tp->pdev); 11391 11392 tg3_flag_clear(tp, USING_MSI); 11393 tp->napi[0].irq_vec = tp->pdev->irq; 11394 11395 err = tg3_request_irq(tp, 0); 11396 if (err) 11397 return err; 11398 11399 /* Need to reset the chip because the MSI cycle may have terminated 11400 * with Master Abort. 11401 */ 11402 tg3_full_lock(tp, 1); 11403 11404 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11405 err = tg3_init_hw(tp, true); 11406 11407 tg3_full_unlock(tp); 11408 11409 if (err) 11410 free_irq(tp->napi[0].irq_vec, &tp->napi[0]); 11411 11412 return err; 11413 } 11414 11415 static int tg3_request_firmware(struct tg3 *tp) 11416 { 11417 const struct tg3_firmware_hdr *fw_hdr; 11418 11419 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) { 11420 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n", 11421 tp->fw_needed); 11422 return -ENOENT; 11423 } 11424 11425 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; 11426 11427 /* Firmware blob starts with version numbers, followed by 11428 * start address and _full_ length including BSS sections 11429 * (which must be longer than the actual data, of course). 11430 */ 11431 11432 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */ 11433 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) { 11434 netdev_err(tp->dev, "bogus length %d in \"%s\"\n", 11435 tp->fw_len, tp->fw_needed); 11436 release_firmware(tp->fw); 11437 tp->fw = NULL; 11438 return -EINVAL; 11439 } 11440 11441 /* We no longer need firmware; we have it. */ 11442 tp->fw_needed = NULL; 11443 return 0; 11444 } 11445 11446 static u32 tg3_irq_count(struct tg3 *tp) 11447 { 11448 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt); 11449 11450 if (irq_cnt > 1) { 11451 /* We want as many rx rings enabled as there are cpus. 11452 * In multiqueue MSI-X mode, the first MSI-X vector 11453 * only deals with link interrupts, etc, so we add 11454 * one to the number of vectors we are requesting. 11455 */ 11456 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max); 11457 } 11458 11459 return irq_cnt; 11460 } 11461 11462 static bool tg3_enable_msix(struct tg3 *tp) 11463 { 11464 int i, rc; 11465 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS]; 11466 11467 tp->txq_cnt = tp->txq_req; 11468 tp->rxq_cnt = tp->rxq_req; 11469 if (!tp->rxq_cnt) 11470 tp->rxq_cnt = netif_get_num_default_rss_queues(); 11471 if (tp->rxq_cnt > tp->rxq_max) 11472 tp->rxq_cnt = tp->rxq_max; 11473 11474 /* Disable multiple TX rings by default. Simple round-robin hardware 11475 * scheduling of the TX rings can cause starvation of rings with 11476 * small packets when other rings have TSO or jumbo packets.
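 * So unless multiple TX queues were explicitly requested through
 * tp->txq_req, fall back to a single TX queue below.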
11477 */ 11478 if (!tp->txq_req) 11479 tp->txq_cnt = 1; 11480 11481 tp->irq_cnt = tg3_irq_count(tp); 11482 11483 for (i = 0; i < tp->irq_max; i++) { 11484 msix_ent[i].entry = i; 11485 msix_ent[i].vector = 0; 11486 } 11487 11488 rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt); 11489 if (rc < 0) { 11490 return false; 11491 } else if (rc < tp->irq_cnt) { 11492 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n", 11493 tp->irq_cnt, rc); 11494 tp->irq_cnt = rc; 11495 tp->rxq_cnt = max(rc - 1, 1); 11496 if (tp->txq_cnt) 11497 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max); 11498 } 11499 11500 for (i = 0; i < tp->irq_max; i++) 11501 tp->napi[i].irq_vec = msix_ent[i].vector; 11502 11503 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) { 11504 pci_disable_msix(tp->pdev); 11505 return false; 11506 } 11507 11508 if (tp->irq_cnt == 1) 11509 return true; 11510 11511 tg3_flag_set(tp, ENABLE_RSS); 11512 11513 if (tp->txq_cnt > 1) 11514 tg3_flag_set(tp, ENABLE_TSS); 11515 11516 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt); 11517 11518 return true; 11519 } 11520 11521 static void tg3_ints_init(struct tg3 *tp) 11522 { 11523 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) && 11524 !tg3_flag(tp, TAGGED_STATUS)) { 11525 /* All MSI supporting chips should support tagged 11526 * status. Assert that this is the case. 11527 */ 11528 netdev_warn(tp->dev, 11529 "MSI without TAGGED_STATUS? Not using MSI\n"); 11530 goto defcfg; 11531 } 11532 11533 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp)) 11534 tg3_flag_set(tp, USING_MSIX); 11535 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0) 11536 tg3_flag_set(tp, USING_MSI); 11537 11538 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) { 11539 u32 msi_mode = tr32(MSGINT_MODE); 11540 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) 11541 msi_mode |= MSGINT_MODE_MULTIVEC_EN; 11542 if (!tg3_flag(tp, 1SHOT_MSI)) 11543 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE; 11544 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE); 11545 } 11546 defcfg: 11547 if (!tg3_flag(tp, USING_MSIX)) { 11548 tp->irq_cnt = 1; 11549 tp->napi[0].irq_vec = tp->pdev->irq; 11550 } 11551 11552 if (tp->irq_cnt == 1) { 11553 tp->txq_cnt = 1; 11554 tp->rxq_cnt = 1; 11555 netif_set_real_num_tx_queues(tp->dev, 1); 11556 netif_set_real_num_rx_queues(tp->dev, 1); 11557 } 11558 } 11559 11560 static void tg3_ints_fini(struct tg3 *tp) 11561 { 11562 if (tg3_flag(tp, USING_MSIX)) 11563 pci_disable_msix(tp->pdev); 11564 else if (tg3_flag(tp, USING_MSI)) 11565 pci_disable_msi(tp->pdev); 11566 tg3_flag_clear(tp, USING_MSI); 11567 tg3_flag_clear(tp, USING_MSIX); 11568 tg3_flag_clear(tp, ENABLE_RSS); 11569 tg3_flag_clear(tp, ENABLE_TSS); 11570 } 11571 11572 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq, 11573 bool init) 11574 { 11575 struct net_device *dev = tp->dev; 11576 int i, err; 11577 11578 /* 11579 * Setup interrupts first so we know how 11580 * many NAPI resources to allocate 11581 */ 11582 tg3_ints_init(tp); 11583 11584 tg3_rss_check_indir_tbl(tp); 11585 11586 /* The placement of this call is tied 11587 * to the setup and use of Host TX descriptors. 
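 * tg3_ints_init() above has already fixed tp->irq_cnt, which
 * determines how many tg3_napi instances (and hence rings)
 * tg3_alloc_consistent() needs to set up.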
11588 */ 11589 err = tg3_alloc_consistent(tp); 11590 if (err) 11591 goto out_ints_fini; 11592 11593 tg3_napi_init(tp); 11594 11595 tg3_napi_enable(tp); 11596 11597 for (i = 0; i < tp->irq_cnt; i++) { 11598 err = tg3_request_irq(tp, i); 11599 if (err) { 11600 for (i--; i >= 0; i--) { 11601 struct tg3_napi *tnapi = &tp->napi[i]; 11602 11603 free_irq(tnapi->irq_vec, tnapi); 11604 } 11605 goto out_napi_fini; 11606 } 11607 } 11608 11609 tg3_full_lock(tp, 0); 11610 11611 if (init) 11612 tg3_ape_driver_state_change(tp, RESET_KIND_INIT); 11613 11614 err = tg3_init_hw(tp, reset_phy); 11615 if (err) { 11616 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11617 tg3_free_rings(tp); 11618 } 11619 11620 tg3_full_unlock(tp); 11621 11622 if (err) 11623 goto out_free_irq; 11624 11625 if (test_irq && tg3_flag(tp, USING_MSI)) { 11626 err = tg3_test_msi(tp); 11627 11628 if (err) { 11629 tg3_full_lock(tp, 0); 11630 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11631 tg3_free_rings(tp); 11632 tg3_full_unlock(tp); 11633 11634 goto out_napi_fini; 11635 } 11636 11637 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) { 11638 u32 val = tr32(PCIE_TRANSACTION_CFG); 11639 11640 tw32(PCIE_TRANSACTION_CFG, 11641 val | PCIE_TRANS_CFG_1SHOT_MSI); 11642 } 11643 } 11644 11645 tg3_phy_start(tp); 11646 11647 tg3_hwmon_open(tp); 11648 11649 tg3_full_lock(tp, 0); 11650 11651 tg3_timer_start(tp); 11652 tg3_flag_set(tp, INIT_COMPLETE); 11653 tg3_enable_ints(tp); 11654 11655 tg3_ptp_resume(tp); 11656 11657 tg3_full_unlock(tp); 11658 11659 netif_tx_start_all_queues(dev); 11660 11661 /* 11662 * Reset the loopback feature if it was turned on while the device was 11663 * down, to make sure that it's installed properly now. 11664 */ 11665 if (dev->features & NETIF_F_LOOPBACK) 11666 tg3_set_loopback(dev, dev->features); 11667 11668 return 0; 11669 11670 out_free_irq: 11671 for (i = tp->irq_cnt - 1; i >= 0; i--) { 11672 struct tg3_napi *tnapi = &tp->napi[i]; 11673 free_irq(tnapi->irq_vec, tnapi); 11674 } 11675 11676 out_napi_fini: 11677 tg3_napi_disable(tp); 11678 tg3_napi_fini(tp); 11679 tg3_free_consistent(tp); 11680 11681 out_ints_fini: 11682 tg3_ints_fini(tp); 11683 11684 return err; 11685 } 11686 11687 static void tg3_stop(struct tg3 *tp) 11688 { 11689 int i; 11690 11691 tg3_reset_task_cancel(tp); 11692 tg3_netif_stop(tp); 11693 11694 tg3_timer_stop(tp); 11695 11696 tg3_hwmon_close(tp); 11697 11698 tg3_phy_stop(tp); 11699 11700 tg3_full_lock(tp, 1); 11701 11702 tg3_disable_ints(tp); 11703 11704 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11705 tg3_free_rings(tp); 11706 tg3_flag_clear(tp, INIT_COMPLETE); 11707 11708 tg3_full_unlock(tp); 11709 11710 for (i = tp->irq_cnt - 1; i >= 0; i--) { 11711 struct tg3_napi *tnapi = &tp->napi[i]; 11712 free_irq(tnapi->irq_vec, tnapi); 11713 } 11714 11715 tg3_ints_fini(tp); 11716 11717 tg3_napi_fini(tp); 11718 11719 tg3_free_consistent(tp); 11720 } 11721 11722 static int tg3_open(struct net_device *dev) 11723 { 11724 struct tg3 *tp = netdev_priv(dev); 11725 int err; 11726 11727 if (tp->pcierr_recovery) { 11728 netdev_err(dev, "Failed to open device.
PCI error recovery " 11729 "in progress\n"); 11730 return -EAGAIN; 11731 } 11732 11733 if (tp->fw_needed) { 11734 err = tg3_request_firmware(tp); 11735 if (tg3_asic_rev(tp) == ASIC_REV_57766) { 11736 if (err) { 11737 netdev_warn(tp->dev, "EEE capability disabled\n"); 11738 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP; 11739 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { 11740 netdev_warn(tp->dev, "EEE capability restored\n"); 11741 tp->phy_flags |= TG3_PHYFLG_EEE_CAP; 11742 } 11743 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) { 11744 if (err) 11745 return err; 11746 } else if (err) { 11747 netdev_warn(tp->dev, "TSO capability disabled\n"); 11748 tg3_flag_clear(tp, TSO_CAPABLE); 11749 } else if (!tg3_flag(tp, TSO_CAPABLE)) { 11750 netdev_notice(tp->dev, "TSO capability restored\n"); 11751 tg3_flag_set(tp, TSO_CAPABLE); 11752 } 11753 } 11754 11755 tg3_carrier_off(tp); 11756 11757 err = tg3_power_up(tp); 11758 if (err) 11759 return err; 11760 11761 tg3_full_lock(tp, 0); 11762 11763 tg3_disable_ints(tp); 11764 tg3_flag_clear(tp, INIT_COMPLETE); 11765 11766 tg3_full_unlock(tp); 11767 11768 err = tg3_start(tp, 11769 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN), 11770 true, true); 11771 if (err) { 11772 tg3_frob_aux_power(tp, false); 11773 pci_set_power_state(tp->pdev, PCI_D3hot); 11774 } 11775 11776 return err; 11777 } 11778 11779 static int tg3_close(struct net_device *dev) 11780 { 11781 struct tg3 *tp = netdev_priv(dev); 11782 11783 if (tp->pcierr_recovery) { 11784 netdev_err(dev, "Failed to close device. PCI error recovery " 11785 "in progress\n"); 11786 return -EAGAIN; 11787 } 11788 11789 tg3_stop(tp); 11790 11791 if (pci_device_is_present(tp->pdev)) { 11792 tg3_power_down_prepare(tp); 11793 11794 tg3_carrier_off(tp); 11795 } 11796 return 0; 11797 } 11798 11799 static inline u64 get_stat64(tg3_stat64_t *val) 11800 { 11801 return ((u64)val->high << 32) | ((u64)val->low); 11802 } 11803 11804 static u64 tg3_calc_crc_errors(struct tg3 *tp) 11805 { 11806 struct tg3_hw_stats *hw_stats = tp->hw_stats; 11807 11808 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 11809 (tg3_asic_rev(tp) == ASIC_REV_5700 || 11810 tg3_asic_rev(tp) == ASIC_REV_5701)) { 11811 u32 val; 11812 11813 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) { 11814 tg3_writephy(tp, MII_TG3_TEST1, 11815 val | MII_TG3_TEST1_CRC_EN); 11816 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val); 11817 } else 11818 val = 0; 11819 11820 tp->phy_crc_errors += val; 11821 11822 return tp->phy_crc_errors; 11823 } 11824 11825 return get_stat64(&hw_stats->rx_fcs_errors); 11826 } 11827 11828 #define ESTAT_ADD(member) \ 11829 estats->member = old_estats->member + \ 11830 get_stat64(&hw_stats->member) 11831 11832 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats) 11833 { 11834 struct tg3_ethtool_stats *old_estats = &tp->estats_prev; 11835 struct tg3_hw_stats *hw_stats = tp->hw_stats; 11836 11837 ESTAT_ADD(rx_octets); 11838 ESTAT_ADD(rx_fragments); 11839 ESTAT_ADD(rx_ucast_packets); 11840 ESTAT_ADD(rx_mcast_packets); 11841 ESTAT_ADD(rx_bcast_packets); 11842 ESTAT_ADD(rx_fcs_errors); 11843 ESTAT_ADD(rx_align_errors); 11844 ESTAT_ADD(rx_xon_pause_rcvd); 11845 ESTAT_ADD(rx_xoff_pause_rcvd); 11846 ESTAT_ADD(rx_mac_ctrl_rcvd); 11847 ESTAT_ADD(rx_xoff_entered); 11848 ESTAT_ADD(rx_frame_too_long_errors); 11849 ESTAT_ADD(rx_jabbers); 11850 ESTAT_ADD(rx_undersize_packets); 11851 ESTAT_ADD(rx_in_length_errors); 11852 ESTAT_ADD(rx_out_length_errors); 11853 ESTAT_ADD(rx_64_or_less_octet_packets); 11854 ESTAT_ADD(rx_65_to_127_octet_packets); 
11855 ESTAT_ADD(rx_128_to_255_octet_packets); 11856 ESTAT_ADD(rx_256_to_511_octet_packets); 11857 ESTAT_ADD(rx_512_to_1023_octet_packets); 11858 ESTAT_ADD(rx_1024_to_1522_octet_packets); 11859 ESTAT_ADD(rx_1523_to_2047_octet_packets); 11860 ESTAT_ADD(rx_2048_to_4095_octet_packets); 11861 ESTAT_ADD(rx_4096_to_8191_octet_packets); 11862 ESTAT_ADD(rx_8192_to_9022_octet_packets); 11863 11864 ESTAT_ADD(tx_octets); 11865 ESTAT_ADD(tx_collisions); 11866 ESTAT_ADD(tx_xon_sent); 11867 ESTAT_ADD(tx_xoff_sent); 11868 ESTAT_ADD(tx_flow_control); 11869 ESTAT_ADD(tx_mac_errors); 11870 ESTAT_ADD(tx_single_collisions); 11871 ESTAT_ADD(tx_mult_collisions); 11872 ESTAT_ADD(tx_deferred); 11873 ESTAT_ADD(tx_excessive_collisions); 11874 ESTAT_ADD(tx_late_collisions); 11875 ESTAT_ADD(tx_collide_2times); 11876 ESTAT_ADD(tx_collide_3times); 11877 ESTAT_ADD(tx_collide_4times); 11878 ESTAT_ADD(tx_collide_5times); 11879 ESTAT_ADD(tx_collide_6times); 11880 ESTAT_ADD(tx_collide_7times); 11881 ESTAT_ADD(tx_collide_8times); 11882 ESTAT_ADD(tx_collide_9times); 11883 ESTAT_ADD(tx_collide_10times); 11884 ESTAT_ADD(tx_collide_11times); 11885 ESTAT_ADD(tx_collide_12times); 11886 ESTAT_ADD(tx_collide_13times); 11887 ESTAT_ADD(tx_collide_14times); 11888 ESTAT_ADD(tx_collide_15times); 11889 ESTAT_ADD(tx_ucast_packets); 11890 ESTAT_ADD(tx_mcast_packets); 11891 ESTAT_ADD(tx_bcast_packets); 11892 ESTAT_ADD(tx_carrier_sense_errors); 11893 ESTAT_ADD(tx_discards); 11894 ESTAT_ADD(tx_errors); 11895 11896 ESTAT_ADD(dma_writeq_full); 11897 ESTAT_ADD(dma_write_prioq_full); 11898 ESTAT_ADD(rxbds_empty); 11899 ESTAT_ADD(rx_discards); 11900 ESTAT_ADD(rx_errors); 11901 ESTAT_ADD(rx_threshold_hit); 11902 11903 ESTAT_ADD(dma_readq_full); 11904 ESTAT_ADD(dma_read_prioq_full); 11905 ESTAT_ADD(tx_comp_queue_full); 11906 11907 ESTAT_ADD(ring_set_send_prod_index); 11908 ESTAT_ADD(ring_status_update); 11909 ESTAT_ADD(nic_irqs); 11910 ESTAT_ADD(nic_avoided_irqs); 11911 ESTAT_ADD(nic_tx_threshold_hit); 11912 11913 ESTAT_ADD(mbuf_lwm_thresh_hit); 11914 } 11915 11916 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats) 11917 { 11918 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev; 11919 struct tg3_hw_stats *hw_stats = tp->hw_stats; 11920 11921 stats->rx_packets = old_stats->rx_packets + 11922 get_stat64(&hw_stats->rx_ucast_packets) + 11923 get_stat64(&hw_stats->rx_mcast_packets) + 11924 get_stat64(&hw_stats->rx_bcast_packets); 11925 11926 stats->tx_packets = old_stats->tx_packets + 11927 get_stat64(&hw_stats->tx_ucast_packets) + 11928 get_stat64(&hw_stats->tx_mcast_packets) + 11929 get_stat64(&hw_stats->tx_bcast_packets); 11930 11931 stats->rx_bytes = old_stats->rx_bytes + 11932 get_stat64(&hw_stats->rx_octets); 11933 stats->tx_bytes = old_stats->tx_bytes + 11934 get_stat64(&hw_stats->tx_octets); 11935 11936 stats->rx_errors = old_stats->rx_errors + 11937 get_stat64(&hw_stats->rx_errors); 11938 stats->tx_errors = old_stats->tx_errors + 11939 get_stat64(&hw_stats->tx_errors) + 11940 get_stat64(&hw_stats->tx_mac_errors) + 11941 get_stat64(&hw_stats->tx_carrier_sense_errors) + 11942 get_stat64(&hw_stats->tx_discards); 11943 11944 stats->multicast = old_stats->multicast + 11945 get_stat64(&hw_stats->rx_mcast_packets); 11946 stats->collisions = old_stats->collisions + 11947 get_stat64(&hw_stats->tx_collisions); 11948 11949 stats->rx_length_errors = old_stats->rx_length_errors + 11950 get_stat64(&hw_stats->rx_frame_too_long_errors) + 11951 get_stat64(&hw_stats->rx_undersize_packets); 11952 11953 stats->rx_frame_errors = 
old_stats->rx_frame_errors + 11954 get_stat64(&hw_stats->rx_align_errors); 11955 stats->tx_aborted_errors = old_stats->tx_aborted_errors + 11956 get_stat64(&hw_stats->tx_discards); 11957 stats->tx_carrier_errors = old_stats->tx_carrier_errors + 11958 get_stat64(&hw_stats->tx_carrier_sense_errors); 11959 11960 stats->rx_crc_errors = old_stats->rx_crc_errors + 11961 tg3_calc_crc_errors(tp); 11962 11963 stats->rx_missed_errors = old_stats->rx_missed_errors + 11964 get_stat64(&hw_stats->rx_discards); 11965 11966 stats->rx_dropped = tp->rx_dropped; 11967 stats->tx_dropped = tp->tx_dropped; 11968 } 11969 11970 static int tg3_get_regs_len(struct net_device *dev) 11971 { 11972 return TG3_REG_BLK_SIZE; 11973 } 11974 11975 static void tg3_get_regs(struct net_device *dev, 11976 struct ethtool_regs *regs, void *_p) 11977 { 11978 struct tg3 *tp = netdev_priv(dev); 11979 11980 regs->version = 0; 11981 11982 memset(_p, 0, TG3_REG_BLK_SIZE); 11983 11984 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 11985 return; 11986 11987 tg3_full_lock(tp, 0); 11988 11989 tg3_dump_legacy_regs(tp, (u32 *)_p); 11990 11991 tg3_full_unlock(tp); 11992 } 11993 11994 static int tg3_get_eeprom_len(struct net_device *dev) 11995 { 11996 struct tg3 *tp = netdev_priv(dev); 11997 11998 return tp->nvram_size; 11999 } 12000 12001 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) 12002 { 12003 struct tg3 *tp = netdev_priv(dev); 12004 int ret, cpmu_restore = 0; 12005 u8 *pd; 12006 u32 i, offset, len, b_offset, b_count, cpmu_val = 0; 12007 __be32 val; 12008 12009 if (tg3_flag(tp, NO_NVRAM)) 12010 return -EINVAL; 12011 12012 offset = eeprom->offset; 12013 len = eeprom->len; 12014 eeprom->len = 0; 12015 12016 eeprom->magic = TG3_EEPROM_MAGIC; 12017 12018 /* Override clock, link aware and link idle modes */ 12019 if (tg3_flag(tp, CPMU_PRESENT)) { 12020 cpmu_val = tr32(TG3_CPMU_CTRL); 12021 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE | 12022 CPMU_CTRL_LINK_IDLE_MODE)) { 12023 tw32(TG3_CPMU_CTRL, cpmu_val & 12024 ~(CPMU_CTRL_LINK_AWARE_MODE | 12025 CPMU_CTRL_LINK_IDLE_MODE)); 12026 cpmu_restore = 1; 12027 } 12028 } 12029 tg3_override_clk(tp); 12030 12031 if (offset & 3) { 12032 /* adjustments to start on required 4 byte boundary */ 12033 b_offset = offset & 3; 12034 b_count = 4 - b_offset; 12035 if (b_count > len) { 12036 /* i.e. 
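a request contained entirely within one aligned word, such as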
offset=1 len=2 */ 12037 b_count = len; 12038 } 12039 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val); 12040 if (ret) 12041 goto eeprom_done; 12042 memcpy(data, ((char *)&val) + b_offset, b_count); 12043 len -= b_count; 12044 offset += b_count; 12045 eeprom->len += b_count; 12046 } 12047 12048 /* read bytes up to the last 4 byte boundary */ 12049 pd = &data[eeprom->len]; 12050 for (i = 0; i < (len - (len & 3)); i += 4) { 12051 ret = tg3_nvram_read_be32(tp, offset + i, &val); 12052 if (ret) { 12053 if (i) 12054 i -= 4; 12055 eeprom->len += i; 12056 goto eeprom_done; 12057 } 12058 memcpy(pd + i, &val, 4); 12059 if (need_resched()) { 12060 if (signal_pending(current)) { 12061 eeprom->len += i; 12062 ret = -EINTR; 12063 goto eeprom_done; 12064 } 12065 cond_resched(); 12066 } 12067 } 12068 eeprom->len += i; 12069 12070 if (len & 3) { 12071 /* read last bytes not ending on 4 byte boundary */ 12072 pd = &data[eeprom->len]; 12073 b_count = len & 3; 12074 b_offset = offset + len - b_count; 12075 ret = tg3_nvram_read_be32(tp, b_offset, &val); 12076 if (ret) 12077 goto eeprom_done; 12078 memcpy(pd, &val, b_count); 12079 eeprom->len += b_count; 12080 } 12081 ret = 0; 12082 12083 eeprom_done: 12084 /* Restore clock, link aware and link idle modes */ 12085 tg3_restore_clk(tp); 12086 if (cpmu_restore) 12087 tw32(TG3_CPMU_CTRL, cpmu_val); 12088 12089 return ret; 12090 } 12091 12092 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) 12093 { 12094 struct tg3 *tp = netdev_priv(dev); 12095 int ret; 12096 u32 offset, len, b_offset, odd_len; 12097 u8 *buf; 12098 __be32 start = 0, end; 12099 12100 if (tg3_flag(tp, NO_NVRAM) || 12101 eeprom->magic != TG3_EEPROM_MAGIC) 12102 return -EINVAL; 12103 12104 offset = eeprom->offset; 12105 len = eeprom->len; 12106 12107 if ((b_offset = (offset & 3))) { 12108 /* adjustments to start on required 4 byte boundary */ 12109 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start); 12110 if (ret) 12111 return ret; 12112 len += b_offset; 12113 offset &= ~3; 12114 if (len < 4) 12115 len = 4; 12116 } 12117 12118 odd_len = 0; 12119 if (len & 3) { 12120 /* adjustments to end on required 4 byte boundary */ 12121 odd_len = 1; 12122 len = (len + 3) & ~3; 12123 ret = tg3_nvram_read_be32(tp, offset+len-4, &end); 12124 if (ret) 12125 return ret; 12126 } 12127 12128 buf = data; 12129 if (b_offset || odd_len) { 12130 buf = kmalloc(len, GFP_KERNEL); 12131 if (!buf) 12132 return -ENOMEM; 12133 if (b_offset) 12134 memcpy(buf, &start, 4); 12135 if (odd_len) 12136 memcpy(buf+len-4, &end, 4); 12137 memcpy(buf + b_offset, data, eeprom->len); 12138 } 12139 12140 ret = tg3_nvram_write_block(tp, offset, len, buf); 12141 12142 if (buf != data) 12143 kfree(buf); 12144 12145 return ret; 12146 } 12147 12148 static int tg3_get_link_ksettings(struct net_device *dev, 12149 struct ethtool_link_ksettings *cmd) 12150 { 12151 struct tg3 *tp = netdev_priv(dev); 12152 u32 supported, advertising; 12153 12154 if (tg3_flag(tp, USE_PHYLIB)) { 12155 struct phy_device *phydev; 12156 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 12157 return -EAGAIN; 12158 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 12159 phy_ethtool_ksettings_get(phydev, cmd); 12160 12161 return 0; 12162 } 12163 12164 supported = (SUPPORTED_Autoneg); 12165 12166 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) 12167 supported |= (SUPPORTED_1000baseT_Half | 12168 SUPPORTED_1000baseT_Full); 12169 12170 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { 12171 supported |= (SUPPORTED_100baseT_Half | 12172 
SUPPORTED_100baseT_Full | 12173 SUPPORTED_10baseT_Half | 12174 SUPPORTED_10baseT_Full | 12175 SUPPORTED_TP); 12176 cmd->base.port = PORT_TP; 12177 } else { 12178 supported |= SUPPORTED_FIBRE; 12179 cmd->base.port = PORT_FIBRE; 12180 } 12181 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, 12182 supported); 12183 12184 advertising = tp->link_config.advertising; 12185 if (tg3_flag(tp, PAUSE_AUTONEG)) { 12186 if (tp->link_config.flowctrl & FLOW_CTRL_RX) { 12187 if (tp->link_config.flowctrl & FLOW_CTRL_TX) { 12188 advertising |= ADVERTISED_Pause; 12189 } else { 12190 advertising |= ADVERTISED_Pause | 12191 ADVERTISED_Asym_Pause; 12192 } 12193 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) { 12194 advertising |= ADVERTISED_Asym_Pause; 12195 } 12196 } 12197 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, 12198 advertising); 12199 12200 if (netif_running(dev) && tp->link_up) { 12201 cmd->base.speed = tp->link_config.active_speed; 12202 cmd->base.duplex = tp->link_config.active_duplex; 12203 ethtool_convert_legacy_u32_to_link_mode( 12204 cmd->link_modes.lp_advertising, 12205 tp->link_config.rmt_adv); 12206 12207 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { 12208 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE) 12209 cmd->base.eth_tp_mdix = ETH_TP_MDI_X; 12210 else 12211 cmd->base.eth_tp_mdix = ETH_TP_MDI; 12212 } 12213 } else { 12214 cmd->base.speed = SPEED_UNKNOWN; 12215 cmd->base.duplex = DUPLEX_UNKNOWN; 12216 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; 12217 } 12218 cmd->base.phy_address = tp->phy_addr; 12219 cmd->base.autoneg = tp->link_config.autoneg; 12220 return 0; 12221 } 12222 12223 static int tg3_set_link_ksettings(struct net_device *dev, 12224 const struct ethtool_link_ksettings *cmd) 12225 { 12226 struct tg3 *tp = netdev_priv(dev); 12227 u32 speed = cmd->base.speed; 12228 u32 advertising; 12229 12230 if (tg3_flag(tp, USE_PHYLIB)) { 12231 struct phy_device *phydev; 12232 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 12233 return -EAGAIN; 12234 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 12235 return phy_ethtool_ksettings_set(phydev, cmd); 12236 } 12237 12238 if (cmd->base.autoneg != AUTONEG_ENABLE && 12239 cmd->base.autoneg != AUTONEG_DISABLE) 12240 return -EINVAL; 12241 12242 if (cmd->base.autoneg == AUTONEG_DISABLE && 12243 cmd->base.duplex != DUPLEX_FULL && 12244 cmd->base.duplex != DUPLEX_HALF) 12245 return -EINVAL; 12246 12247 ethtool_convert_link_mode_to_legacy_u32(&advertising, 12248 cmd->link_modes.advertising); 12249 12250 if (cmd->base.autoneg == AUTONEG_ENABLE) { 12251 u32 mask = ADVERTISED_Autoneg | 12252 ADVERTISED_Pause | 12253 ADVERTISED_Asym_Pause; 12254 12255 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) 12256 mask |= ADVERTISED_1000baseT_Half | 12257 ADVERTISED_1000baseT_Full; 12258 12259 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 12260 mask |= ADVERTISED_100baseT_Half | 12261 ADVERTISED_100baseT_Full | 12262 ADVERTISED_10baseT_Half | 12263 ADVERTISED_10baseT_Full | 12264 ADVERTISED_TP; 12265 else 12266 mask |= ADVERTISED_FIBRE; 12267 12268 if (advertising & ~mask) 12269 return -EINVAL; 12270 12271 mask &= (ADVERTISED_1000baseT_Half | 12272 ADVERTISED_1000baseT_Full | 12273 ADVERTISED_100baseT_Half | 12274 ADVERTISED_100baseT_Full | 12275 ADVERTISED_10baseT_Half | 12276 ADVERTISED_10baseT_Full); 12277 12278 advertising &= mask; 12279 } else { 12280 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) { 12281 if (speed != SPEED_1000) 12282 return -EINVAL; 12283 12284 if (cmd->base.duplex != DUPLEX_FULL) 12285 return 
-EINVAL; 12286 } else { 12287 if (speed != SPEED_100 && 12288 speed != SPEED_10) 12289 return -EINVAL; 12290 } 12291 } 12292 12293 tg3_full_lock(tp, 0); 12294 12295 tp->link_config.autoneg = cmd->base.autoneg; 12296 if (cmd->base.autoneg == AUTONEG_ENABLE) { 12297 tp->link_config.advertising = (advertising | 12298 ADVERTISED_Autoneg); 12299 tp->link_config.speed = SPEED_UNKNOWN; 12300 tp->link_config.duplex = DUPLEX_UNKNOWN; 12301 } else { 12302 tp->link_config.advertising = 0; 12303 tp->link_config.speed = speed; 12304 tp->link_config.duplex = cmd->base.duplex; 12305 } 12306 12307 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; 12308 12309 tg3_warn_mgmt_link_flap(tp); 12310 12311 if (netif_running(dev)) 12312 tg3_setup_phy(tp, true); 12313 12314 tg3_full_unlock(tp); 12315 12316 return 0; 12317 } 12318 12319 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 12320 { 12321 struct tg3 *tp = netdev_priv(dev); 12322 12323 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); 12324 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); 12325 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version)); 12326 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info)); 12327 } 12328 12329 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 12330 { 12331 struct tg3 *tp = netdev_priv(dev); 12332 12333 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev)) 12334 wol->supported = WAKE_MAGIC; 12335 else 12336 wol->supported = 0; 12337 wol->wolopts = 0; 12338 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev)) 12339 wol->wolopts = WAKE_MAGIC; 12340 memset(&wol->sopass, 0, sizeof(wol->sopass)); 12341 } 12342 12343 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 12344 { 12345 struct tg3 *tp = netdev_priv(dev); 12346 struct device *dp = &tp->pdev->dev; 12347 12348 if (wol->wolopts & ~WAKE_MAGIC) 12349 return -EINVAL; 12350 if ((wol->wolopts & WAKE_MAGIC) && 12351 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp))) 12352 return -EINVAL; 12353 12354 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC); 12355 12356 if (device_may_wakeup(dp)) 12357 tg3_flag_set(tp, WOL_ENABLE); 12358 else 12359 tg3_flag_clear(tp, WOL_ENABLE); 12360 12361 return 0; 12362 } 12363 12364 static u32 tg3_get_msglevel(struct net_device *dev) 12365 { 12366 struct tg3 *tp = netdev_priv(dev); 12367 return tp->msg_enable; 12368 } 12369 12370 static void tg3_set_msglevel(struct net_device *dev, u32 value) 12371 { 12372 struct tg3 *tp = netdev_priv(dev); 12373 tp->msg_enable = value; 12374 } 12375 12376 static int tg3_nway_reset(struct net_device *dev) 12377 { 12378 struct tg3 *tp = netdev_priv(dev); 12379 int r; 12380 12381 if (!netif_running(dev)) 12382 return -EAGAIN; 12383 12384 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 12385 return -EINVAL; 12386 12387 tg3_warn_mgmt_link_flap(tp); 12388 12389 if (tg3_flag(tp, USE_PHYLIB)) { 12390 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 12391 return -EAGAIN; 12392 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); 12393 } else { 12394 u32 bmcr; 12395 12396 spin_lock_bh(&tp->lock); 12397 r = -EINVAL; 12398 tg3_readphy(tp, MII_BMCR, &bmcr); 12399 if (!tg3_readphy(tp, MII_BMCR, &bmcr) && 12400 ((bmcr & BMCR_ANENABLE) || 12401 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) { 12402 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART | 12403 BMCR_ANENABLE); 12404 r = 0; 12405 } 12406 spin_unlock_bh(&tp->lock); 12407 } 12408 12409 return r; 12410 } 
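/* Illustrative example, not part of the driver: the ethtool_ops
 * handlers in this block back the corresponding ethtool(8) commands
 * from userspace, e.g. (with a hypothetical tg3 interface "eth0"):
 *
 *	ethtool -r eth0		# restart autoneg  -> tg3_nway_reset()
 *	ethtool -g eth0		# query ring sizes -> tg3_get_ringparam()
 *	ethtool -G eth0 rx 511	# resize rx ring   -> tg3_set_ringparam()
 */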
12411 12412 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) 12413 { 12414 struct tg3 *tp = netdev_priv(dev); 12415 12416 ering->rx_max_pending = tp->rx_std_ring_mask; 12417 if (tg3_flag(tp, JUMBO_RING_ENABLE)) 12418 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask; 12419 else 12420 ering->rx_jumbo_max_pending = 0; 12421 12422 ering->tx_max_pending = TG3_TX_RING_SIZE - 1; 12423 12424 ering->rx_pending = tp->rx_pending; 12425 if (tg3_flag(tp, JUMBO_RING_ENABLE)) 12426 ering->rx_jumbo_pending = tp->rx_jumbo_pending; 12427 else 12428 ering->rx_jumbo_pending = 0; 12429 12430 ering->tx_pending = tp->napi[0].tx_pending; 12431 } 12432 12433 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) 12434 { 12435 struct tg3 *tp = netdev_priv(dev); 12436 int i, irq_sync = 0, err = 0; 12437 12438 if ((ering->rx_pending > tp->rx_std_ring_mask) || 12439 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) || 12440 (ering->tx_pending > TG3_TX_RING_SIZE - 1) || 12441 (ering->tx_pending <= MAX_SKB_FRAGS) || 12442 (tg3_flag(tp, TSO_BUG) && 12443 (ering->tx_pending <= (MAX_SKB_FRAGS * 3)))) 12444 return -EINVAL; 12445 12446 if (netif_running(dev)) { 12447 tg3_phy_stop(tp); 12448 tg3_netif_stop(tp); 12449 irq_sync = 1; 12450 } 12451 12452 tg3_full_lock(tp, irq_sync); 12453 12454 tp->rx_pending = ering->rx_pending; 12455 12456 if (tg3_flag(tp, MAX_RXPEND_64) && 12457 tp->rx_pending > 63) 12458 tp->rx_pending = 63; 12459 12460 if (tg3_flag(tp, JUMBO_RING_ENABLE)) 12461 tp->rx_jumbo_pending = ering->rx_jumbo_pending; 12462 12463 for (i = 0; i < tp->irq_max; i++) 12464 tp->napi[i].tx_pending = ering->tx_pending; 12465 12466 if (netif_running(dev)) { 12467 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 12468 err = tg3_restart_hw(tp, false); 12469 if (!err) 12470 tg3_netif_start(tp); 12471 } 12472 12473 tg3_full_unlock(tp); 12474 12475 if (irq_sync && !err) 12476 tg3_phy_start(tp); 12477 12478 return err; 12479 } 12480 12481 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) 12482 { 12483 struct tg3 *tp = netdev_priv(dev); 12484 12485 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG); 12486 12487 if (tp->link_config.flowctrl & FLOW_CTRL_RX) 12488 epause->rx_pause = 1; 12489 else 12490 epause->rx_pause = 0; 12491 12492 if (tp->link_config.flowctrl & FLOW_CTRL_TX) 12493 epause->tx_pause = 1; 12494 else 12495 epause->tx_pause = 0; 12496 } 12497 12498 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) 12499 { 12500 struct tg3 *tp = netdev_priv(dev); 12501 int err = 0; 12502 12503 if (tp->link_config.autoneg == AUTONEG_ENABLE) 12504 tg3_warn_mgmt_link_flap(tp); 12505 12506 if (tg3_flag(tp, USE_PHYLIB)) { 12507 struct phy_device *phydev; 12508 12509 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 12510 12511 if (!phy_validate_pause(phydev, epause)) 12512 return -EINVAL; 12513 12514 tp->link_config.flowctrl = 0; 12515 phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause); 12516 if (epause->rx_pause) { 12517 tp->link_config.flowctrl |= FLOW_CTRL_RX; 12518 12519 if (epause->tx_pause) { 12520 tp->link_config.flowctrl |= FLOW_CTRL_TX; 12521 } 12522 } else if (epause->tx_pause) { 12523 tp->link_config.flowctrl |= FLOW_CTRL_TX; 12524 } 12525 12526 if (epause->autoneg) 12527 tg3_flag_set(tp, PAUSE_AUTONEG); 12528 else 12529 tg3_flag_clear(tp, PAUSE_AUTONEG); 12530 12531 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { 12532 if (phydev->autoneg) { 12533 /* phy_set_asym_pause() will 12534 
* renegotiate the link to inform our 12535 * link partner of our flow control 12536 * settings, even if the flow control 12537 * is forced. Let tg3_adjust_link() 12538 * do the final flow control setup. 12539 */ 12540 return 0; 12541 } 12542 12543 if (!epause->autoneg) 12544 tg3_setup_flow_control(tp, 0, 0); 12545 } 12546 } else { 12547 int irq_sync = 0; 12548 12549 if (netif_running(dev)) { 12550 tg3_netif_stop(tp); 12551 irq_sync = 1; 12552 } 12553 12554 tg3_full_lock(tp, irq_sync); 12555 12556 if (epause->autoneg) 12557 tg3_flag_set(tp, PAUSE_AUTONEG); 12558 else 12559 tg3_flag_clear(tp, PAUSE_AUTONEG); 12560 if (epause->rx_pause) 12561 tp->link_config.flowctrl |= FLOW_CTRL_RX; 12562 else 12563 tp->link_config.flowctrl &= ~FLOW_CTRL_RX; 12564 if (epause->tx_pause) 12565 tp->link_config.flowctrl |= FLOW_CTRL_TX; 12566 else 12567 tp->link_config.flowctrl &= ~FLOW_CTRL_TX; 12568 12569 if (netif_running(dev)) { 12570 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 12571 err = tg3_restart_hw(tp, false); 12572 if (!err) 12573 tg3_netif_start(tp); 12574 } 12575 12576 tg3_full_unlock(tp); 12577 } 12578 12579 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; 12580 12581 return err; 12582 } 12583 12584 static int tg3_get_sset_count(struct net_device *dev, int sset) 12585 { 12586 switch (sset) { 12587 case ETH_SS_TEST: 12588 return TG3_NUM_TEST; 12589 case ETH_SS_STATS: 12590 return TG3_NUM_STATS; 12591 default: 12592 return -EOPNOTSUPP; 12593 } 12594 } 12595 12596 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, 12597 u32 *rules __always_unused) 12598 { 12599 struct tg3 *tp = netdev_priv(dev); 12600 12601 if (!tg3_flag(tp, SUPPORT_MSIX)) 12602 return -EOPNOTSUPP; 12603 12604 switch (info->cmd) { 12605 case ETHTOOL_GRXRINGS: 12606 if (netif_running(tp->dev)) 12607 info->data = tp->rxq_cnt; 12608 else { 12609 info->data = num_online_cpus(); 12610 if (info->data > TG3_RSS_MAX_NUM_QS) 12611 info->data = TG3_RSS_MAX_NUM_QS; 12612 } 12613 12614 return 0; 12615 12616 default: 12617 return -EOPNOTSUPP; 12618 } 12619 } 12620 12621 static u32 tg3_get_rxfh_indir_size(struct net_device *dev) 12622 { 12623 u32 size = 0; 12624 struct tg3 *tp = netdev_priv(dev); 12625 12626 if (tg3_flag(tp, SUPPORT_MSIX)) 12627 size = TG3_RSS_INDIR_TBL_SIZE; 12628 12629 return size; 12630 } 12631 12632 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc) 12633 { 12634 struct tg3 *tp = netdev_priv(dev); 12635 int i; 12636 12637 if (hfunc) 12638 *hfunc = ETH_RSS_HASH_TOP; 12639 if (!indir) 12640 return 0; 12641 12642 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) 12643 indir[i] = tp->rss_ind_tbl[i]; 12644 12645 return 0; 12646 } 12647 12648 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, 12649 const u8 hfunc) 12650 { 12651 struct tg3 *tp = netdev_priv(dev); 12652 size_t i; 12653 12654 /* We require at least one supported parameter to be changed and no 12655 * change in any of the unsupported parameters 12656 */ 12657 if (key || 12658 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) 12659 return -EOPNOTSUPP; 12660 12661 if (!indir) 12662 return 0; 12663 12664 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) 12665 tp->rss_ind_tbl[i] = indir[i]; 12666 12667 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS)) 12668 return 0; 12669 12670 /* It is legal to write the indirection 12671 * table while the device is running. 
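	 *
	 * Illustrative example (assuming a hypothetical tg3 interface
	 * "eth0"): userspace can rewrite the table on a live device with
	 *
	 *	ethtool -X eth0 equal 4
	 *
	 * which spreads the indirection entries evenly over four rx rings.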
12672 	 */
12673 	tg3_full_lock(tp, 0);
12674 	tg3_rss_write_indir_tbl(tp);
12675 	tg3_full_unlock(tp);
12676 
12677 	return 0;
12678 }
12679 
12680 static void tg3_get_channels(struct net_device *dev,
12681 			     struct ethtool_channels *channel)
12682 {
12683 	struct tg3 *tp = netdev_priv(dev);
12684 	u32 deflt_qs = netif_get_num_default_rss_queues();
12685 
12686 	channel->max_rx = tp->rxq_max;
12687 	channel->max_tx = tp->txq_max;
12688 
12689 	if (netif_running(dev)) {
12690 		channel->rx_count = tp->rxq_cnt;
12691 		channel->tx_count = tp->txq_cnt;
12692 	} else {
12693 		if (tp->rxq_req)
12694 			channel->rx_count = tp->rxq_req;
12695 		else
12696 			channel->rx_count = min(deflt_qs, tp->rxq_max);
12697 
12698 		if (tp->txq_req)
12699 			channel->tx_count = tp->txq_req;
12700 		else
12701 			channel->tx_count = min(deflt_qs, tp->txq_max);
12702 	}
12703 }
12704 
12705 static int tg3_set_channels(struct net_device *dev,
12706 			    struct ethtool_channels *channel)
12707 {
12708 	struct tg3 *tp = netdev_priv(dev);
12709 
12710 	if (!tg3_flag(tp, SUPPORT_MSIX))
12711 		return -EOPNOTSUPP;
12712 
12713 	if (channel->rx_count > tp->rxq_max ||
12714 	    channel->tx_count > tp->txq_max)
12715 		return -EINVAL;
12716 
12717 	tp->rxq_req = channel->rx_count;
12718 	tp->txq_req = channel->tx_count;
12719 
12720 	if (!netif_running(dev))
12721 		return 0;
12722 
12723 	tg3_stop(tp);
12724 
12725 	tg3_carrier_off(tp);
12726 
12727 	tg3_start(tp, true, false, false);
12728 
12729 	return 0;
12730 }
12731 
12732 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12733 {
12734 	switch (stringset) {
12735 	case ETH_SS_STATS:
12736 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12737 		break;
12738 	case ETH_SS_TEST:
12739 		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12740 		break;
12741 	default:
12742 		WARN_ON(1);	/* we need a WARN() */
12743 		break;
12744 	}
12745 }
12746 
12747 static int tg3_set_phys_id(struct net_device *dev,
12748 			   enum ethtool_phys_id_state state)
12749 {
12750 	struct tg3 *tp = netdev_priv(dev);
12751 
12752 	if (!netif_running(tp->dev))
12753 		return -EAGAIN;
12754 
12755 	switch (state) {
12756 	case ETHTOOL_ID_ACTIVE:
12757 		return 1;	/* cycle on/off once per second */
12758 
12759 	case ETHTOOL_ID_ON:
12760 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12761 		     LED_CTRL_1000MBPS_ON |
12762 		     LED_CTRL_100MBPS_ON |
12763 		     LED_CTRL_10MBPS_ON |
12764 		     LED_CTRL_TRAFFIC_OVERRIDE |
12765 		     LED_CTRL_TRAFFIC_BLINK |
12766 		     LED_CTRL_TRAFFIC_LED);
12767 		break;
12768 
12769 	case ETHTOOL_ID_OFF:
12770 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12771 		     LED_CTRL_TRAFFIC_OVERRIDE);
12772 		break;
12773 
12774 	case ETHTOOL_ID_INACTIVE:
12775 		tw32(MAC_LED_CTRL, tp->led_ctrl);
12776 		break;
12777 	}
12778 
12779 	return 0;
12780 }
12781 
12782 static void tg3_get_ethtool_stats(struct net_device *dev,
12783 				  struct ethtool_stats *estats, u64 *tmp_stats)
12784 {
12785 	struct tg3 *tp = netdev_priv(dev);
12786 
12787 	if (tp->hw_stats)
12788 		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12789 	else
12790 		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12791 }
12792 
12793 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12794 {
12795 	int i;
12796 	__be32 *buf;
12797 	u32 offset = 0, len = 0;
12798 	u32 magic, val;
12799 
12800 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12801 		return NULL;
12802 
12803 	if (magic == TG3_EEPROM_MAGIC) {
12804 		for (offset = TG3_NVM_DIR_START;
12805 		     offset < TG3_NVM_DIR_END;
12806 		     offset += TG3_NVM_DIRENT_SIZE) {
12807 			if (tg3_nvram_read(tp, offset, &val))
12808 				return NULL;
12809 
12810 			if ((val
>> TG3_NVM_DIRTYPE_SHIFT) == 12811 TG3_NVM_DIRTYPE_EXTVPD) 12812 break; 12813 } 12814 12815 if (offset != TG3_NVM_DIR_END) { 12816 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4; 12817 if (tg3_nvram_read(tp, offset + 4, &offset)) 12818 return NULL; 12819 12820 offset = tg3_nvram_logical_addr(tp, offset); 12821 } 12822 } 12823 12824 if (!offset || !len) { 12825 offset = TG3_NVM_VPD_OFF; 12826 len = TG3_NVM_VPD_LEN; 12827 } 12828 12829 buf = kmalloc(len, GFP_KERNEL); 12830 if (buf == NULL) 12831 return NULL; 12832 12833 if (magic == TG3_EEPROM_MAGIC) { 12834 for (i = 0; i < len; i += 4) { 12835 /* The data is in little-endian format in NVRAM. 12836 * Use the big-endian read routines to preserve 12837 * the byte order as it exists in NVRAM. 12838 */ 12839 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4])) 12840 goto error; 12841 } 12842 } else { 12843 u8 *ptr; 12844 ssize_t cnt; 12845 unsigned int pos = 0; 12846 12847 ptr = (u8 *)&buf[0]; 12848 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) { 12849 cnt = pci_read_vpd(tp->pdev, pos, 12850 len - pos, ptr); 12851 if (cnt == -ETIMEDOUT || cnt == -EINTR) 12852 cnt = 0; 12853 else if (cnt < 0) 12854 goto error; 12855 } 12856 if (pos != len) 12857 goto error; 12858 } 12859 12860 *vpdlen = len; 12861 12862 return buf; 12863 12864 error: 12865 kfree(buf); 12866 return NULL; 12867 } 12868 12869 #define NVRAM_TEST_SIZE 0x100 12870 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14 12871 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18 12872 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c 12873 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20 12874 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24 12875 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50 12876 #define NVRAM_SELFBOOT_HW_SIZE 0x20 12877 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c 12878 12879 static int tg3_test_nvram(struct tg3 *tp) 12880 { 12881 u32 csum, magic, len; 12882 __be32 *buf; 12883 int i, j, k, err = 0, size; 12884 12885 if (tg3_flag(tp, NO_NVRAM)) 12886 return 0; 12887 12888 if (tg3_nvram_read(tp, 0, &magic) != 0) 12889 return -EIO; 12890 12891 if (magic == TG3_EEPROM_MAGIC) 12892 size = NVRAM_TEST_SIZE; 12893 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) { 12894 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) == 12895 TG3_EEPROM_SB_FORMAT_1) { 12896 switch (magic & TG3_EEPROM_SB_REVISION_MASK) { 12897 case TG3_EEPROM_SB_REVISION_0: 12898 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE; 12899 break; 12900 case TG3_EEPROM_SB_REVISION_2: 12901 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE; 12902 break; 12903 case TG3_EEPROM_SB_REVISION_3: 12904 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE; 12905 break; 12906 case TG3_EEPROM_SB_REVISION_4: 12907 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE; 12908 break; 12909 case TG3_EEPROM_SB_REVISION_5: 12910 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE; 12911 break; 12912 case TG3_EEPROM_SB_REVISION_6: 12913 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE; 12914 break; 12915 default: 12916 return -EIO; 12917 } 12918 } else 12919 return 0; 12920 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW) 12921 size = NVRAM_SELFBOOT_HW_SIZE; 12922 else 12923 return -EIO; 12924 12925 buf = kmalloc(size, GFP_KERNEL); 12926 if (buf == NULL) 12927 return -ENOMEM; 12928 12929 err = -EIO; 12930 for (i = 0, j = 0; i < size; i += 4, j++) { 12931 err = tg3_nvram_read_be32(tp, i, &buf[j]); 12932 if (err) 12933 break; 12934 } 12935 if (i < size) 12936 goto out; 12937 12938 /* Selfboot format */ 12939 magic = be32_to_cpu(buf[0]); 12940 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == 12941 TG3_EEPROM_MAGIC_FW) { 12942 u8 *buf8 = (u8 
*) buf, csum8 = 0; 12943 12944 if ((magic & TG3_EEPROM_SB_REVISION_MASK) == 12945 TG3_EEPROM_SB_REVISION_2) { 12946 /* For rev 2, the csum doesn't include the MBA. */ 12947 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++) 12948 csum8 += buf8[i]; 12949 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++) 12950 csum8 += buf8[i]; 12951 } else { 12952 for (i = 0; i < size; i++) 12953 csum8 += buf8[i]; 12954 } 12955 12956 if (csum8 == 0) { 12957 err = 0; 12958 goto out; 12959 } 12960 12961 err = -EIO; 12962 goto out; 12963 } 12964 12965 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == 12966 TG3_EEPROM_MAGIC_HW) { 12967 u8 data[NVRAM_SELFBOOT_DATA_SIZE]; 12968 u8 parity[NVRAM_SELFBOOT_DATA_SIZE]; 12969 u8 *buf8 = (u8 *) buf; 12970 12971 /* Separate the parity bits and the data bytes. */ 12972 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) { 12973 if ((i == 0) || (i == 8)) { 12974 int l; 12975 u8 msk; 12976 12977 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1) 12978 parity[k++] = buf8[i] & msk; 12979 i++; 12980 } else if (i == 16) { 12981 int l; 12982 u8 msk; 12983 12984 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1) 12985 parity[k++] = buf8[i] & msk; 12986 i++; 12987 12988 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1) 12989 parity[k++] = buf8[i] & msk; 12990 i++; 12991 } 12992 data[j++] = buf8[i]; 12993 } 12994 12995 err = -EIO; 12996 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) { 12997 u8 hw8 = hweight8(data[i]); 12998 12999 if ((hw8 & 0x1) && parity[i]) 13000 goto out; 13001 else if (!(hw8 & 0x1) && !parity[i]) 13002 goto out; 13003 } 13004 err = 0; 13005 goto out; 13006 } 13007 13008 err = -EIO; 13009 13010 /* Bootstrap checksum at offset 0x10 */ 13011 csum = calc_crc((unsigned char *) buf, 0x10); 13012 if (csum != le32_to_cpu(buf[0x10/4])) 13013 goto out; 13014 13015 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */ 13016 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88); 13017 if (csum != le32_to_cpu(buf[0xfc/4])) 13018 goto out; 13019 13020 kfree(buf); 13021 13022 buf = tg3_vpd_readblock(tp, &len); 13023 if (!buf) 13024 return -ENOMEM; 13025 13026 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA); 13027 if (i > 0) { 13028 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]); 13029 if (j < 0) 13030 goto out; 13031 13032 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len) 13033 goto out; 13034 13035 i += PCI_VPD_LRDT_TAG_SIZE; 13036 j = pci_vpd_find_info_keyword((u8 *)buf, i, j, 13037 PCI_VPD_RO_KEYWORD_CHKSUM); 13038 if (j > 0) { 13039 u8 csum8 = 0; 13040 13041 j += PCI_VPD_INFO_FLD_HDR_SIZE; 13042 13043 for (i = 0; i <= j; i++) 13044 csum8 += ((u8 *)buf)[i]; 13045 13046 if (csum8) 13047 goto out; 13048 } 13049 } 13050 13051 err = 0; 13052 13053 out: 13054 kfree(buf); 13055 return err; 13056 } 13057 13058 #define TG3_SERDES_TIMEOUT_SEC 2 13059 #define TG3_COPPER_TIMEOUT_SEC 6 13060 13061 static int tg3_test_link(struct tg3 *tp) 13062 { 13063 int i, max; 13064 13065 if (!netif_running(tp->dev)) 13066 return -ENODEV; 13067 13068 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) 13069 max = TG3_SERDES_TIMEOUT_SEC; 13070 else 13071 max = TG3_COPPER_TIMEOUT_SEC; 13072 13073 for (i = 0; i < max; i++) { 13074 if (tp->link_up) 13075 return 0; 13076 13077 if (msleep_interruptible(1000)) 13078 break; 13079 } 13080 13081 return -EIO; 13082 } 13083 13084 /* Only test the commonly used registers */ 13085 static int tg3_test_registers(struct tg3 *tp) 13086 { 13087 int i, is_5705, is_5750; 13088 u32 offset, read_mask, write_mask, val, save_val, read_val; 13089 static struct { 13090 u16 offset; 
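		/* One row per register under test: "flags" holds the
		 * TG3_FL_* bits defined below and selects which chip
		 * families the row applies to; read_mask covers the
		 * read-only bits, write_mask the driver-writable bits.
		 */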
13091 u16 flags; 13092 #define TG3_FL_5705 0x1 13093 #define TG3_FL_NOT_5705 0x2 13094 #define TG3_FL_NOT_5788 0x4 13095 #define TG3_FL_NOT_5750 0x8 13096 u32 read_mask; 13097 u32 write_mask; 13098 } reg_tbl[] = { 13099 /* MAC Control Registers */ 13100 { MAC_MODE, TG3_FL_NOT_5705, 13101 0x00000000, 0x00ef6f8c }, 13102 { MAC_MODE, TG3_FL_5705, 13103 0x00000000, 0x01ef6b8c }, 13104 { MAC_STATUS, TG3_FL_NOT_5705, 13105 0x03800107, 0x00000000 }, 13106 { MAC_STATUS, TG3_FL_5705, 13107 0x03800100, 0x00000000 }, 13108 { MAC_ADDR_0_HIGH, 0x0000, 13109 0x00000000, 0x0000ffff }, 13110 { MAC_ADDR_0_LOW, 0x0000, 13111 0x00000000, 0xffffffff }, 13112 { MAC_RX_MTU_SIZE, 0x0000, 13113 0x00000000, 0x0000ffff }, 13114 { MAC_TX_MODE, 0x0000, 13115 0x00000000, 0x00000070 }, 13116 { MAC_TX_LENGTHS, 0x0000, 13117 0x00000000, 0x00003fff }, 13118 { MAC_RX_MODE, TG3_FL_NOT_5705, 13119 0x00000000, 0x000007fc }, 13120 { MAC_RX_MODE, TG3_FL_5705, 13121 0x00000000, 0x000007dc }, 13122 { MAC_HASH_REG_0, 0x0000, 13123 0x00000000, 0xffffffff }, 13124 { MAC_HASH_REG_1, 0x0000, 13125 0x00000000, 0xffffffff }, 13126 { MAC_HASH_REG_2, 0x0000, 13127 0x00000000, 0xffffffff }, 13128 { MAC_HASH_REG_3, 0x0000, 13129 0x00000000, 0xffffffff }, 13130 13131 /* Receive Data and Receive BD Initiator Control Registers. */ 13132 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705, 13133 0x00000000, 0xffffffff }, 13134 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705, 13135 0x00000000, 0xffffffff }, 13136 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705, 13137 0x00000000, 0x00000003 }, 13138 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705, 13139 0x00000000, 0xffffffff }, 13140 { RCVDBDI_STD_BD+0, 0x0000, 13141 0x00000000, 0xffffffff }, 13142 { RCVDBDI_STD_BD+4, 0x0000, 13143 0x00000000, 0xffffffff }, 13144 { RCVDBDI_STD_BD+8, 0x0000, 13145 0x00000000, 0xffff0002 }, 13146 { RCVDBDI_STD_BD+0xc, 0x0000, 13147 0x00000000, 0xffffffff }, 13148 13149 /* Receive BD Initiator Control Registers. */ 13150 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705, 13151 0x00000000, 0xffffffff }, 13152 { RCVBDI_STD_THRESH, TG3_FL_5705, 13153 0x00000000, 0x000003ff }, 13154 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705, 13155 0x00000000, 0xffffffff }, 13156 13157 /* Host Coalescing Control Registers. 
*/ 13158 { HOSTCC_MODE, TG3_FL_NOT_5705, 13159 0x00000000, 0x00000004 }, 13160 { HOSTCC_MODE, TG3_FL_5705, 13161 0x00000000, 0x000000f6 }, 13162 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705, 13163 0x00000000, 0xffffffff }, 13164 { HOSTCC_RXCOL_TICKS, TG3_FL_5705, 13165 0x00000000, 0x000003ff }, 13166 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705, 13167 0x00000000, 0xffffffff }, 13168 { HOSTCC_TXCOL_TICKS, TG3_FL_5705, 13169 0x00000000, 0x000003ff }, 13170 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705, 13171 0x00000000, 0xffffffff }, 13172 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, 13173 0x00000000, 0x000000ff }, 13174 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705, 13175 0x00000000, 0xffffffff }, 13176 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, 13177 0x00000000, 0x000000ff }, 13178 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705, 13179 0x00000000, 0xffffffff }, 13180 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705, 13181 0x00000000, 0xffffffff }, 13182 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705, 13183 0x00000000, 0xffffffff }, 13184 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, 13185 0x00000000, 0x000000ff }, 13186 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705, 13187 0x00000000, 0xffffffff }, 13188 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, 13189 0x00000000, 0x000000ff }, 13190 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705, 13191 0x00000000, 0xffffffff }, 13192 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705, 13193 0x00000000, 0xffffffff }, 13194 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705, 13195 0x00000000, 0xffffffff }, 13196 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000, 13197 0x00000000, 0xffffffff }, 13198 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000, 13199 0x00000000, 0xffffffff }, 13200 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000, 13201 0xffffffff, 0x00000000 }, 13202 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000, 13203 0xffffffff, 0x00000000 }, 13204 13205 /* Buffer Manager Control Registers. 
*/ 13206 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750, 13207 0x00000000, 0x007fff80 }, 13208 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750, 13209 0x00000000, 0x007fffff }, 13210 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000, 13211 0x00000000, 0x0000003f }, 13212 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000, 13213 0x00000000, 0x000001ff }, 13214 { BUFMGR_MB_HIGH_WATER, 0x0000, 13215 0x00000000, 0x000001ff }, 13216 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705, 13217 0xffffffff, 0x00000000 }, 13218 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705, 13219 0xffffffff, 0x00000000 }, 13220 13221 /* Mailbox Registers */ 13222 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000, 13223 0x00000000, 0x000001ff }, 13224 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705, 13225 0x00000000, 0x000001ff }, 13226 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000, 13227 0x00000000, 0x000007ff }, 13228 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000, 13229 0x00000000, 0x000001ff }, 13230 13231 { 0xffff, 0x0000, 0x00000000, 0x00000000 }, 13232 }; 13233 13234 is_5705 = is_5750 = 0; 13235 if (tg3_flag(tp, 5705_PLUS)) { 13236 is_5705 = 1; 13237 if (tg3_flag(tp, 5750_PLUS)) 13238 is_5750 = 1; 13239 } 13240 13241 for (i = 0; reg_tbl[i].offset != 0xffff; i++) { 13242 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705)) 13243 continue; 13244 13245 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705)) 13246 continue; 13247 13248 if (tg3_flag(tp, IS_5788) && 13249 (reg_tbl[i].flags & TG3_FL_NOT_5788)) 13250 continue; 13251 13252 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750)) 13253 continue; 13254 13255 offset = (u32) reg_tbl[i].offset; 13256 read_mask = reg_tbl[i].read_mask; 13257 write_mask = reg_tbl[i].write_mask; 13258 13259 /* Save the original register content */ 13260 save_val = tr32(offset); 13261 13262 /* Determine the read-only value. */ 13263 read_val = save_val & read_mask; 13264 13265 /* Write zero to the register, then make sure the read-only bits 13266 * are not changed and the read/write bits are all zeros. 13267 */ 13268 tw32(offset, 0); 13269 13270 val = tr32(offset); 13271 13272 /* Test the read-only and read/write bits. */ 13273 if (((val & read_mask) != read_val) || (val & write_mask)) 13274 goto out; 13275 13276 /* Write ones to all the bits defined by RdMask and WrMask, then 13277 * make sure the read-only bits are not changed and the 13278 * read/write bits are all ones. 13279 */ 13280 tw32(offset, read_mask | write_mask); 13281 13282 val = tr32(offset); 13283 13284 /* Test the read-only bits. */ 13285 if ((val & read_mask) != read_val) 13286 goto out; 13287 13288 /* Test the read/write bits. 
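		 * After writing (read_mask | write_mask) above, every bit
		 * in write_mask must now read back as one.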
*/ 13289 if ((val & write_mask) != write_mask) 13290 goto out; 13291 13292 tw32(offset, save_val); 13293 } 13294 13295 return 0; 13296 13297 out: 13298 if (netif_msg_hw(tp)) 13299 netdev_err(tp->dev, 13300 "Register test failed at offset %x\n", offset); 13301 tw32(offset, save_val); 13302 return -EIO; 13303 } 13304 13305 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len) 13306 { 13307 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a }; 13308 int i; 13309 u32 j; 13310 13311 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) { 13312 for (j = 0; j < len; j += 4) { 13313 u32 val; 13314 13315 tg3_write_mem(tp, offset + j, test_pattern[i]); 13316 tg3_read_mem(tp, offset + j, &val); 13317 if (val != test_pattern[i]) 13318 return -EIO; 13319 } 13320 } 13321 return 0; 13322 } 13323 13324 static int tg3_test_memory(struct tg3 *tp) 13325 { 13326 static struct mem_entry { 13327 u32 offset; 13328 u32 len; 13329 } mem_tbl_570x[] = { 13330 { 0x00000000, 0x00b50}, 13331 { 0x00002000, 0x1c000}, 13332 { 0xffffffff, 0x00000} 13333 }, mem_tbl_5705[] = { 13334 { 0x00000100, 0x0000c}, 13335 { 0x00000200, 0x00008}, 13336 { 0x00004000, 0x00800}, 13337 { 0x00006000, 0x01000}, 13338 { 0x00008000, 0x02000}, 13339 { 0x00010000, 0x0e000}, 13340 { 0xffffffff, 0x00000} 13341 }, mem_tbl_5755[] = { 13342 { 0x00000200, 0x00008}, 13343 { 0x00004000, 0x00800}, 13344 { 0x00006000, 0x00800}, 13345 { 0x00008000, 0x02000}, 13346 { 0x00010000, 0x0c000}, 13347 { 0xffffffff, 0x00000} 13348 }, mem_tbl_5906[] = { 13349 { 0x00000200, 0x00008}, 13350 { 0x00004000, 0x00400}, 13351 { 0x00006000, 0x00400}, 13352 { 0x00008000, 0x01000}, 13353 { 0x00010000, 0x01000}, 13354 { 0xffffffff, 0x00000} 13355 }, mem_tbl_5717[] = { 13356 { 0x00000200, 0x00008}, 13357 { 0x00010000, 0x0a000}, 13358 { 0x00020000, 0x13c00}, 13359 { 0xffffffff, 0x00000} 13360 }, mem_tbl_57765[] = { 13361 { 0x00000200, 0x00008}, 13362 { 0x00004000, 0x00800}, 13363 { 0x00006000, 0x09800}, 13364 { 0x00010000, 0x0a000}, 13365 { 0xffffffff, 0x00000} 13366 }; 13367 struct mem_entry *mem_tbl; 13368 int err = 0; 13369 int i; 13370 13371 if (tg3_flag(tp, 5717_PLUS)) 13372 mem_tbl = mem_tbl_5717; 13373 else if (tg3_flag(tp, 57765_CLASS) || 13374 tg3_asic_rev(tp) == ASIC_REV_5762) 13375 mem_tbl = mem_tbl_57765; 13376 else if (tg3_flag(tp, 5755_PLUS)) 13377 mem_tbl = mem_tbl_5755; 13378 else if (tg3_asic_rev(tp) == ASIC_REV_5906) 13379 mem_tbl = mem_tbl_5906; 13380 else if (tg3_flag(tp, 5705_PLUS)) 13381 mem_tbl = mem_tbl_5705; 13382 else 13383 mem_tbl = mem_tbl_570x; 13384 13385 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) { 13386 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len); 13387 if (err) 13388 break; 13389 } 13390 13391 return err; 13392 } 13393 13394 #define TG3_TSO_MSS 500 13395 13396 #define TG3_TSO_IP_HDR_LEN 20 13397 #define TG3_TSO_TCP_HDR_LEN 20 13398 #define TG3_TSO_TCP_OPT_LEN 12 13399 13400 static const u8 tg3_tso_header[] = { 13401 0x08, 0x00, 13402 0x45, 0x00, 0x00, 0x00, 13403 0x00, 0x00, 0x40, 0x00, 13404 0x40, 0x06, 0x00, 0x00, 13405 0x0a, 0x00, 0x00, 0x01, 13406 0x0a, 0x00, 0x00, 0x02, 13407 0x0d, 0x00, 0xe0, 0x00, 13408 0x00, 0x00, 0x01, 0x00, 13409 0x00, 0x00, 0x02, 0x00, 13410 0x80, 0x10, 0x10, 0x00, 13411 0x14, 0x09, 0x00, 0x00, 13412 0x01, 0x01, 0x08, 0x0a, 13413 0x11, 0x11, 0x11, 0x11, 13414 0x11, 0x11, 0x11, 0x11, 13415 }; 13416 13417 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback) 13418 { 13419 u32 rx_start_idx, rx_idx, tx_idx, opaque_key; 13420 u32 base_flags = 0, mss = 0, 
desc_idx, coal_now, data_off, val; 13421 u32 budget; 13422 struct sk_buff *skb; 13423 u8 *tx_data, *rx_data; 13424 dma_addr_t map; 13425 int num_pkts, tx_len, rx_len, i, err; 13426 struct tg3_rx_buffer_desc *desc; 13427 struct tg3_napi *tnapi, *rnapi; 13428 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring; 13429 13430 tnapi = &tp->napi[0]; 13431 rnapi = &tp->napi[0]; 13432 if (tp->irq_cnt > 1) { 13433 if (tg3_flag(tp, ENABLE_RSS)) 13434 rnapi = &tp->napi[1]; 13435 if (tg3_flag(tp, ENABLE_TSS)) 13436 tnapi = &tp->napi[1]; 13437 } 13438 coal_now = tnapi->coal_now | rnapi->coal_now; 13439 13440 err = -EIO; 13441 13442 tx_len = pktsz; 13443 skb = netdev_alloc_skb(tp->dev, tx_len); 13444 if (!skb) 13445 return -ENOMEM; 13446 13447 tx_data = skb_put(skb, tx_len); 13448 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN); 13449 memset(tx_data + ETH_ALEN, 0x0, 8); 13450 13451 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN); 13452 13453 if (tso_loopback) { 13454 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN]; 13455 13456 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN + 13457 TG3_TSO_TCP_OPT_LEN; 13458 13459 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header, 13460 sizeof(tg3_tso_header)); 13461 mss = TG3_TSO_MSS; 13462 13463 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header); 13464 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS); 13465 13466 /* Set the total length field in the IP header */ 13467 iph->tot_len = htons((u16)(mss + hdr_len)); 13468 13469 base_flags = (TXD_FLAG_CPU_PRE_DMA | 13470 TXD_FLAG_CPU_POST_DMA); 13471 13472 if (tg3_flag(tp, HW_TSO_1) || 13473 tg3_flag(tp, HW_TSO_2) || 13474 tg3_flag(tp, HW_TSO_3)) { 13475 struct tcphdr *th; 13476 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN; 13477 th = (struct tcphdr *)&tx_data[val]; 13478 th->check = 0; 13479 } else 13480 base_flags |= TXD_FLAG_TCPUDP_CSUM; 13481 13482 if (tg3_flag(tp, HW_TSO_3)) { 13483 mss |= (hdr_len & 0xc) << 12; 13484 if (hdr_len & 0x10) 13485 base_flags |= 0x00000010; 13486 base_flags |= (hdr_len & 0x3e0) << 5; 13487 } else if (tg3_flag(tp, HW_TSO_2)) 13488 mss |= hdr_len << 9; 13489 else if (tg3_flag(tp, HW_TSO_1) || 13490 tg3_asic_rev(tp) == ASIC_REV_5705) { 13491 mss |= (TG3_TSO_TCP_OPT_LEN << 9); 13492 } else { 13493 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10); 13494 } 13495 13496 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header); 13497 } else { 13498 num_pkts = 1; 13499 data_off = ETH_HLEN; 13500 13501 if (tg3_flag(tp, USE_JUMBO_BDFLAG) && 13502 tx_len > VLAN_ETH_FRAME_LEN) 13503 base_flags |= TXD_FLAG_JMB_PKT; 13504 } 13505 13506 for (i = data_off; i < tx_len; i++) 13507 tx_data[i] = (u8) (i & 0xff); 13508 13509 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE); 13510 if (pci_dma_mapping_error(tp->pdev, map)) { 13511 dev_kfree_skb(skb); 13512 return -EIO; 13513 } 13514 13515 val = tnapi->tx_prod; 13516 tnapi->tx_buffers[val].skb = skb; 13517 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map); 13518 13519 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 13520 rnapi->coal_now); 13521 13522 udelay(10); 13523 13524 rx_start_idx = rnapi->hw_status->idx[0].rx_producer; 13525 13526 budget = tg3_tx_avail(tnapi); 13527 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len, 13528 base_flags | TXD_FLAG_END, mss, 0)) { 13529 tnapi->tx_buffers[val].skb = NULL; 13530 dev_kfree_skb(skb); 13531 return -EIO; 13532 } 13533 13534 tnapi->tx_prod++; 13535 13536 /* Sync BD data before updating mailbox */ 13537 wmb(); 13538 13539 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod); 13540 
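	/* Read the mailbox back, most likely to flush the posted
	 * producer-index write before the polling loop below.
	 */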
tr32_mailbox(tnapi->prodmbox); 13541 13542 udelay(10); 13543 13544 /* 350 usec to allow enough time on some 10/100 Mbps devices. */ 13545 for (i = 0; i < 35; i++) { 13546 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 13547 coal_now); 13548 13549 udelay(10); 13550 13551 tx_idx = tnapi->hw_status->idx[0].tx_consumer; 13552 rx_idx = rnapi->hw_status->idx[0].rx_producer; 13553 if ((tx_idx == tnapi->tx_prod) && 13554 (rx_idx == (rx_start_idx + num_pkts))) 13555 break; 13556 } 13557 13558 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1); 13559 dev_kfree_skb(skb); 13560 13561 if (tx_idx != tnapi->tx_prod) 13562 goto out; 13563 13564 if (rx_idx != rx_start_idx + num_pkts) 13565 goto out; 13566 13567 val = data_off; 13568 while (rx_idx != rx_start_idx) { 13569 desc = &rnapi->rx_rcb[rx_start_idx++]; 13570 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; 13571 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; 13572 13573 if ((desc->err_vlan & RXD_ERR_MASK) != 0 && 13574 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) 13575 goto out; 13576 13577 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) 13578 - ETH_FCS_LEN; 13579 13580 if (!tso_loopback) { 13581 if (rx_len != tx_len) 13582 goto out; 13583 13584 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) { 13585 if (opaque_key != RXD_OPAQUE_RING_STD) 13586 goto out; 13587 } else { 13588 if (opaque_key != RXD_OPAQUE_RING_JUMBO) 13589 goto out; 13590 } 13591 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && 13592 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK) 13593 >> RXD_TCPCSUM_SHIFT != 0xffff) { 13594 goto out; 13595 } 13596 13597 if (opaque_key == RXD_OPAQUE_RING_STD) { 13598 rx_data = tpr->rx_std_buffers[desc_idx].data; 13599 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], 13600 mapping); 13601 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { 13602 rx_data = tpr->rx_jmb_buffers[desc_idx].data; 13603 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx], 13604 mapping); 13605 } else 13606 goto out; 13607 13608 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, 13609 PCI_DMA_FROMDEVICE); 13610 13611 rx_data += TG3_RX_OFFSET(tp); 13612 for (i = data_off; i < rx_len; i++, val++) { 13613 if (*(rx_data + i) != (u8) (val & 0xff)) 13614 goto out; 13615 } 13616 } 13617 13618 err = 0; 13619 13620 /* tg3_free_rings will unmap and free the rx_data */ 13621 out: 13622 return err; 13623 } 13624 13625 #define TG3_STD_LOOPBACK_FAILED 1 13626 #define TG3_JMB_LOOPBACK_FAILED 2 13627 #define TG3_TSO_LOOPBACK_FAILED 4 13628 #define TG3_LOOPBACK_FAILED \ 13629 (TG3_STD_LOOPBACK_FAILED | \ 13630 TG3_JMB_LOOPBACK_FAILED | \ 13631 TG3_TSO_LOOPBACK_FAILED) 13632 13633 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk) 13634 { 13635 int err = -EIO; 13636 u32 eee_cap; 13637 u32 jmb_pkt_sz = 9000; 13638 13639 if (tp->dma_limit) 13640 jmb_pkt_sz = tp->dma_limit - ETH_HLEN; 13641 13642 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP; 13643 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP; 13644 13645 if (!netif_running(tp->dev)) { 13646 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13647 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13648 if (do_extlpbk) 13649 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13650 goto done; 13651 } 13652 13653 err = tg3_reset_hw(tp, true); 13654 if (err) { 13655 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13656 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13657 if (do_extlpbk) 13658 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13659 goto done; 13660 } 13661 13662 if (tg3_flag(tp, ENABLE_RSS)) { 13663 int 
i; 13664 13665 /* Reroute all rx packets to the 1st queue */ 13666 for (i = MAC_RSS_INDIR_TBL_0; 13667 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4) 13668 tw32(i, 0x0); 13669 } 13670 13671 /* HW errata - mac loopback fails in some cases on 5780. 13672 * Normal traffic and PHY loopback are not affected by 13673 * errata. Also, the MAC loopback test is deprecated for 13674 * all newer ASIC revisions. 13675 */ 13676 if (tg3_asic_rev(tp) != ASIC_REV_5780 && 13677 !tg3_flag(tp, CPMU_PRESENT)) { 13678 tg3_mac_loopback(tp, true); 13679 13680 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) 13681 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED; 13682 13683 if (tg3_flag(tp, JUMBO_RING_ENABLE) && 13684 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false)) 13685 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED; 13686 13687 tg3_mac_loopback(tp, false); 13688 } 13689 13690 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 13691 !tg3_flag(tp, USE_PHYLIB)) { 13692 int i; 13693 13694 tg3_phy_lpbk_set(tp, 0, false); 13695 13696 /* Wait for link */ 13697 for (i = 0; i < 100; i++) { 13698 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) 13699 break; 13700 mdelay(1); 13701 } 13702 13703 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) 13704 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED; 13705 if (tg3_flag(tp, TSO_CAPABLE) && 13706 tg3_run_loopback(tp, ETH_FRAME_LEN, true)) 13707 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED; 13708 if (tg3_flag(tp, JUMBO_RING_ENABLE) && 13709 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false)) 13710 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED; 13711 13712 if (do_extlpbk) { 13713 tg3_phy_lpbk_set(tp, 0, true); 13714 13715 /* All link indications report up, but the hardware 13716 * isn't really ready for about 20 msec. Double it 13717 * to be sure. 13718 */ 13719 mdelay(40); 13720 13721 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) 13722 data[TG3_EXT_LOOPB_TEST] |= 13723 TG3_STD_LOOPBACK_FAILED; 13724 if (tg3_flag(tp, TSO_CAPABLE) && 13725 tg3_run_loopback(tp, ETH_FRAME_LEN, true)) 13726 data[TG3_EXT_LOOPB_TEST] |= 13727 TG3_TSO_LOOPBACK_FAILED; 13728 if (tg3_flag(tp, JUMBO_RING_ENABLE) && 13729 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false)) 13730 data[TG3_EXT_LOOPB_TEST] |= 13731 TG3_JMB_LOOPBACK_FAILED; 13732 } 13733 13734 /* Re-enable gphy autopowerdown. */ 13735 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) 13736 tg3_phy_toggle_apd(tp, true); 13737 } 13738 13739 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] | 13740 data[TG3_EXT_LOOPB_TEST]) ? 
-EIO : 0; 13741 13742 done: 13743 tp->phy_flags |= eee_cap; 13744 13745 return err; 13746 } 13747 13748 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, 13749 u64 *data) 13750 { 13751 struct tg3 *tp = netdev_priv(dev); 13752 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB; 13753 13754 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { 13755 if (tg3_power_up(tp)) { 13756 etest->flags |= ETH_TEST_FL_FAILED; 13757 memset(data, 1, sizeof(u64) * TG3_NUM_TEST); 13758 return; 13759 } 13760 tg3_ape_driver_state_change(tp, RESET_KIND_INIT); 13761 } 13762 13763 memset(data, 0, sizeof(u64) * TG3_NUM_TEST); 13764 13765 if (tg3_test_nvram(tp) != 0) { 13766 etest->flags |= ETH_TEST_FL_FAILED; 13767 data[TG3_NVRAM_TEST] = 1; 13768 } 13769 if (!doextlpbk && tg3_test_link(tp)) { 13770 etest->flags |= ETH_TEST_FL_FAILED; 13771 data[TG3_LINK_TEST] = 1; 13772 } 13773 if (etest->flags & ETH_TEST_FL_OFFLINE) { 13774 int err, err2 = 0, irq_sync = 0; 13775 13776 if (netif_running(dev)) { 13777 tg3_phy_stop(tp); 13778 tg3_netif_stop(tp); 13779 irq_sync = 1; 13780 } 13781 13782 tg3_full_lock(tp, irq_sync); 13783 tg3_halt(tp, RESET_KIND_SUSPEND, 1); 13784 err = tg3_nvram_lock(tp); 13785 tg3_halt_cpu(tp, RX_CPU_BASE); 13786 if (!tg3_flag(tp, 5705_PLUS)) 13787 tg3_halt_cpu(tp, TX_CPU_BASE); 13788 if (!err) 13789 tg3_nvram_unlock(tp); 13790 13791 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 13792 tg3_phy_reset(tp); 13793 13794 if (tg3_test_registers(tp) != 0) { 13795 etest->flags |= ETH_TEST_FL_FAILED; 13796 data[TG3_REGISTER_TEST] = 1; 13797 } 13798 13799 if (tg3_test_memory(tp) != 0) { 13800 etest->flags |= ETH_TEST_FL_FAILED; 13801 data[TG3_MEMORY_TEST] = 1; 13802 } 13803 13804 if (doextlpbk) 13805 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; 13806 13807 if (tg3_test_loopback(tp, data, doextlpbk)) 13808 etest->flags |= ETH_TEST_FL_FAILED; 13809 13810 tg3_full_unlock(tp); 13811 13812 if (tg3_test_interrupt(tp) != 0) { 13813 etest->flags |= ETH_TEST_FL_FAILED; 13814 data[TG3_INTERRUPT_TEST] = 1; 13815 } 13816 13817 tg3_full_lock(tp, 0); 13818 13819 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 13820 if (netif_running(dev)) { 13821 tg3_flag_set(tp, INIT_COMPLETE); 13822 err2 = tg3_restart_hw(tp, true); 13823 if (!err2) 13824 tg3_netif_start(tp); 13825 } 13826 13827 tg3_full_unlock(tp); 13828 13829 if (irq_sync && !err2) 13830 tg3_phy_start(tp); 13831 } 13832 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 13833 tg3_power_down_prepare(tp); 13834 13835 } 13836 13837 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) 13838 { 13839 struct tg3 *tp = netdev_priv(dev); 13840 struct hwtstamp_config stmpconf; 13841 13842 if (!tg3_flag(tp, PTP_CAPABLE)) 13843 return -EOPNOTSUPP; 13844 13845 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf))) 13846 return -EFAULT; 13847 13848 if (stmpconf.flags) 13849 return -EINVAL; 13850 13851 if (stmpconf.tx_type != HWTSTAMP_TX_ON && 13852 stmpconf.tx_type != HWTSTAMP_TX_OFF) 13853 return -ERANGE; 13854 13855 switch (stmpconf.rx_filter) { 13856 case HWTSTAMP_FILTER_NONE: 13857 tp->rxptpctl = 0; 13858 break; 13859 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 13860 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | 13861 TG3_RX_PTP_CTL_ALL_V1_EVENTS; 13862 break; 13863 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 13864 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | 13865 TG3_RX_PTP_CTL_SYNC_EVNT; 13866 break; 13867 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 13868 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | 13869 TG3_RX_PTP_CTL_DELAY_REQ; 13870 break; 13871 case 
HWTSTAMP_FILTER_PTP_V2_EVENT: 13872 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN | 13873 TG3_RX_PTP_CTL_ALL_V2_EVENTS; 13874 break; 13875 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 13876 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | 13877 TG3_RX_PTP_CTL_ALL_V2_EVENTS; 13878 break; 13879 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 13880 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | 13881 TG3_RX_PTP_CTL_ALL_V2_EVENTS; 13882 break; 13883 case HWTSTAMP_FILTER_PTP_V2_SYNC: 13884 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN | 13885 TG3_RX_PTP_CTL_SYNC_EVNT; 13886 break; 13887 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 13888 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | 13889 TG3_RX_PTP_CTL_SYNC_EVNT; 13890 break; 13891 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 13892 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | 13893 TG3_RX_PTP_CTL_SYNC_EVNT; 13894 break; 13895 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 13896 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN | 13897 TG3_RX_PTP_CTL_DELAY_REQ; 13898 break; 13899 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 13900 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | 13901 TG3_RX_PTP_CTL_DELAY_REQ; 13902 break; 13903 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 13904 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | 13905 TG3_RX_PTP_CTL_DELAY_REQ; 13906 break; 13907 default: 13908 return -ERANGE; 13909 } 13910 13911 if (netif_running(dev) && tp->rxptpctl) 13912 tw32(TG3_RX_PTP_CTL, 13913 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK); 13914 13915 if (stmpconf.tx_type == HWTSTAMP_TX_ON) 13916 tg3_flag_set(tp, TX_TSTAMP_EN); 13917 else 13918 tg3_flag_clear(tp, TX_TSTAMP_EN); 13919 13920 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ? 13921 -EFAULT : 0; 13922 } 13923 13924 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) 13925 { 13926 struct tg3 *tp = netdev_priv(dev); 13927 struct hwtstamp_config stmpconf; 13928 13929 if (!tg3_flag(tp, PTP_CAPABLE)) 13930 return -EOPNOTSUPP; 13931 13932 stmpconf.flags = 0; 13933 stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ? 
13934 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF); 13935 13936 switch (tp->rxptpctl) { 13937 case 0: 13938 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE; 13939 break; 13940 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS: 13941 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; 13942 break; 13943 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT: 13944 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC; 13945 break; 13946 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ: 13947 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ; 13948 break; 13949 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS: 13950 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; 13951 break; 13952 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS: 13953 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; 13954 break; 13955 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS: 13956 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; 13957 break; 13958 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT: 13959 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC; 13960 break; 13961 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT: 13962 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC; 13963 break; 13964 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT: 13965 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC; 13966 break; 13967 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ: 13968 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ; 13969 break; 13970 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ: 13971 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ; 13972 break; 13973 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ: 13974 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ; 13975 break; 13976 default: 13977 WARN_ON_ONCE(1); 13978 return -ERANGE; 13979 } 13980 13981 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ? 
13982 -EFAULT : 0; 13983 } 13984 13985 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 13986 { 13987 struct mii_ioctl_data *data = if_mii(ifr); 13988 struct tg3 *tp = netdev_priv(dev); 13989 int err; 13990 13991 if (tg3_flag(tp, USE_PHYLIB)) { 13992 struct phy_device *phydev; 13993 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 13994 return -EAGAIN; 13995 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 13996 return phy_mii_ioctl(phydev, ifr, cmd); 13997 } 13998 13999 switch (cmd) { 14000 case SIOCGMIIPHY: 14001 data->phy_id = tp->phy_addr; 14002 14003 /* fall through */ 14004 case SIOCGMIIREG: { 14005 u32 mii_regval; 14006 14007 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 14008 break; /* We have no PHY */ 14009 14010 if (!netif_running(dev)) 14011 return -EAGAIN; 14012 14013 spin_lock_bh(&tp->lock); 14014 err = __tg3_readphy(tp, data->phy_id & 0x1f, 14015 data->reg_num & 0x1f, &mii_regval); 14016 spin_unlock_bh(&tp->lock); 14017 14018 data->val_out = mii_regval; 14019 14020 return err; 14021 } 14022 14023 case SIOCSMIIREG: 14024 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 14025 break; /* We have no PHY */ 14026 14027 if (!netif_running(dev)) 14028 return -EAGAIN; 14029 14030 spin_lock_bh(&tp->lock); 14031 err = __tg3_writephy(tp, data->phy_id & 0x1f, 14032 data->reg_num & 0x1f, data->val_in); 14033 spin_unlock_bh(&tp->lock); 14034 14035 return err; 14036 14037 case SIOCSHWTSTAMP: 14038 return tg3_hwtstamp_set(dev, ifr); 14039 14040 case SIOCGHWTSTAMP: 14041 return tg3_hwtstamp_get(dev, ifr); 14042 14043 default: 14044 /* do nothing */ 14045 break; 14046 } 14047 return -EOPNOTSUPP; 14048 } 14049 14050 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) 14051 { 14052 struct tg3 *tp = netdev_priv(dev); 14053 14054 memcpy(ec, &tp->coal, sizeof(*ec)); 14055 return 0; 14056 } 14057 14058 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) 14059 { 14060 struct tg3 *tp = netdev_priv(dev); 14061 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0; 14062 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0; 14063 14064 if (!tg3_flag(tp, 5705_PLUS)) { 14065 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT; 14066 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT; 14067 max_stat_coal_ticks = MAX_STAT_COAL_TICKS; 14068 min_stat_coal_ticks = MIN_STAT_COAL_TICKS; 14069 } 14070 14071 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) || 14072 (!ec->rx_coalesce_usecs) || 14073 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) || 14074 (!ec->tx_coalesce_usecs) || 14075 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) || 14076 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) || 14077 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) || 14078 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) || 14079 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) || 14080 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) || 14081 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) || 14082 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks)) 14083 return -EINVAL; 14084 14085 /* Only copy relevant parameters, ignore all others. 
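	 *
	 * Illustrative example (hypothetical interface "eth0"): the
	 * fields copied below are what userspace sets via e.g.
	 *
	 *	ethtool -C eth0 rx-usecs 20 rx-frames 5
	 *
	 * all other struct ethtool_coalesce members are ignored here.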
 */
14086 	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14087 	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14088 	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14089 	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14090 	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14091 	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14092 	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14093 	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14094 	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14095 
14096 	if (netif_running(dev)) {
14097 		tg3_full_lock(tp, 0);
14098 		__tg3_set_coalesce(tp, &tp->coal);
14099 		tg3_full_unlock(tp);
14100 	}
14101 	return 0;
14102 }
14103 
14104 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14105 {
14106 	struct tg3 *tp = netdev_priv(dev);
14107 
14108 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14109 		netdev_warn(tp->dev, "Board does not support EEE!\n");
14110 		return -EOPNOTSUPP;
14111 	}
14112 
14113 	if (edata->advertised != tp->eee.advertised) {
14114 		netdev_warn(tp->dev,
14115 			    "Direct manipulation of EEE advertisement is not supported\n");
14116 		return -EINVAL;
14117 	}
14118 
14119 	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14120 		netdev_warn(tp->dev,
14121 			    "Maximum supported Tx LPI timer is %#x\n",
14122 			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14123 		return -EINVAL;
14124 	}
14125 
14126 	tp->eee = *edata;
14127 
14128 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14129 	tg3_warn_mgmt_link_flap(tp);
14130 
14131 	if (netif_running(tp->dev)) {
14132 		tg3_full_lock(tp, 0);
14133 		tg3_setup_eee(tp);
14134 		tg3_phy_reset(tp);
14135 		tg3_full_unlock(tp);
14136 	}
14137 
14138 	return 0;
14139 }
14140 
14141 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14142 {
14143 	struct tg3 *tp = netdev_priv(dev);
14144 
14145 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14146 		netdev_warn(tp->dev,
14147 			    "Board does not support EEE!\n");
14148 		return -EOPNOTSUPP;
14149 	}
14150 
14151 	*edata = tp->eee;
14152 	return 0;
14153 }
14154 
14155 static const struct ethtool_ops tg3_ethtool_ops = {
14156 	.get_drvinfo = tg3_get_drvinfo,
14157 	.get_regs_len = tg3_get_regs_len,
14158 	.get_regs = tg3_get_regs,
14159 	.get_wol = tg3_get_wol,
14160 	.set_wol = tg3_set_wol,
14161 	.get_msglevel = tg3_get_msglevel,
14162 	.set_msglevel = tg3_set_msglevel,
14163 	.nway_reset = tg3_nway_reset,
14164 	.get_link = ethtool_op_get_link,
14165 	.get_eeprom_len = tg3_get_eeprom_len,
14166 	.get_eeprom = tg3_get_eeprom,
14167 	.set_eeprom = tg3_set_eeprom,
14168 	.get_ringparam = tg3_get_ringparam,
14169 	.set_ringparam = tg3_set_ringparam,
14170 	.get_pauseparam = tg3_get_pauseparam,
14171 	.set_pauseparam = tg3_set_pauseparam,
14172 	.self_test = tg3_self_test,
14173 	.get_strings = tg3_get_strings,
14174 	.set_phys_id = tg3_set_phys_id,
14175 	.get_ethtool_stats = tg3_get_ethtool_stats,
14176 	.get_coalesce = tg3_get_coalesce,
14177 	.set_coalesce = tg3_set_coalesce,
14178 	.get_sset_count = tg3_get_sset_count,
14179 	.get_rxnfc = tg3_get_rxnfc,
14180 	.get_rxfh_indir_size = tg3_get_rxfh_indir_size,
14181 	.get_rxfh = tg3_get_rxfh,
14182 	.set_rxfh = tg3_set_rxfh,
14183 	.get_channels = tg3_get_channels,
14184 	.set_channels = tg3_set_channels,
14185 	.get_ts_info = tg3_get_ts_info,
14186 	.get_eee = tg3_get_eee,
14187 	.set_eee = tg3_set_eee,
14188 	.get_link_ksettings = tg3_get_link_ksettings,
14189 	.set_link_ksettings = tg3_set_link_ksettings,
14190 };
14191 
14192 static
void tg3_get_stats64(struct net_device *dev,
14193 		     struct rtnl_link_stats64 *stats)
14194 {
14195 	struct tg3 *tp = netdev_priv(dev);
14196 
14197 	spin_lock_bh(&tp->lock);
14198 	if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14199 		*stats = tp->net_stats_prev;
14200 		spin_unlock_bh(&tp->lock);
14201 		return;
14202 	}
14203 
14204 	tg3_get_nstats(tp, stats);
14205 	spin_unlock_bh(&tp->lock);
14206 }
14207 
14208 static void tg3_set_rx_mode(struct net_device *dev)
14209 {
14210 	struct tg3 *tp = netdev_priv(dev);
14211 
14212 	if (!netif_running(dev))
14213 		return;
14214 
14215 	tg3_full_lock(tp, 0);
14216 	__tg3_set_rx_mode(dev);
14217 	tg3_full_unlock(tp);
14218 }
14219 
14220 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14221 			       int new_mtu)
14222 {
14223 	dev->mtu = new_mtu;
14224 
14225 	if (new_mtu > ETH_DATA_LEN) {
14226 		if (tg3_flag(tp, 5780_CLASS)) { /* 5780-class chips cannot run TSO on jumbo frames */
14227 			netdev_update_features(dev);
14228 			tg3_flag_clear(tp, TSO_CAPABLE);
14229 		} else {
14230 			tg3_flag_set(tp, JUMBO_RING_ENABLE);
14231 		}
14232 	} else {
14233 		if (tg3_flag(tp, 5780_CLASS)) {
14234 			tg3_flag_set(tp, TSO_CAPABLE);
14235 			netdev_update_features(dev);
14236 		}
14237 		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14238 	}
14239 }
14240 
14241 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14242 {
14243 	struct tg3 *tp = netdev_priv(dev);
14244 	int err;
14245 	bool reset_phy = false;
14246 
14247 	if (!netif_running(dev)) {
14248 		/* We'll just catch it later when the
14249 		 * device is brought up.
14250 		 */
14251 		tg3_set_mtu(dev, tp, new_mtu);
14252 		return 0;
14253 	}
14254 
14255 	tg3_phy_stop(tp);
14256 
14257 	tg3_netif_stop(tp);
14258 
14259 	tg3_set_mtu(dev, tp, new_mtu);
14260 
14261 	tg3_full_lock(tp, 1);
14262 
14263 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14264 
14265 	/* Reset PHY, otherwise the read DMA engine will be in a mode that
14266 	 * breaks all requests to 256 bytes.
14267 	 */
14268 	if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14269 	    tg3_asic_rev(tp) == ASIC_REV_5717 ||
14270 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
14271 	    tg3_asic_rev(tp) == ASIC_REV_5720)
14272 		reset_phy = true;
14273 
14274 	err = tg3_restart_hw(tp, reset_phy);
14275 
14276 	if (!err)
14277 		tg3_netif_start(tp);
14278 
14279 	tg3_full_unlock(tp);
14280 
14281 	if (!err)
14282 		tg3_phy_start(tp);
14283 
14284 	return err;
14285 }
14286 
14287 static const struct net_device_ops tg3_netdev_ops = {
14288 	.ndo_open = tg3_open,
14289 	.ndo_stop = tg3_close,
14290 	.ndo_start_xmit = tg3_start_xmit,
14291 	.ndo_get_stats64 = tg3_get_stats64,
14292 	.ndo_validate_addr = eth_validate_addr,
14293 	.ndo_set_rx_mode = tg3_set_rx_mode,
14294 	.ndo_set_mac_address = tg3_set_mac_addr,
14295 	.ndo_do_ioctl = tg3_ioctl,
14296 	.ndo_tx_timeout = tg3_tx_timeout,
14297 	.ndo_change_mtu = tg3_change_mtu,
14298 	.ndo_fix_features = tg3_fix_features,
14299 	.ndo_set_features = tg3_set_features,
14300 #ifdef CONFIG_NET_POLL_CONTROLLER
14301 	.ndo_poll_controller = tg3_poll_controller,
14302 #endif
14303 };
14304 
14305 static void tg3_get_eeprom_size(struct tg3 *tp)
14306 {
14307 	u32 cursize, val, magic;
14308 
14309 	tp->nvram_size = EEPROM_CHIP_SIZE;
14310 
14311 	if (tg3_nvram_read(tp, 0, &magic) != 0)
14312 		return;
14313 
14314 	if ((magic != TG3_EEPROM_MAGIC) &&
14315 	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14316 	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14317 		return;
14318 
14319 	/*
14320 	 * Size the chip by reading offsets at increasing powers of two.
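 * (Reads past the end of the part wrap back to offset 0: on a
 * 128-byte EEPROM, for example, a read at offset 0x80 returns the
 * magic word stored at offset 0.)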
14321 * When we encounter our validation signature, we know the addressing 14322 * has wrapped around, and thus have our chip size. 14323 */ 14324 cursize = 0x10; 14325 14326 while (cursize < tp->nvram_size) { 14327 if (tg3_nvram_read(tp, cursize, &val) != 0) 14328 return; 14329 14330 if (val == magic) 14331 break; 14332 14333 cursize <<= 1; 14334 } 14335 14336 tp->nvram_size = cursize; 14337 } 14338 14339 static void tg3_get_nvram_size(struct tg3 *tp) 14340 { 14341 u32 val; 14342 14343 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0) 14344 return; 14345 14346 /* Selfboot format */ 14347 if (val != TG3_EEPROM_MAGIC) { 14348 tg3_get_eeprom_size(tp); 14349 return; 14350 } 14351 14352 if (tg3_nvram_read(tp, 0xf0, &val) == 0) { 14353 if (val != 0) { 14354 /* This is confusing. We want to operate on the 14355 * 16-bit value at offset 0xf2. The tg3_nvram_read() 14356 * call will read from NVRAM and byteswap the data 14357 * according to the byteswapping settings for all 14358 * other register accesses. This ensures the data we 14359 * want will always reside in the lower 16-bits. 14360 * However, the data in NVRAM is in LE format, which 14361 * means the data from the NVRAM read will always be 14362 * opposite the endianness of the CPU. The 16-bit 14363 * byteswap then brings the data to CPU endianness. 14364 */ 14365 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024; 14366 return; 14367 } 14368 } 14369 tp->nvram_size = TG3_NVRAM_SIZE_512KB; 14370 } 14371 14372 static void tg3_get_nvram_info(struct tg3 *tp) 14373 { 14374 u32 nvcfg1; 14375 14376 nvcfg1 = tr32(NVRAM_CFG1); 14377 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) { 14378 tg3_flag_set(tp, FLASH); 14379 } else { 14380 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 14381 tw32(NVRAM_CFG1, nvcfg1); 14382 } 14383 14384 if (tg3_asic_rev(tp) == ASIC_REV_5750 || 14385 tg3_flag(tp, 5780_CLASS)) { 14386 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) { 14387 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED: 14388 tp->nvram_jedecnum = JEDEC_ATMEL; 14389 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; 14390 tg3_flag_set(tp, NVRAM_BUFFERED); 14391 break; 14392 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED: 14393 tp->nvram_jedecnum = JEDEC_ATMEL; 14394 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE; 14395 break; 14396 case FLASH_VENDOR_ATMEL_EEPROM: 14397 tp->nvram_jedecnum = JEDEC_ATMEL; 14398 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 14399 tg3_flag_set(tp, NVRAM_BUFFERED); 14400 break; 14401 case FLASH_VENDOR_ST: 14402 tp->nvram_jedecnum = JEDEC_ST; 14403 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE; 14404 tg3_flag_set(tp, NVRAM_BUFFERED); 14405 break; 14406 case FLASH_VENDOR_SAIFUN: 14407 tp->nvram_jedecnum = JEDEC_SAIFUN; 14408 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE; 14409 break; 14410 case FLASH_VENDOR_SST_SMALL: 14411 case FLASH_VENDOR_SST_LARGE: 14412 tp->nvram_jedecnum = JEDEC_SST; 14413 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE; 14414 break; 14415 } 14416 } else { 14417 tp->nvram_jedecnum = JEDEC_ATMEL; 14418 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; 14419 tg3_flag_set(tp, NVRAM_BUFFERED); 14420 } 14421 } 14422 14423 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1) 14424 { 14425 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) { 14426 case FLASH_5752PAGE_SIZE_256: 14427 tp->nvram_pagesize = 256; 14428 break; 14429 case FLASH_5752PAGE_SIZE_512: 14430 tp->nvram_pagesize = 512; 14431 break; 14432 case FLASH_5752PAGE_SIZE_1K: 14433 tp->nvram_pagesize = 1024; 14434 break; 14435 case FLASH_5752PAGE_SIZE_2K: 14436 tp->nvram_pagesize = 
2048; 14437 break; 14438 case FLASH_5752PAGE_SIZE_4K: 14439 tp->nvram_pagesize = 4096; 14440 break; 14441 case FLASH_5752PAGE_SIZE_264: 14442 tp->nvram_pagesize = 264; 14443 break; 14444 case FLASH_5752PAGE_SIZE_528: 14445 tp->nvram_pagesize = 528; 14446 break; 14447 } 14448 } 14449 14450 static void tg3_get_5752_nvram_info(struct tg3 *tp) 14451 { 14452 u32 nvcfg1; 14453 14454 nvcfg1 = tr32(NVRAM_CFG1); 14455 14456 /* NVRAM protection for TPM */ 14457 if (nvcfg1 & (1 << 27)) 14458 tg3_flag_set(tp, PROTECTED_NVRAM); 14459 14460 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14461 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ: 14462 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ: 14463 tp->nvram_jedecnum = JEDEC_ATMEL; 14464 tg3_flag_set(tp, NVRAM_BUFFERED); 14465 break; 14466 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: 14467 tp->nvram_jedecnum = JEDEC_ATMEL; 14468 tg3_flag_set(tp, NVRAM_BUFFERED); 14469 tg3_flag_set(tp, FLASH); 14470 break; 14471 case FLASH_5752VENDOR_ST_M45PE10: 14472 case FLASH_5752VENDOR_ST_M45PE20: 14473 case FLASH_5752VENDOR_ST_M45PE40: 14474 tp->nvram_jedecnum = JEDEC_ST; 14475 tg3_flag_set(tp, NVRAM_BUFFERED); 14476 tg3_flag_set(tp, FLASH); 14477 break; 14478 } 14479 14480 if (tg3_flag(tp, FLASH)) { 14481 tg3_nvram_get_pagesize(tp, nvcfg1); 14482 } else { 14483 /* For eeprom, set pagesize to maximum eeprom size */ 14484 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 14485 14486 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 14487 tw32(NVRAM_CFG1, nvcfg1); 14488 } 14489 } 14490 14491 static void tg3_get_5755_nvram_info(struct tg3 *tp) 14492 { 14493 u32 nvcfg1, protect = 0; 14494 14495 nvcfg1 = tr32(NVRAM_CFG1); 14496 14497 /* NVRAM protection for TPM */ 14498 if (nvcfg1 & (1 << 27)) { 14499 tg3_flag_set(tp, PROTECTED_NVRAM); 14500 protect = 1; 14501 } 14502 14503 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; 14504 switch (nvcfg1) { 14505 case FLASH_5755VENDOR_ATMEL_FLASH_1: 14506 case FLASH_5755VENDOR_ATMEL_FLASH_2: 14507 case FLASH_5755VENDOR_ATMEL_FLASH_3: 14508 case FLASH_5755VENDOR_ATMEL_FLASH_5: 14509 tp->nvram_jedecnum = JEDEC_ATMEL; 14510 tg3_flag_set(tp, NVRAM_BUFFERED); 14511 tg3_flag_set(tp, FLASH); 14512 tp->nvram_pagesize = 264; 14513 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 || 14514 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5) 14515 tp->nvram_size = (protect ? 0x3e200 : 14516 TG3_NVRAM_SIZE_512KB); 14517 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2) 14518 tp->nvram_size = (protect ? 0x1f200 : 14519 TG3_NVRAM_SIZE_256KB); 14520 else 14521 tp->nvram_size = (protect ? 0x1f200 : 14522 TG3_NVRAM_SIZE_128KB); 14523 break; 14524 case FLASH_5752VENDOR_ST_M45PE10: 14525 case FLASH_5752VENDOR_ST_M45PE20: 14526 case FLASH_5752VENDOR_ST_M45PE40: 14527 tp->nvram_jedecnum = JEDEC_ST; 14528 tg3_flag_set(tp, NVRAM_BUFFERED); 14529 tg3_flag_set(tp, FLASH); 14530 tp->nvram_pagesize = 256; 14531 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10) 14532 tp->nvram_size = (protect ? 14533 TG3_NVRAM_SIZE_64KB : 14534 TG3_NVRAM_SIZE_128KB); 14535 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20) 14536 tp->nvram_size = (protect ? 14537 TG3_NVRAM_SIZE_64KB : 14538 TG3_NVRAM_SIZE_256KB); 14539 else 14540 tp->nvram_size = (protect ? 
14541 TG3_NVRAM_SIZE_128KB : 14542 TG3_NVRAM_SIZE_512KB); 14543 break; 14544 } 14545 } 14546 14547 static void tg3_get_5787_nvram_info(struct tg3 *tp) 14548 { 14549 u32 nvcfg1; 14550 14551 nvcfg1 = tr32(NVRAM_CFG1); 14552 14553 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14554 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ: 14555 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: 14556 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ: 14557 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: 14558 tp->nvram_jedecnum = JEDEC_ATMEL; 14559 tg3_flag_set(tp, NVRAM_BUFFERED); 14560 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 14561 14562 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 14563 tw32(NVRAM_CFG1, nvcfg1); 14564 break; 14565 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: 14566 case FLASH_5755VENDOR_ATMEL_FLASH_1: 14567 case FLASH_5755VENDOR_ATMEL_FLASH_2: 14568 case FLASH_5755VENDOR_ATMEL_FLASH_3: 14569 tp->nvram_jedecnum = JEDEC_ATMEL; 14570 tg3_flag_set(tp, NVRAM_BUFFERED); 14571 tg3_flag_set(tp, FLASH); 14572 tp->nvram_pagesize = 264; 14573 break; 14574 case FLASH_5752VENDOR_ST_M45PE10: 14575 case FLASH_5752VENDOR_ST_M45PE20: 14576 case FLASH_5752VENDOR_ST_M45PE40: 14577 tp->nvram_jedecnum = JEDEC_ST; 14578 tg3_flag_set(tp, NVRAM_BUFFERED); 14579 tg3_flag_set(tp, FLASH); 14580 tp->nvram_pagesize = 256; 14581 break; 14582 } 14583 } 14584 14585 static void tg3_get_5761_nvram_info(struct tg3 *tp) 14586 { 14587 u32 nvcfg1, protect = 0; 14588 14589 nvcfg1 = tr32(NVRAM_CFG1); 14590 14591 /* NVRAM protection for TPM */ 14592 if (nvcfg1 & (1 << 27)) { 14593 tg3_flag_set(tp, PROTECTED_NVRAM); 14594 protect = 1; 14595 } 14596 14597 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; 14598 switch (nvcfg1) { 14599 case FLASH_5761VENDOR_ATMEL_ADB021D: 14600 case FLASH_5761VENDOR_ATMEL_ADB041D: 14601 case FLASH_5761VENDOR_ATMEL_ADB081D: 14602 case FLASH_5761VENDOR_ATMEL_ADB161D: 14603 case FLASH_5761VENDOR_ATMEL_MDB021D: 14604 case FLASH_5761VENDOR_ATMEL_MDB041D: 14605 case FLASH_5761VENDOR_ATMEL_MDB081D: 14606 case FLASH_5761VENDOR_ATMEL_MDB161D: 14607 tp->nvram_jedecnum = JEDEC_ATMEL; 14608 tg3_flag_set(tp, NVRAM_BUFFERED); 14609 tg3_flag_set(tp, FLASH); 14610 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); 14611 tp->nvram_pagesize = 256; 14612 break; 14613 case FLASH_5761VENDOR_ST_A_M45PE20: 14614 case FLASH_5761VENDOR_ST_A_M45PE40: 14615 case FLASH_5761VENDOR_ST_A_M45PE80: 14616 case FLASH_5761VENDOR_ST_A_M45PE16: 14617 case FLASH_5761VENDOR_ST_M_M45PE20: 14618 case FLASH_5761VENDOR_ST_M_M45PE40: 14619 case FLASH_5761VENDOR_ST_M_M45PE80: 14620 case FLASH_5761VENDOR_ST_M_M45PE16: 14621 tp->nvram_jedecnum = JEDEC_ST; 14622 tg3_flag_set(tp, NVRAM_BUFFERED); 14623 tg3_flag_set(tp, FLASH); 14624 tp->nvram_pagesize = 256; 14625 break; 14626 } 14627 14628 if (protect) { 14629 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT); 14630 } else { 14631 switch (nvcfg1) { 14632 case FLASH_5761VENDOR_ATMEL_ADB161D: 14633 case FLASH_5761VENDOR_ATMEL_MDB161D: 14634 case FLASH_5761VENDOR_ST_A_M45PE16: 14635 case FLASH_5761VENDOR_ST_M_M45PE16: 14636 tp->nvram_size = TG3_NVRAM_SIZE_2MB; 14637 break; 14638 case FLASH_5761VENDOR_ATMEL_ADB081D: 14639 case FLASH_5761VENDOR_ATMEL_MDB081D: 14640 case FLASH_5761VENDOR_ST_A_M45PE80: 14641 case FLASH_5761VENDOR_ST_M_M45PE80: 14642 tp->nvram_size = TG3_NVRAM_SIZE_1MB; 14643 break; 14644 case FLASH_5761VENDOR_ATMEL_ADB041D: 14645 case FLASH_5761VENDOR_ATMEL_MDB041D: 14646 case FLASH_5761VENDOR_ST_A_M45PE40: 14647 case FLASH_5761VENDOR_ST_M_M45PE40: 14648 tp->nvram_size = TG3_NVRAM_SIZE_512KB; 14649 break; 14650 case 
FLASH_5761VENDOR_ATMEL_ADB021D: 14651 case FLASH_5761VENDOR_ATMEL_MDB021D: 14652 case FLASH_5761VENDOR_ST_A_M45PE20: 14653 case FLASH_5761VENDOR_ST_M_M45PE20: 14654 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 14655 break; 14656 } 14657 } 14658 } 14659 14660 static void tg3_get_5906_nvram_info(struct tg3 *tp) 14661 { 14662 tp->nvram_jedecnum = JEDEC_ATMEL; 14663 tg3_flag_set(tp, NVRAM_BUFFERED); 14664 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 14665 } 14666 14667 static void tg3_get_57780_nvram_info(struct tg3 *tp) 14668 { 14669 u32 nvcfg1; 14670 14671 nvcfg1 = tr32(NVRAM_CFG1); 14672 14673 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14674 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: 14675 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: 14676 tp->nvram_jedecnum = JEDEC_ATMEL; 14677 tg3_flag_set(tp, NVRAM_BUFFERED); 14678 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 14679 14680 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 14681 tw32(NVRAM_CFG1, nvcfg1); 14682 return; 14683 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: 14684 case FLASH_57780VENDOR_ATMEL_AT45DB011D: 14685 case FLASH_57780VENDOR_ATMEL_AT45DB011B: 14686 case FLASH_57780VENDOR_ATMEL_AT45DB021D: 14687 case FLASH_57780VENDOR_ATMEL_AT45DB021B: 14688 case FLASH_57780VENDOR_ATMEL_AT45DB041D: 14689 case FLASH_57780VENDOR_ATMEL_AT45DB041B: 14690 tp->nvram_jedecnum = JEDEC_ATMEL; 14691 tg3_flag_set(tp, NVRAM_BUFFERED); 14692 tg3_flag_set(tp, FLASH); 14693 14694 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14695 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: 14696 case FLASH_57780VENDOR_ATMEL_AT45DB011D: 14697 case FLASH_57780VENDOR_ATMEL_AT45DB011B: 14698 tp->nvram_size = TG3_NVRAM_SIZE_128KB; 14699 break; 14700 case FLASH_57780VENDOR_ATMEL_AT45DB021D: 14701 case FLASH_57780VENDOR_ATMEL_AT45DB021B: 14702 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 14703 break; 14704 case FLASH_57780VENDOR_ATMEL_AT45DB041D: 14705 case FLASH_57780VENDOR_ATMEL_AT45DB041B: 14706 tp->nvram_size = TG3_NVRAM_SIZE_512KB; 14707 break; 14708 } 14709 break; 14710 case FLASH_5752VENDOR_ST_M45PE10: 14711 case FLASH_5752VENDOR_ST_M45PE20: 14712 case FLASH_5752VENDOR_ST_M45PE40: 14713 tp->nvram_jedecnum = JEDEC_ST; 14714 tg3_flag_set(tp, NVRAM_BUFFERED); 14715 tg3_flag_set(tp, FLASH); 14716 14717 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14718 case FLASH_5752VENDOR_ST_M45PE10: 14719 tp->nvram_size = TG3_NVRAM_SIZE_128KB; 14720 break; 14721 case FLASH_5752VENDOR_ST_M45PE20: 14722 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 14723 break; 14724 case FLASH_5752VENDOR_ST_M45PE40: 14725 tp->nvram_size = TG3_NVRAM_SIZE_512KB; 14726 break; 14727 } 14728 break; 14729 default: 14730 tg3_flag_set(tp, NO_NVRAM); 14731 return; 14732 } 14733 14734 tg3_nvram_get_pagesize(tp, nvcfg1); 14735 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) 14736 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); 14737 } 14738 14739 14740 static void tg3_get_5717_nvram_info(struct tg3 *tp) 14741 { 14742 u32 nvcfg1; 14743 14744 nvcfg1 = tr32(NVRAM_CFG1); 14745 14746 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14747 case FLASH_5717VENDOR_ATMEL_EEPROM: 14748 case FLASH_5717VENDOR_MICRO_EEPROM: 14749 tp->nvram_jedecnum = JEDEC_ATMEL; 14750 tg3_flag_set(tp, NVRAM_BUFFERED); 14751 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 14752 14753 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 14754 tw32(NVRAM_CFG1, nvcfg1); 14755 return; 14756 case FLASH_5717VENDOR_ATMEL_MDB011D: 14757 case FLASH_5717VENDOR_ATMEL_ADB011B: 14758 case FLASH_5717VENDOR_ATMEL_ADB011D: 14759 case FLASH_5717VENDOR_ATMEL_MDB021D: 14760 case 
FLASH_5717VENDOR_ATMEL_ADB021B: 14761 case FLASH_5717VENDOR_ATMEL_ADB021D: 14762 case FLASH_5717VENDOR_ATMEL_45USPT: 14763 tp->nvram_jedecnum = JEDEC_ATMEL; 14764 tg3_flag_set(tp, NVRAM_BUFFERED); 14765 tg3_flag_set(tp, FLASH); 14766 14767 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14768 case FLASH_5717VENDOR_ATMEL_MDB021D: 14769 /* Detect size with tg3_nvram_get_size() */ 14770 break; 14771 case FLASH_5717VENDOR_ATMEL_ADB021B: 14772 case FLASH_5717VENDOR_ATMEL_ADB021D: 14773 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 14774 break; 14775 default: 14776 tp->nvram_size = TG3_NVRAM_SIZE_128KB; 14777 break; 14778 } 14779 break; 14780 case FLASH_5717VENDOR_ST_M_M25PE10: 14781 case FLASH_5717VENDOR_ST_A_M25PE10: 14782 case FLASH_5717VENDOR_ST_M_M45PE10: 14783 case FLASH_5717VENDOR_ST_A_M45PE10: 14784 case FLASH_5717VENDOR_ST_M_M25PE20: 14785 case FLASH_5717VENDOR_ST_A_M25PE20: 14786 case FLASH_5717VENDOR_ST_M_M45PE20: 14787 case FLASH_5717VENDOR_ST_A_M45PE20: 14788 case FLASH_5717VENDOR_ST_25USPT: 14789 case FLASH_5717VENDOR_ST_45USPT: 14790 tp->nvram_jedecnum = JEDEC_ST; 14791 tg3_flag_set(tp, NVRAM_BUFFERED); 14792 tg3_flag_set(tp, FLASH); 14793 14794 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14795 case FLASH_5717VENDOR_ST_M_M25PE20: 14796 case FLASH_5717VENDOR_ST_M_M45PE20: 14797 /* Detect size with tg3_nvram_get_size() */ 14798 break; 14799 case FLASH_5717VENDOR_ST_A_M25PE20: 14800 case FLASH_5717VENDOR_ST_A_M45PE20: 14801 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 14802 break; 14803 default: 14804 tp->nvram_size = TG3_NVRAM_SIZE_128KB; 14805 break; 14806 } 14807 break; 14808 default: 14809 tg3_flag_set(tp, NO_NVRAM); 14810 return; 14811 } 14812 14813 tg3_nvram_get_pagesize(tp, nvcfg1); 14814 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) 14815 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); 14816 } 14817 14818 static void tg3_get_5720_nvram_info(struct tg3 *tp) 14819 { 14820 u32 nvcfg1, nvmpinstrp, nv_status; 14821 14822 nvcfg1 = tr32(NVRAM_CFG1); 14823 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK; 14824 14825 if (tg3_asic_rev(tp) == ASIC_REV_5762) { 14826 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) { 14827 tg3_flag_set(tp, NO_NVRAM); 14828 return; 14829 } 14830 14831 switch (nvmpinstrp) { 14832 case FLASH_5762_MX25L_100: 14833 case FLASH_5762_MX25L_200: 14834 case FLASH_5762_MX25L_400: 14835 case FLASH_5762_MX25L_800: 14836 case FLASH_5762_MX25L_160_320: 14837 tp->nvram_pagesize = 4096; 14838 tp->nvram_jedecnum = JEDEC_MACRONIX; 14839 tg3_flag_set(tp, NVRAM_BUFFERED); 14840 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); 14841 tg3_flag_set(tp, FLASH); 14842 nv_status = tr32(NVRAM_AUTOSENSE_STATUS); 14843 tp->nvram_size = 14844 (1 << (nv_status >> AUTOSENSE_DEVID & 14845 AUTOSENSE_DEVID_MASK) 14846 << AUTOSENSE_SIZE_IN_MB); 14847 return; 14848 14849 case FLASH_5762_EEPROM_HD: 14850 nvmpinstrp = FLASH_5720_EEPROM_HD; 14851 break; 14852 case FLASH_5762_EEPROM_LD: 14853 nvmpinstrp = FLASH_5720_EEPROM_LD; 14854 break; 14855 case FLASH_5720VENDOR_M_ST_M45PE20: 14856 /* This pinstrap supports multiple sizes, so force it 14857 * to read the actual size from location 0xf0. 
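 * Leaving tp->nvram_size at zero makes tg3_nvram_init() fall back
 * to tg3_get_nvram_size(), which reads the size word at 0xf0.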
14858 */ 14859 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT; 14860 break; 14861 } 14862 } 14863 14864 switch (nvmpinstrp) { 14865 case FLASH_5720_EEPROM_HD: 14866 case FLASH_5720_EEPROM_LD: 14867 tp->nvram_jedecnum = JEDEC_ATMEL; 14868 tg3_flag_set(tp, NVRAM_BUFFERED); 14869 14870 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 14871 tw32(NVRAM_CFG1, nvcfg1); 14872 if (nvmpinstrp == FLASH_5720_EEPROM_HD) 14873 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 14874 else 14875 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE; 14876 return; 14877 case FLASH_5720VENDOR_M_ATMEL_DB011D: 14878 case FLASH_5720VENDOR_A_ATMEL_DB011B: 14879 case FLASH_5720VENDOR_A_ATMEL_DB011D: 14880 case FLASH_5720VENDOR_M_ATMEL_DB021D: 14881 case FLASH_5720VENDOR_A_ATMEL_DB021B: 14882 case FLASH_5720VENDOR_A_ATMEL_DB021D: 14883 case FLASH_5720VENDOR_M_ATMEL_DB041D: 14884 case FLASH_5720VENDOR_A_ATMEL_DB041B: 14885 case FLASH_5720VENDOR_A_ATMEL_DB041D: 14886 case FLASH_5720VENDOR_M_ATMEL_DB081D: 14887 case FLASH_5720VENDOR_A_ATMEL_DB081D: 14888 case FLASH_5720VENDOR_ATMEL_45USPT: 14889 tp->nvram_jedecnum = JEDEC_ATMEL; 14890 tg3_flag_set(tp, NVRAM_BUFFERED); 14891 tg3_flag_set(tp, FLASH); 14892 14893 switch (nvmpinstrp) { 14894 case FLASH_5720VENDOR_M_ATMEL_DB021D: 14895 case FLASH_5720VENDOR_A_ATMEL_DB021B: 14896 case FLASH_5720VENDOR_A_ATMEL_DB021D: 14897 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 14898 break; 14899 case FLASH_5720VENDOR_M_ATMEL_DB041D: 14900 case FLASH_5720VENDOR_A_ATMEL_DB041B: 14901 case FLASH_5720VENDOR_A_ATMEL_DB041D: 14902 tp->nvram_size = TG3_NVRAM_SIZE_512KB; 14903 break; 14904 case FLASH_5720VENDOR_M_ATMEL_DB081D: 14905 case FLASH_5720VENDOR_A_ATMEL_DB081D: 14906 tp->nvram_size = TG3_NVRAM_SIZE_1MB; 14907 break; 14908 default: 14909 if (tg3_asic_rev(tp) != ASIC_REV_5762) 14910 tp->nvram_size = TG3_NVRAM_SIZE_128KB; 14911 break; 14912 } 14913 break; 14914 case FLASH_5720VENDOR_M_ST_M25PE10: 14915 case FLASH_5720VENDOR_M_ST_M45PE10: 14916 case FLASH_5720VENDOR_A_ST_M25PE10: 14917 case FLASH_5720VENDOR_A_ST_M45PE10: 14918 case FLASH_5720VENDOR_M_ST_M25PE20: 14919 case FLASH_5720VENDOR_M_ST_M45PE20: 14920 case FLASH_5720VENDOR_A_ST_M25PE20: 14921 case FLASH_5720VENDOR_A_ST_M45PE20: 14922 case FLASH_5720VENDOR_M_ST_M25PE40: 14923 case FLASH_5720VENDOR_M_ST_M45PE40: 14924 case FLASH_5720VENDOR_A_ST_M25PE40: 14925 case FLASH_5720VENDOR_A_ST_M45PE40: 14926 case FLASH_5720VENDOR_M_ST_M25PE80: 14927 case FLASH_5720VENDOR_M_ST_M45PE80: 14928 case FLASH_5720VENDOR_A_ST_M25PE80: 14929 case FLASH_5720VENDOR_A_ST_M45PE80: 14930 case FLASH_5720VENDOR_ST_25USPT: 14931 case FLASH_5720VENDOR_ST_45USPT: 14932 tp->nvram_jedecnum = JEDEC_ST; 14933 tg3_flag_set(tp, NVRAM_BUFFERED); 14934 tg3_flag_set(tp, FLASH); 14935 14936 switch (nvmpinstrp) { 14937 case FLASH_5720VENDOR_M_ST_M25PE20: 14938 case FLASH_5720VENDOR_M_ST_M45PE20: 14939 case FLASH_5720VENDOR_A_ST_M25PE20: 14940 case FLASH_5720VENDOR_A_ST_M45PE20: 14941 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 14942 break; 14943 case FLASH_5720VENDOR_M_ST_M25PE40: 14944 case FLASH_5720VENDOR_M_ST_M45PE40: 14945 case FLASH_5720VENDOR_A_ST_M25PE40: 14946 case FLASH_5720VENDOR_A_ST_M45PE40: 14947 tp->nvram_size = TG3_NVRAM_SIZE_512KB; 14948 break; 14949 case FLASH_5720VENDOR_M_ST_M25PE80: 14950 case FLASH_5720VENDOR_M_ST_M45PE80: 14951 case FLASH_5720VENDOR_A_ST_M25PE80: 14952 case FLASH_5720VENDOR_A_ST_M45PE80: 14953 tp->nvram_size = TG3_NVRAM_SIZE_1MB; 14954 break; 14955 default: 14956 if (tg3_asic_rev(tp) != ASIC_REV_5762) 14957 tp->nvram_size = TG3_NVRAM_SIZE_128KB; 14958 break; 14959 } 
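		/* On the 5762, parts not matched above leave tp->nvram_size
		 * at zero so that tg3_get_nvram_size() can size them from
		 * NVRAM contents (see the 0xf0 handling above).
		 */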
14960 break; 14961 default: 14962 tg3_flag_set(tp, NO_NVRAM); 14963 return; 14964 } 14965 14966 tg3_nvram_get_pagesize(tp, nvcfg1); 14967 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) 14968 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); 14969 14970 if (tg3_asic_rev(tp) == ASIC_REV_5762) { 14971 u32 val; 14972 14973 if (tg3_nvram_read(tp, 0, &val)) 14974 return; 14975 14976 if (val != TG3_EEPROM_MAGIC && 14977 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) 14978 tg3_flag_set(tp, NO_NVRAM); 14979 } 14980 } 14981 14982 /* Chips other than 5700/5701 use the NVRAM for fetching info. */ 14983 static void tg3_nvram_init(struct tg3 *tp) 14984 { 14985 if (tg3_flag(tp, IS_SSB_CORE)) { 14986 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */ 14987 tg3_flag_clear(tp, NVRAM); 14988 tg3_flag_clear(tp, NVRAM_BUFFERED); 14989 tg3_flag_set(tp, NO_NVRAM); 14990 return; 14991 } 14992 14993 tw32_f(GRC_EEPROM_ADDR, 14994 (EEPROM_ADDR_FSM_RESET | 14995 (EEPROM_DEFAULT_CLOCK_PERIOD << 14996 EEPROM_ADDR_CLKPERD_SHIFT))); 14997 14998 msleep(1); 14999 15000 /* Enable seeprom accesses. */ 15001 tw32_f(GRC_LOCAL_CTRL, 15002 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM); 15003 udelay(100); 15004 15005 if (tg3_asic_rev(tp) != ASIC_REV_5700 && 15006 tg3_asic_rev(tp) != ASIC_REV_5701) { 15007 tg3_flag_set(tp, NVRAM); 15008 15009 if (tg3_nvram_lock(tp)) { 15010 netdev_warn(tp->dev, 15011 "Cannot get nvram lock, %s failed\n", 15012 __func__); 15013 return; 15014 } 15015 tg3_enable_nvram_access(tp); 15016 15017 tp->nvram_size = 0; 15018 15019 if (tg3_asic_rev(tp) == ASIC_REV_5752) 15020 tg3_get_5752_nvram_info(tp); 15021 else if (tg3_asic_rev(tp) == ASIC_REV_5755) 15022 tg3_get_5755_nvram_info(tp); 15023 else if (tg3_asic_rev(tp) == ASIC_REV_5787 || 15024 tg3_asic_rev(tp) == ASIC_REV_5784 || 15025 tg3_asic_rev(tp) == ASIC_REV_5785) 15026 tg3_get_5787_nvram_info(tp); 15027 else if (tg3_asic_rev(tp) == ASIC_REV_5761) 15028 tg3_get_5761_nvram_info(tp); 15029 else if (tg3_asic_rev(tp) == ASIC_REV_5906) 15030 tg3_get_5906_nvram_info(tp); 15031 else if (tg3_asic_rev(tp) == ASIC_REV_57780 || 15032 tg3_flag(tp, 57765_CLASS)) 15033 tg3_get_57780_nvram_info(tp); 15034 else if (tg3_asic_rev(tp) == ASIC_REV_5717 || 15035 tg3_asic_rev(tp) == ASIC_REV_5719) 15036 tg3_get_5717_nvram_info(tp); 15037 else if (tg3_asic_rev(tp) == ASIC_REV_5720 || 15038 tg3_asic_rev(tp) == ASIC_REV_5762) 15039 tg3_get_5720_nvram_info(tp); 15040 else 15041 tg3_get_nvram_info(tp); 15042 15043 if (tp->nvram_size == 0) 15044 tg3_get_nvram_size(tp); 15045 15046 tg3_disable_nvram_access(tp); 15047 tg3_nvram_unlock(tp); 15048 15049 } else { 15050 tg3_flag_clear(tp, NVRAM); 15051 tg3_flag_clear(tp, NVRAM_BUFFERED); 15052 15053 tg3_get_eeprom_size(tp); 15054 } 15055 } 15056 15057 struct subsys_tbl_ent { 15058 u16 subsys_vendor, subsys_devid; 15059 u32 phy_id; 15060 }; 15061 15062 static struct subsys_tbl_ent subsys_id_to_phy_id[] = { 15063 /* Broadcom boards. 
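 * Each entry maps a PCI subsystem vendor/device pair to the PHY
 * expected on that board; a phy_id of 0 denotes a SerDes board
 * without a copper PHY.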
*/ 15064 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15065 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 }, 15066 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15067 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 }, 15068 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15069 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 }, 15070 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15071 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 }, 15072 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15073 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 }, 15074 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15075 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 }, 15076 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15077 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 }, 15078 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15079 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 }, 15080 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15081 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 }, 15082 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15083 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 }, 15084 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15085 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 }, 15086 15087 /* 3com boards. */ 15088 { TG3PCI_SUBVENDOR_ID_3COM, 15089 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 }, 15090 { TG3PCI_SUBVENDOR_ID_3COM, 15091 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 }, 15092 { TG3PCI_SUBVENDOR_ID_3COM, 15093 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 }, 15094 { TG3PCI_SUBVENDOR_ID_3COM, 15095 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 }, 15096 { TG3PCI_SUBVENDOR_ID_3COM, 15097 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 }, 15098 15099 /* DELL boards. */ 15100 { TG3PCI_SUBVENDOR_ID_DELL, 15101 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 }, 15102 { TG3PCI_SUBVENDOR_ID_DELL, 15103 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 }, 15104 { TG3PCI_SUBVENDOR_ID_DELL, 15105 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 }, 15106 { TG3PCI_SUBVENDOR_ID_DELL, 15107 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 }, 15108 15109 /* Compaq boards. */ 15110 { TG3PCI_SUBVENDOR_ID_COMPAQ, 15111 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 }, 15112 { TG3PCI_SUBVENDOR_ID_COMPAQ, 15113 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 }, 15114 { TG3PCI_SUBVENDOR_ID_COMPAQ, 15115 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 }, 15116 { TG3PCI_SUBVENDOR_ID_COMPAQ, 15117 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 }, 15118 { TG3PCI_SUBVENDOR_ID_COMPAQ, 15119 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 }, 15120 15121 /* IBM boards. */ 15122 { TG3PCI_SUBVENDOR_ID_IBM, 15123 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 } 15124 }; 15125 15126 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp) 15127 { 15128 int i; 15129 15130 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) { 15131 if ((subsys_id_to_phy_id[i].subsys_vendor == 15132 tp->pdev->subsystem_vendor) && 15133 (subsys_id_to_phy_id[i].subsys_devid == 15134 tp->pdev->subsystem_device)) 15135 return &subsys_id_to_phy_id[i]; 15136 } 15137 return NULL; 15138 } 15139 15140 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp) 15141 { 15142 u32 val; 15143 15144 tp->phy_id = TG3_PHY_ID_INVALID; 15145 tp->led_ctrl = LED_CTRL_MODE_PHY_1; 15146 15147 /* Assume an onboard device and WOL capable by default. 
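 * Both assumptions are refined below from the NIC SRAM
 * configuration block (or from the VCPU shadow registers on the
 * 5906).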
*/ 15148 tg3_flag_set(tp, EEPROM_WRITE_PROT); 15149 tg3_flag_set(tp, WOL_CAP); 15150 15151 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 15152 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) { 15153 tg3_flag_clear(tp, EEPROM_WRITE_PROT); 15154 tg3_flag_set(tp, IS_NIC); 15155 } 15156 val = tr32(VCPU_CFGSHDW); 15157 if (val & VCPU_CFGSHDW_ASPM_DBNC) 15158 tg3_flag_set(tp, ASPM_WORKAROUND); 15159 if ((val & VCPU_CFGSHDW_WOL_ENABLE) && 15160 (val & VCPU_CFGSHDW_WOL_MAGPKT)) { 15161 tg3_flag_set(tp, WOL_ENABLE); 15162 device_set_wakeup_enable(&tp->pdev->dev, true); 15163 } 15164 goto done; 15165 } 15166 15167 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); 15168 if (val == NIC_SRAM_DATA_SIG_MAGIC) { 15169 u32 nic_cfg, led_cfg; 15170 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0; 15171 u32 nic_phy_id, ver, eeprom_phy_id; 15172 int eeprom_phy_serdes = 0; 15173 15174 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); 15175 tp->nic_sram_data_cfg = nic_cfg; 15176 15177 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver); 15178 ver >>= NIC_SRAM_DATA_VER_SHIFT; 15179 if (tg3_asic_rev(tp) != ASIC_REV_5700 && 15180 tg3_asic_rev(tp) != ASIC_REV_5701 && 15181 tg3_asic_rev(tp) != ASIC_REV_5703 && 15182 (ver > 0) && (ver < 0x100)) 15183 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2); 15184 15185 if (tg3_asic_rev(tp) == ASIC_REV_5785) 15186 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4); 15187 15188 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 15189 tg3_asic_rev(tp) == ASIC_REV_5719 || 15190 tg3_asic_rev(tp) == ASIC_REV_5720) 15191 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5); 15192 15193 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) == 15194 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER) 15195 eeprom_phy_serdes = 1; 15196 15197 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id); 15198 if (nic_phy_id != 0) { 15199 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK; 15200 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK; 15201 15202 eeprom_phy_id = (id1 >> 16) << 10; 15203 eeprom_phy_id |= (id2 & 0xfc00) << 16; 15204 eeprom_phy_id |= (id2 & 0x03ff) << 0; 15205 } else 15206 eeprom_phy_id = 0; 15207 15208 tp->phy_id = eeprom_phy_id; 15209 if (eeprom_phy_serdes) { 15210 if (!tg3_flag(tp, 5705_PLUS)) 15211 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; 15212 else 15213 tp->phy_flags |= TG3_PHYFLG_MII_SERDES; 15214 } 15215 15216 if (tg3_flag(tp, 5750_PLUS)) 15217 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK | 15218 SHASTA_EXT_LED_MODE_MASK); 15219 else 15220 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK; 15221 15222 switch (led_cfg) { 15223 default: 15224 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1: 15225 tp->led_ctrl = LED_CTRL_MODE_PHY_1; 15226 break; 15227 15228 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2: 15229 tp->led_ctrl = LED_CTRL_MODE_PHY_2; 15230 break; 15231 15232 case NIC_SRAM_DATA_CFG_LED_MODE_MAC: 15233 tp->led_ctrl = LED_CTRL_MODE_MAC; 15234 15235 /* Default to PHY_1_MODE if 0 (MAC_MODE) is 15236 * read on some older 5700/5701 bootcode. 
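 * (presumably those bootcode revisions simply never programmed
 * the LED mode field)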
15237 */ 15238 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 15239 tg3_asic_rev(tp) == ASIC_REV_5701) 15240 tp->led_ctrl = LED_CTRL_MODE_PHY_1; 15241 15242 break; 15243 15244 case SHASTA_EXT_LED_SHARED: 15245 tp->led_ctrl = LED_CTRL_MODE_SHARED; 15246 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 && 15247 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1) 15248 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 | 15249 LED_CTRL_MODE_PHY_2); 15250 15251 if (tg3_flag(tp, 5717_PLUS) || 15252 tg3_asic_rev(tp) == ASIC_REV_5762) 15253 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE | 15254 LED_CTRL_BLINK_RATE_MASK; 15255 15256 break; 15257 15258 case SHASTA_EXT_LED_MAC: 15259 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC; 15260 break; 15261 15262 case SHASTA_EXT_LED_COMBO: 15263 tp->led_ctrl = LED_CTRL_MODE_COMBO; 15264 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) 15265 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 | 15266 LED_CTRL_MODE_PHY_2); 15267 break; 15268 15269 } 15270 15271 if ((tg3_asic_rev(tp) == ASIC_REV_5700 || 15272 tg3_asic_rev(tp) == ASIC_REV_5701) && 15273 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL) 15274 tp->led_ctrl = LED_CTRL_MODE_PHY_2; 15275 15276 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) 15277 tp->led_ctrl = LED_CTRL_MODE_PHY_1; 15278 15279 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) { 15280 tg3_flag_set(tp, EEPROM_WRITE_PROT); 15281 if ((tp->pdev->subsystem_vendor == 15282 PCI_VENDOR_ID_ARIMA) && 15283 (tp->pdev->subsystem_device == 0x205a || 15284 tp->pdev->subsystem_device == 0x2063)) 15285 tg3_flag_clear(tp, EEPROM_WRITE_PROT); 15286 } else { 15287 tg3_flag_clear(tp, EEPROM_WRITE_PROT); 15288 tg3_flag_set(tp, IS_NIC); 15289 } 15290 15291 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { 15292 tg3_flag_set(tp, ENABLE_ASF); 15293 if (tg3_flag(tp, 5750_PLUS)) 15294 tg3_flag_set(tp, ASF_NEW_HANDSHAKE); 15295 } 15296 15297 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) && 15298 tg3_flag(tp, 5750_PLUS)) 15299 tg3_flag_set(tp, ENABLE_APE); 15300 15301 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES && 15302 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)) 15303 tg3_flag_clear(tp, WOL_CAP); 15304 15305 if (tg3_flag(tp, WOL_CAP) && 15306 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) { 15307 tg3_flag_set(tp, WOL_ENABLE); 15308 device_set_wakeup_enable(&tp->pdev->dev, true); 15309 } 15310 15311 if (cfg2 & (1 << 17)) 15312 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING; 15313 15314 /* serdes signal pre-emphasis in register 0x590 set by */ 15315 /* bootcode if bit 18 is set */ 15316 if (cfg2 & (1 << 18)) 15317 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS; 15318 15319 if ((tg3_flag(tp, 57765_PLUS) || 15320 (tg3_asic_rev(tp) == ASIC_REV_5784 && 15321 tg3_chip_rev(tp) != CHIPREV_5784_AX)) && 15322 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN)) 15323 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD; 15324 15325 if (tg3_flag(tp, PCI_EXPRESS)) { 15326 u32 cfg3; 15327 15328 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3); 15329 if (tg3_asic_rev(tp) != ASIC_REV_5785 && 15330 !tg3_flag(tp, 57765_PLUS) && 15331 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)) 15332 tg3_flag_set(tp, ASPM_WORKAROUND); 15333 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID) 15334 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN; 15335 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK) 15336 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK; 15337 } 15338 15339 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE) 15340 tg3_flag_set(tp, RGMII_INBAND_DISABLE); 15341 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN) 15342 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN); 15343 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN) 15344 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN); 15345 15346 if 
(cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV) 15347 tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV; 15348 } 15349 done: 15350 if (tg3_flag(tp, WOL_CAP)) 15351 device_set_wakeup_enable(&tp->pdev->dev, 15352 tg3_flag(tp, WOL_ENABLE)); 15353 else 15354 device_set_wakeup_capable(&tp->pdev->dev, false); 15355 } 15356 15357 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val) 15358 { 15359 int i, err; 15360 u32 val2, off = offset * 8; 15361 15362 err = tg3_nvram_lock(tp); 15363 if (err) 15364 return err; 15365 15366 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE); 15367 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN | 15368 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START); 15369 tg3_ape_read32(tp, TG3_APE_OTP_CTRL); 15370 udelay(10); 15371 15372 for (i = 0; i < 100; i++) { 15373 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS); 15374 if (val2 & APE_OTP_STATUS_CMD_DONE) { 15375 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA); 15376 break; 15377 } 15378 udelay(10); 15379 } 15380 15381 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0); 15382 15383 tg3_nvram_unlock(tp); 15384 if (val2 & APE_OTP_STATUS_CMD_DONE) 15385 return 0; 15386 15387 return -EBUSY; 15388 } 15389 15390 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd) 15391 { 15392 int i; 15393 u32 val; 15394 15395 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START); 15396 tw32(OTP_CTRL, cmd); 15397 15398 /* Wait for up to 1 ms for command to execute. */ 15399 for (i = 0; i < 100; i++) { 15400 val = tr32(OTP_STATUS); 15401 if (val & OTP_STATUS_CMD_DONE) 15402 break; 15403 udelay(10); 15404 } 15405 15406 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY; 15407 } 15408 15409 /* Read the gphy configuration from the OTP region of the chip. The gphy 15410 * configuration is a 32-bit value that straddles the alignment boundary. 15411 * We do two 32-bit reads and then shift and merge the results. 
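 * The merged value handed back to the caller is
 *   ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16)
 * where thalf_otp is read at OTP_ADDRESS_MAGIC1 and bhalf_otp at
 * OTP_ADDRESS_MAGIC2.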
15412 */ 15413 static u32 tg3_read_otp_phycfg(struct tg3 *tp) 15414 { 15415 u32 bhalf_otp, thalf_otp; 15416 15417 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC); 15418 15419 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT)) 15420 return 0; 15421 15422 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1); 15423 15424 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ)) 15425 return 0; 15426 15427 thalf_otp = tr32(OTP_READ_DATA); 15428 15429 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2); 15430 15431 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ)) 15432 return 0; 15433 15434 bhalf_otp = tr32(OTP_READ_DATA); 15435 15436 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16); 15437 } 15438 15439 static void tg3_phy_init_link_config(struct tg3 *tp) 15440 { 15441 u32 adv = ADVERTISED_Autoneg; 15442 15443 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 15444 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV)) 15445 adv |= ADVERTISED_1000baseT_Half; 15446 adv |= ADVERTISED_1000baseT_Full; 15447 } 15448 15449 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 15450 adv |= ADVERTISED_100baseT_Half | 15451 ADVERTISED_100baseT_Full | 15452 ADVERTISED_10baseT_Half | 15453 ADVERTISED_10baseT_Full | 15454 ADVERTISED_TP; 15455 else 15456 adv |= ADVERTISED_FIBRE; 15457 15458 tp->link_config.advertising = adv; 15459 tp->link_config.speed = SPEED_UNKNOWN; 15460 tp->link_config.duplex = DUPLEX_UNKNOWN; 15461 tp->link_config.autoneg = AUTONEG_ENABLE; 15462 tp->link_config.active_speed = SPEED_UNKNOWN; 15463 tp->link_config.active_duplex = DUPLEX_UNKNOWN; 15464 15465 tp->old_link = -1; 15466 } 15467 15468 static int tg3_phy_probe(struct tg3 *tp) 15469 { 15470 u32 hw_phy_id_1, hw_phy_id_2; 15471 u32 hw_phy_id, hw_phy_id_masked; 15472 int err; 15473 15474 /* flow control autonegotiation is default behavior */ 15475 tg3_flag_set(tp, PAUSE_AUTONEG); 15476 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; 15477 15478 if (tg3_flag(tp, ENABLE_APE)) { 15479 switch (tp->pci_fn) { 15480 case 0: 15481 tp->phy_ape_lock = TG3_APE_LOCK_PHY0; 15482 break; 15483 case 1: 15484 tp->phy_ape_lock = TG3_APE_LOCK_PHY1; 15485 break; 15486 case 2: 15487 tp->phy_ape_lock = TG3_APE_LOCK_PHY2; 15488 break; 15489 case 3: 15490 tp->phy_ape_lock = TG3_APE_LOCK_PHY3; 15491 break; 15492 } 15493 } 15494 15495 if (!tg3_flag(tp, ENABLE_ASF) && 15496 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && 15497 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) 15498 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK | 15499 TG3_PHYFLG_KEEP_LINK_ON_PWRDN); 15500 15501 if (tg3_flag(tp, USE_PHYLIB)) 15502 return tg3_phy_init(tp); 15503 15504 /* Reading the PHY ID register can conflict with ASF 15505 * firmware access to the PHY hardware. 15506 */ 15507 err = 0; 15508 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) { 15509 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID; 15510 } else { 15511 /* Now read the physical PHY_ID from the chip and verify 15512 * that it is sane. If it doesn't look good, we fall back 15513 * to either the hard-coded table based PHY_ID and failing 15514 * that the value found in the eeprom area. 
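 * (The EEPROM-derived ID, when present, takes precedence; the
 * hard-coded subsystem-ID table is only consulted when no EEPROM
 * signature was found.)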
15515 	 */
15516 		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15517 		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15518 
15519 		hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
15520 		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15521 		hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
15522 
15523 		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15524 	}
15525 
15526 	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15527 		tp->phy_id = hw_phy_id;
15528 		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15529 			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15530 		else
15531 			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15532 	} else {
15533 		if (tp->phy_id != TG3_PHY_ID_INVALID) {
15534 			/* Do nothing, phy ID already set up in
15535 			 * tg3_get_eeprom_hw_cfg().
15536 			 */
15537 		} else {
15538 			struct subsys_tbl_ent *p;
15539 
15540 			/* No eeprom signature? Try the hardcoded
15541 			 * subsys device table.
15542 			 */
15543 			p = tg3_lookup_by_subsys(tp);
15544 			if (p) {
15545 				tp->phy_id = p->phy_id;
15546 			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
15547 				/* So far the IDs 0xbc050cd0,
15548 				 * 0xbc050f80 and 0xbc050c30 have been seen on
15549 				 * devices connected to a BCM4785, and there
15550 				 * are probably more. Just assume that the phy
15551 				 * is supported when it is connected to an SSB
15552 				 * core for now.
15553 				 */
15554 				return -ENODEV;
15555 			}
15556 
15557 			if (!tp->phy_id ||
15558 			    tp->phy_id == TG3_PHY_ID_BCM8002)
15559 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15560 		}
15561 	}
15562 
15563 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15564 	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15565 	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
15566 	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
15567 	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
15568 	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15569 	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15570 	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15571 	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15572 		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15573 
15574 		tp->eee.supported = SUPPORTED_100baseT_Full |
15575 				    SUPPORTED_1000baseT_Full;
15576 		tp->eee.advertised = ADVERTISED_100baseT_Full |
15577 				     ADVERTISED_1000baseT_Full;
15578 		tp->eee.eee_enabled = 1;
15579 		tp->eee.tx_lpi_enabled = 1;
15580 		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15581 	}
15582 
15583 	tg3_phy_init_link_config(tp);
15584 
15585 	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15586 	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15587 	    !tg3_flag(tp, ENABLE_APE) &&
15588 	    !tg3_flag(tp, ENABLE_ASF)) {
15589 		u32 bmsr, dummy;
15590 
15591 		tg3_readphy(tp, MII_BMSR, &bmsr); /* BMSR latches link-down events; read twice for current status */
15592 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15593 		    (bmsr & BMSR_LSTATUS))
15594 			goto skip_phy_reset;
15595 
15596 		err = tg3_phy_reset(tp);
15597 		if (err)
15598 			return err;
15599 
15600 		tg3_phy_set_wirespeed(tp);
15601 
15602 		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15603 			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15604 					    tp->link_config.flowctrl);
15605 
15606 			tg3_writephy(tp, MII_BMCR,
15607 				     BMCR_ANENABLE | BMCR_ANRESTART);
15608 		}
15609 	}
15610 
15611 skip_phy_reset:
15612 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15613 		err = tg3_init_5401phy_dsp(tp);
15614 		if (err)
15615 			return err;
15616 
15617 		err = tg3_init_5401phy_dsp(tp); /* the DSP setup is deliberately applied twice */
15618 	}
15619 
15620 	return err;
15621 }
15622 
15623 static void tg3_read_vpd(struct tg3 *tp)
15624 {
15625 	u8 *vpd_data;
15626 	unsigned int block_end, rosize, len;
15627 	u32 vpdlen;
15628 	int j, i = 0;
15629 
15630 	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15631 	if (!vpd_data)
15632 		goto out_no_vpd;
15633 
15634 	i = pci_vpd_find_tag(vpd_data, 0, vpdlen,
PCI_VPD_LRDT_RO_DATA); 15635 if (i < 0) 15636 goto out_not_found; 15637 15638 rosize = pci_vpd_lrdt_size(&vpd_data[i]); 15639 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize; 15640 i += PCI_VPD_LRDT_TAG_SIZE; 15641 15642 if (block_end > vpdlen) 15643 goto out_not_found; 15644 15645 j = pci_vpd_find_info_keyword(vpd_data, i, rosize, 15646 PCI_VPD_RO_KEYWORD_MFR_ID); 15647 if (j > 0) { 15648 len = pci_vpd_info_field_size(&vpd_data[j]); 15649 15650 j += PCI_VPD_INFO_FLD_HDR_SIZE; 15651 if (j + len > block_end || len != 4 || 15652 memcmp(&vpd_data[j], "1028", 4)) 15653 goto partno; 15654 15655 j = pci_vpd_find_info_keyword(vpd_data, i, rosize, 15656 PCI_VPD_RO_KEYWORD_VENDOR0); 15657 if (j < 0) 15658 goto partno; 15659 15660 len = pci_vpd_info_field_size(&vpd_data[j]); 15661 15662 j += PCI_VPD_INFO_FLD_HDR_SIZE; 15663 if (j + len > block_end) 15664 goto partno; 15665 15666 if (len >= sizeof(tp->fw_ver)) 15667 len = sizeof(tp->fw_ver) - 1; 15668 memset(tp->fw_ver, 0, sizeof(tp->fw_ver)); 15669 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, 15670 &vpd_data[j]); 15671 } 15672 15673 partno: 15674 i = pci_vpd_find_info_keyword(vpd_data, i, rosize, 15675 PCI_VPD_RO_KEYWORD_PARTNO); 15676 if (i < 0) 15677 goto out_not_found; 15678 15679 len = pci_vpd_info_field_size(&vpd_data[i]); 15680 15681 i += PCI_VPD_INFO_FLD_HDR_SIZE; 15682 if (len > TG3_BPN_SIZE || 15683 (len + i) > vpdlen) 15684 goto out_not_found; 15685 15686 memcpy(tp->board_part_number, &vpd_data[i], len); 15687 15688 out_not_found: 15689 kfree(vpd_data); 15690 if (tp->board_part_number[0]) 15691 return; 15692 15693 out_no_vpd: 15694 if (tg3_asic_rev(tp) == ASIC_REV_5717) { 15695 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || 15696 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C) 15697 strcpy(tp->board_part_number, "BCM5717"); 15698 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718) 15699 strcpy(tp->board_part_number, "BCM5718"); 15700 else 15701 goto nomatch; 15702 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) { 15703 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780) 15704 strcpy(tp->board_part_number, "BCM57780"); 15705 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760) 15706 strcpy(tp->board_part_number, "BCM57760"); 15707 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790) 15708 strcpy(tp->board_part_number, "BCM57790"); 15709 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788) 15710 strcpy(tp->board_part_number, "BCM57788"); 15711 else 15712 goto nomatch; 15713 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) { 15714 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761) 15715 strcpy(tp->board_part_number, "BCM57761"); 15716 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765) 15717 strcpy(tp->board_part_number, "BCM57765"); 15718 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781) 15719 strcpy(tp->board_part_number, "BCM57781"); 15720 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785) 15721 strcpy(tp->board_part_number, "BCM57785"); 15722 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791) 15723 strcpy(tp->board_part_number, "BCM57791"); 15724 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795) 15725 strcpy(tp->board_part_number, "BCM57795"); 15726 else 15727 goto nomatch; 15728 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) { 15729 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762) 15730 strcpy(tp->board_part_number, "BCM57762"); 15731 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766) 15732 strcpy(tp->board_part_number, "BCM57766"); 15733 else if (tp->pdev->device 
== TG3PCI_DEVICE_TIGON3_57782) 15734 strcpy(tp->board_part_number, "BCM57782"); 15735 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786) 15736 strcpy(tp->board_part_number, "BCM57786"); 15737 else 15738 goto nomatch; 15739 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) { 15740 strcpy(tp->board_part_number, "BCM95906"); 15741 } else { 15742 nomatch: 15743 strcpy(tp->board_part_number, "none"); 15744 } 15745 } 15746 15747 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset) 15748 { 15749 u32 val; 15750 15751 if (tg3_nvram_read(tp, offset, &val) || 15752 (val & 0xfc000000) != 0x0c000000 || 15753 tg3_nvram_read(tp, offset + 4, &val) || 15754 val != 0) 15755 return 0; 15756 15757 return 1; 15758 } 15759 15760 static void tg3_read_bc_ver(struct tg3 *tp) 15761 { 15762 u32 val, offset, start, ver_offset; 15763 int i, dst_off; 15764 bool newver = false; 15765 15766 if (tg3_nvram_read(tp, 0xc, &offset) || 15767 tg3_nvram_read(tp, 0x4, &start)) 15768 return; 15769 15770 offset = tg3_nvram_logical_addr(tp, offset); 15771 15772 if (tg3_nvram_read(tp, offset, &val)) 15773 return; 15774 15775 if ((val & 0xfc000000) == 0x0c000000) { 15776 if (tg3_nvram_read(tp, offset + 4, &val)) 15777 return; 15778 15779 if (val == 0) 15780 newver = true; 15781 } 15782 15783 dst_off = strlen(tp->fw_ver); 15784 15785 if (newver) { 15786 if (TG3_VER_SIZE - dst_off < 16 || 15787 tg3_nvram_read(tp, offset + 8, &ver_offset)) 15788 return; 15789 15790 offset = offset + ver_offset - start; 15791 for (i = 0; i < 16; i += 4) { 15792 __be32 v; 15793 if (tg3_nvram_read_be32(tp, offset + i, &v)) 15794 return; 15795 15796 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v)); 15797 } 15798 } else { 15799 u32 major, minor; 15800 15801 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset)) 15802 return; 15803 15804 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >> 15805 TG3_NVM_BCVER_MAJSFT; 15806 minor = ver_offset & TG3_NVM_BCVER_MINMSK; 15807 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off, 15808 "v%d.%02d", major, minor); 15809 } 15810 } 15811 15812 static void tg3_read_hwsb_ver(struct tg3 *tp) 15813 { 15814 u32 val, major, minor; 15815 15816 /* Use native endian representation */ 15817 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val)) 15818 return; 15819 15820 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >> 15821 TG3_NVM_HWSB_CFG1_MAJSFT; 15822 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >> 15823 TG3_NVM_HWSB_CFG1_MINSFT; 15824 15825 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor); 15826 } 15827 15828 static void tg3_read_sb_ver(struct tg3 *tp, u32 val) 15829 { 15830 u32 offset, major, minor, build; 15831 15832 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1); 15833 15834 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1) 15835 return; 15836 15837 switch (val & TG3_EEPROM_SB_REVISION_MASK) { 15838 case TG3_EEPROM_SB_REVISION_0: 15839 offset = TG3_EEPROM_SB_F1R0_EDH_OFF; 15840 break; 15841 case TG3_EEPROM_SB_REVISION_2: 15842 offset = TG3_EEPROM_SB_F1R2_EDH_OFF; 15843 break; 15844 case TG3_EEPROM_SB_REVISION_3: 15845 offset = TG3_EEPROM_SB_F1R3_EDH_OFF; 15846 break; 15847 case TG3_EEPROM_SB_REVISION_4: 15848 offset = TG3_EEPROM_SB_F1R4_EDH_OFF; 15849 break; 15850 case TG3_EEPROM_SB_REVISION_5: 15851 offset = TG3_EEPROM_SB_F1R5_EDH_OFF; 15852 break; 15853 case TG3_EEPROM_SB_REVISION_6: 15854 offset = TG3_EEPROM_SB_F1R6_EDH_OFF; 15855 break; 15856 default: 15857 return; 15858 } 15859 15860 if (tg3_nvram_read(tp, offset, &val)) 15861 return; 15862 15863 build = (val & 
TG3_EEPROM_SB_EDH_BLD_MASK) >> 15864 TG3_EEPROM_SB_EDH_BLD_SHFT; 15865 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >> 15866 TG3_EEPROM_SB_EDH_MAJ_SHFT; 15867 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK; 15868 15869 if (minor > 99 || build > 26) 15870 return; 15871 15872 offset = strlen(tp->fw_ver); 15873 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset, 15874 " v%d.%02d", major, minor); 15875 15876 if (build > 0) { 15877 offset = strlen(tp->fw_ver); 15878 if (offset < TG3_VER_SIZE - 1) 15879 tp->fw_ver[offset] = 'a' + build - 1; 15880 } 15881 } 15882 15883 static void tg3_read_mgmtfw_ver(struct tg3 *tp) 15884 { 15885 u32 val, offset, start; 15886 int i, vlen; 15887 15888 for (offset = TG3_NVM_DIR_START; 15889 offset < TG3_NVM_DIR_END; 15890 offset += TG3_NVM_DIRENT_SIZE) { 15891 if (tg3_nvram_read(tp, offset, &val)) 15892 return; 15893 15894 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI) 15895 break; 15896 } 15897 15898 if (offset == TG3_NVM_DIR_END) 15899 return; 15900 15901 if (!tg3_flag(tp, 5705_PLUS)) 15902 start = 0x08000000; 15903 else if (tg3_nvram_read(tp, offset - 4, &start)) 15904 return; 15905 15906 if (tg3_nvram_read(tp, offset + 4, &offset) || 15907 !tg3_fw_img_is_valid(tp, offset) || 15908 tg3_nvram_read(tp, offset + 8, &val)) 15909 return; 15910 15911 offset += val - start; 15912 15913 vlen = strlen(tp->fw_ver); 15914 15915 tp->fw_ver[vlen++] = ','; 15916 tp->fw_ver[vlen++] = ' '; 15917 15918 for (i = 0; i < 4; i++) { 15919 __be32 v; 15920 if (tg3_nvram_read_be32(tp, offset, &v)) 15921 return; 15922 15923 offset += sizeof(v); 15924 15925 if (vlen > TG3_VER_SIZE - sizeof(v)) { 15926 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen); 15927 break; 15928 } 15929 15930 memcpy(&tp->fw_ver[vlen], &v, sizeof(v)); 15931 vlen += sizeof(v); 15932 } 15933 } 15934 15935 static void tg3_probe_ncsi(struct tg3 *tp) 15936 { 15937 u32 apedata; 15938 15939 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); 15940 if (apedata != APE_SEG_SIG_MAGIC) 15941 return; 15942 15943 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS); 15944 if (!(apedata & APE_FW_STATUS_READY)) 15945 return; 15946 15947 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) 15948 tg3_flag_set(tp, APE_HAS_NCSI); 15949 } 15950 15951 static void tg3_read_dash_ver(struct tg3 *tp) 15952 { 15953 int vlen; 15954 u32 apedata; 15955 char *fwtype; 15956 15957 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION); 15958 15959 if (tg3_flag(tp, APE_HAS_NCSI)) 15960 fwtype = "NCSI"; 15961 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725) 15962 fwtype = "SMASH"; 15963 else 15964 fwtype = "DASH"; 15965 15966 vlen = strlen(tp->fw_ver); 15967 15968 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d", 15969 fwtype, 15970 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT, 15971 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT, 15972 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT, 15973 (apedata & APE_FW_VERSION_BLDMSK)); 15974 } 15975 15976 static void tg3_read_otp_ver(struct tg3 *tp) 15977 { 15978 u32 val, val2; 15979 15980 if (tg3_asic_rev(tp) != ASIC_REV_5762) 15981 return; 15982 15983 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) && 15984 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) && 15985 TG3_OTP_MAGIC0_VALID(val)) { 15986 u64 val64 = (u64) val << 32 | val2; 15987 u32 ver = 0; 15988 int i, vlen; 15989 15990 for (i = 0; i < 7; i++) { 15991 if ((val64 & 0xff) == 0) 15992 break; 15993 ver = val64 & 0xff; 15994 val64 >>= 8; 15995 } 15996 vlen = 
static void tg3_read_otp_ver(struct tg3 *tp)
{
	u32 val, val2;

	if (tg3_asic_rev(tp) != ASIC_REV_5762)
		return;

	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
	    TG3_OTP_MAGIC0_VALID(val)) {
		u64 val64 = (u64) val << 32 | val2;
		u32 ver = 0;
		int i, vlen;

		for (i = 0; i < 7; i++) {
			if ((val64 & 0xff) == 0)
				break;
			ver = val64 & 0xff;
			val64 >>= 8;
		}
		vlen = strlen(tp->fw_ver);
		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
	}
}

static void tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		tg3_read_otp_ver(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);

	if (tg3_flag(tp, ENABLE_ASF)) {
		if (tg3_flag(tp, ENABLE_APE)) {
			tg3_probe_ncsi(tp);
			if (!vpd_vers)
				tg3_read_dash_ver(tp);
		} else if (!vpd_vers) {
			tg3_read_mgmtfw_ver(tp);
		}
	}

	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}

static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}

static const struct pci_device_id tg3_write_reorder_chipsets[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};

static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
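
/* For reference when reading tg3_detect_asic_rev() below: per the
 * tg3_asic_rev()/tg3_chip_rev() helpers in tg3.h, the 32-bit revision id
 * decodes as asic rev = id >> 12 and chip rev = id >> 8, so e.g. an id
 * of 0x05717000 (illustrative) yields ASIC rev 0x5717, chip rev 0x57170.
 */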
static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);

		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
	    tg3_asic_rev(tp) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}
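
/* The *_PLUS flags set above nest: 5717_PLUS and 57765_CLASS imply
 * 57765_PLUS, which implies 5755_PLUS, which implies 5750_PLUS, which
 * in turn implies 5705_PLUS, so a test of a broader flag also covers
 * all of the newer families.
 */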
static bool tg3_10_100_only_device(struct tg3 *tp,
				   const struct pci_device_id *ent)
{
	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;

	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
		return true;

	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
				return true;
		} else {
			return true;
		}
	}

	return false;
}

static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
{
	u32 misc_ctrl_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err;

	/* Force memory write invalidate off. If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size. The Broadcom driver has this
	 * workaround but turns MWI off at all times, so it never uses
	 * it. This seems to suggest that the workaround is insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* Important! -- Make sure register accesses are byteswapped
	 * correctly. Also, for those chips that require it, make
	 * sure that indirect register accesses are enabled before
	 * the first operation.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			      &misc_ctrl_reg);
	tp->misc_host_ctrl |= (misc_ctrl_reg &
			       MISC_HOST_CTRL_CHIPREV);
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	tg3_detect_asic_rev(tp, misc_ctrl_reg);
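
	/* tp->misc_host_ctrl was seeded in tg3_init_one() with the
	 * byte-swap and indirect-access bits; the OR a few lines up only
	 * merges in the chip revision field read back from the device.
	 */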
	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
	 * we need to disable memory and use config. cycles
	 * only to access all registers. The 5702/03 chips
	 * can mistakenly decode the special cycles from the
	 * ICH chipsets as memory write cycles, causing corruption
	 * of register and memory space. Only certain ICH bridges
	 * will drive special cycles with non-zero data during the
	 * address phase which can fall within the 5703's address
	 * range. This is not an ICH bug as the PCI spec allows
	 * non-zero address during special cycles. However, only
	 * these ICH bridges are known to drive non-zero addresses
	 * during special cycles.
	 *
	 * Since special cycles do not cross PCI bridges, we only
	 * enable this workaround if the 5703 is on the secondary
	 * bus of these ICH bridges.
	 */
	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
	    (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
		static struct tg3_dev_id {
			u32 vendor;
			u32 device;
			u32 rev;
		} ich_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
			  0xa },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
			  PCI_ANY_ID },
			{ },
		};
		struct tg3_dev_id *pci_id = &ich_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor, pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (pci_id->rev != PCI_ANY_ID) {
				if (bridge->revision > pci_id->rev)
					continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number ==
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, ICH_WORKAROUND);
				pci_dev_put(bridge);
				break;
			}
		}
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5701) {
		static struct tg3_dev_id {
			u32 vendor;
			u32 device;
		} bridge_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
			{ },
		};
		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor,
						pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->busn_res.end >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 5701_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		}
	}

	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
	 * DMA addresses > 40-bit. This bridge may have other additional
	 * 57xx devices behind it in some 4-port NIC designs for example.
	 * Any tg3 device found behind the bridge will also need the 40-bit
	 * DMA workaround.
	 */
	if (tg3_flag(tp, 5780_CLASS)) {
		tg3_flag_set(tp, 40BIT_DMA_BUG);
		tp->msi_cap = tp->pdev->msi_cap;
	} else {
		struct pci_dev *bridge = NULL;

		do {
			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
						PCI_DEVICE_ID_SERVERWORKS_EPB,
						bridge);
			if (bridge && bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->busn_res.end >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 40BIT_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		} while (bridge);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tp->pdev_peer = tg3_find_peer(tp);
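
	/* The 5701 and EPB bridge scans above share the same bus-range
	 * test, in pseudo-form:
	 *
	 *	secondary(bridge) <= bus(tg3) <= subordinate_end(bridge)
	 *
	 * i.e. the tg3 device sits somewhere at or below the bridge's
	 * secondary bus. The ICH scan only cares about the directly
	 * attached secondary bus and tests for equality instead.
	 */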
	/* Determine TSO capabilities */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
		; /* Do nothing. HW bug. */
	else if (tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, HW_TSO_3);
	else if (tg3_flag(tp, 5755_PLUS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5906)
		tg3_flag_set(tp, HW_TSO_2);
	else if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, HW_TSO_1);
		tg3_flag_set(tp, TSO_BUG);
		if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
		    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
			tg3_flag_clear(tp, TSO_BUG);
	} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		   tg3_asic_rev(tp) != ASIC_REV_5701 &&
		   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		tg3_flag_set(tp, FW_TSO);
		tg3_flag_set(tp, TSO_BUG);
		if (tg3_asic_rev(tp) == ASIC_REV_5705)
			tp->fw_needed = FIRMWARE_TG3TSO5;
		else
			tp->fw_needed = FIRMWARE_TG3TSO;
	}

	/* Selectively allow TSO based on operating conditions */
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3) ||
	    tg3_flag(tp, FW_TSO)) {
		/* For firmware TSO, assume ASF is disabled.
		 * We'll disable TSO later if we discover ASF
		 * is enabled in tg3_get_eeprom_hw_cfg().
		 */
		tg3_flag_set(tp, TSO_CAPABLE);
	} else {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
		tp->fw_needed = FIRMWARE_TG3;

	if (tg3_asic_rev(tp) == ASIC_REV_57766)
		tp->fw_needed = FIRMWARE_TG357766;

	tp->irq_max = 1;

	if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, SUPPORT_MSI);
		if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
		    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
		    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
		     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
		     tp->pdev_peer == tp->pdev))
			tg3_flag_clear(tp, SUPPORT_MSI);

		if (tg3_flag(tp, 5755_PLUS) ||
		    tg3_asic_rev(tp) == ASIC_REV_5906) {
			tg3_flag_set(tp, 1SHOT_MSI);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			tg3_flag_set(tp, SUPPORT_MSIX);
			tp->irq_max = TG3_IRQ_MAX_VECS;
		}
	}

	tp->txq_max = 1;
	tp->rxq_max = 1;
	if (tp->irq_max > 1) {
		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);

		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			tp->txq_max = tp->irq_max - 1;
	}

	if (tg3_flag(tp, 5755_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5906)
		tg3_flag_set(tp, SHORT_DMA_BUG);

	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, LRG_PROD_RING_CAP);

	if (tg3_flag(tp, 57765_PLUS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
		tg3_flag_set(tp, USE_JUMBO_BDFLAG);

	if (!tg3_flag(tp, 5705_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_flag(tp, USE_JUMBO_BDFLAG))
		tg3_flag_set(tp, JUMBO_CAPABLE);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
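
	/* pci_state_reg (TG3PCI_PCISTATE) read above is what separates a
	 * part strapped for PCI-X from conventional PCI below, via the
	 * PCISTATE_CONV_PCI_MODE bit; it is re-read further down, once
	 * the access methods are settled, for the 5701 SRAM write probe.
	 */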
	if (pci_is_pcie(tp->pdev)) {
		u16 lnkctl;

		tg3_flag_set(tp, PCI_EXPRESS);

		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
				tg3_flag_clear(tp, HW_TSO_2);
				tg3_flag_clear(tp, TSO_CAPABLE);
			}
			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
				tg3_flag_set(tp, CLKREQ_BUG);
		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
			tg3_flag_set(tp, L1PLLPD_EN);
		}
	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		/* BCM5785 devices are effectively PCIe devices, and should
		 * follow PCIe codepaths, but do not have a PCIe capabilities
		 * section.
		 */
		tg3_flag_set(tp, PCI_EXPRESS);
	} else if (!tg3_flag(tp, 5705_PLUS) ||
		   tg3_flag(tp, 5780_CLASS)) {
		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
		if (!tp->pcix_cap) {
			dev_err(&tp->pdev->dev,
				"Cannot find PCI-X capability, aborting\n");
			return -EIO;
		}

		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
			tg3_flag_set(tp, PCIX_MODE);
	}

	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles. We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
	if (pci_dev_present(tg3_write_reorder_chipsets) &&
	    !tg3_flag(tp, PCI_EXPRESS))
		tg3_flag_set(tp, MBOX_WRITE_REORDER);

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
			     &tp->pci_cacheline_sz);
	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
			     &tp->pci_lat_timer);
	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Important! -- It is critical that the PCI-X hw workaround
	 * situation is decided before the first MMIO register access.
	 */
	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to workaround a bug.
		 */
		tg3_flag_set(tp, TXD_MBOX_HWBUG);

		/* If we are in PCI-X mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (tg3_flag(tp, PCIX_MODE)) {
			u32 pm_reg;

			tg3_flag_set(tp, PCIX_TARGET_HWBUG);

			/* The chip can have its power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev,
					      tp->pdev->pm_cap + PCI_PM_CTRL,
					      &pm_reg);
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev,
					       tp->pdev->pm_cap + PCI_PM_CTRL,
					       pm_reg);
			/* Also, force SERR#/PERR# in PCI command. */
			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
		}
	}

	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tg3_flag_set(tp, PCI_HIGH_SPEED);
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tg3_flag_set(tp, PCI_32BIT);

	/* Chip-specific fixup from Broadcom driver */
	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
	}

	/* Default fast path register access methods */
	tp->read32 = tg3_read32;
	tp->write32 = tg3_write32;
	tp->read32_mbox = tg3_read32;
	tp->write32_mbox = tg3_write32;
	tp->write32_tx_mbox = tg3_write32;
	tp->write32_rx_mbox = tg3_write32;

	/* Various workaround register access methods */
	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
		tp->write32 = tg3_write_indirect_reg32;
	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
		 (tg3_flag(tp, PCI_EXPRESS) &&
		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
		/*
		 * Back to back register writes can cause problems on these
		 * chips, the workaround is to read back all reg writes
		 * except those to mailbox regs.
		 *
		 * See tg3_write_indirect_reg32().
		 */
		tp->write32 = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		if (tg3_flag(tp, MBOX_WRITE_REORDER))
			tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, ICH_WORKAROUND)) {
		tp->read32 = tg3_read_indirect_reg32;
		tp->write32 = tg3_write_indirect_reg32;
		tp->read32_mbox = tg3_read_indirect_mbox;
		tp->write32_mbox = tg3_write_indirect_mbox;
		tp->write32_tx_mbox = tg3_write_indirect_mbox;
		tp->write32_rx_mbox = tg3_write_indirect_mbox;

		iounmap(tp->regs);
		tp->regs = NULL;

		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd &= ~PCI_COMMAND_MEMORY;
		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	}
	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tp->read32_mbox = tg3_read32_mbox_5906;
		tp->write32_mbox = tg3_write32_mbox_5906;
		tp->write32_tx_mbox = tg3_write32_mbox_5906;
		tp->write32_rx_mbox = tg3_write32_mbox_5906;
	}

	if (tp->write32 == tg3_write_indirect_reg32 ||
	    (tg3_flag(tp, PCIX_MODE) &&
	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	      tg3_asic_rev(tp) == ASIC_REV_5701)))
		tg3_flag_set(tp, SRAM_USE_CONFIG);
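
	/* With SRAM_USE_CONFIG set, SRAM words are reached through the
	 * PCI config-space window rather than MMIO; this is the same
	 * MEM_WIN_BASE_ADDR/MEM_WIN_DATA pair that tg3_do_test_dma()
	 * further down drives directly:
	 *
	 *	pci_write_config_dword(tp->pdev,
	 *			       TG3PCI_MEM_WIN_BASE_ADDR, off);
	 *	pci_write_config_dword(tp->pdev,
	 *			       TG3PCI_MEM_WIN_DATA, val);
	 */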
	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed. Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tg3_flag(tp, PCIX_MODE)) {
			pci_read_config_dword(tp->pdev,
					      tp->pcix_cap + PCI_X_STATUS,
					      &val);
			tp->pci_fn = val & 0x7;
		}
	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
		   tg3_asic_rev(tp) == ASIC_REV_5720) {
		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
			val = tr32(TG3_CPMU_STATUS);

		if (tg3_asic_rev(tp) == ASIC_REV_5717)
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
		else
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
				     TG3_CPMU_STATUS_FSHFT_5719;
	}

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		tp->write32_tx_mbox = tg3_write_flush_reg32;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLAG_IS_NIC flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
	 */
	tg3_get_eeprom_hw_cfg(tp);

	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
				 PCISTATE_ALLOW_APE_SHMEM_WR |
				 PCISTATE_ALLOW_APE_PSPACE_WR;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
				       pci_state_reg);

		tg3_ape_lock_init(tp);
		tp->ape_hb_interval =
			msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
	}

	/* Set up tp->grc_local_ctrl before calling
	 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
	 * will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_flag(tp, EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_CLASS))
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* Turn off the debug UART. */
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
		if (tg3_flag(tp, IS_NIC))
			/* Keep VMain power. */
			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					      GRC_LCLCTRL_GPIO_OUTPUT0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5762)
		tp->grc_local_ctrl |=
			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;

	/* Switch out of Vaux if it is a NIC */
	tg3_pwrsrc_switch_to_vmain(tp);

	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, JUMBO_RING_ENABLE);

	/* Determine WakeOnLan speed to use. */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
		tg3_flag_clear(tp, WOL_SPEED_100MB);
	} else {
		tg3_flag_set(tp, WOL_SPEED_100MB);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906)
		tp->phy_flags |= TG3_PHYFLG_IS_FET;

	/* A few boards don't want the Ethernet@WireSpeed PHY feature */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;

	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;

	if (tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
		    tg3_asic_rev(tp) == ASIC_REV_5761) {
			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
		} else
			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		tp->phy_otp = tg3_read_otp_phycfg(tp);
		if (tp->phy_otp == 0)
			tp->phy_otp = TG3_OTP_DEFAULT;
	}

	if (tg3_flag(tp, CPMU_PRESENT))
		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
	else
		tp->mi_mode = MAC_MI_MODE_BASE;

	tp->coalesce_mode = 0;
	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

	/* Set these bits to enable statistics workaround. */
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		tg3_flag_set(tp, USE_PHYLIB);

	err = tg3_mdio_init(tp);
	if (err)
		return err;
	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
			GRC_MODE_WORD_SWAP_B2HRX_DATA |
			GRC_MODE_B2HRX_ENABLE |
			GRC_MODE_HTX2B_ENABLE |
			GRC_MODE_HOST_STACKUP);
	else
		val &= GRC_MODE_HOST_STACKUP;

	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);

	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
	tw32(TG3PCI_REG_BASE_ADDR, 0);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
			void __iomem *sram_base;

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly. If the return
			 * value is bad, force enable the PCIX workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
		}
	}

	udelay(50);
	tg3_nvram_init(tp);

	/* If the device has an NVRAM, no need to load patch firmware */
	if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
	    !tg3_flag(tp, NO_NVRAM))
		tp->fw_needed = NULL;

	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tg3_flag_set(tp, IS_5788);

	if (!tg3_flag(tp, IS_5788) &&
	    tg3_asic_rev(tp) != ASIC_REV_5700)
		tg3_flag_set(tp, TAGGED_STATUS);
	if (tg3_flag(tp, TAGGED_STATUS)) {
		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
				      HOSTCC_MODE_CLRTICK_TXBD);

		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
				       tp->misc_host_ctrl);
	}

	/* Preserve the APE MAC_MODE bits */
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	else
		tp->mac_mode = 0;

	if (tg3_10_100_only_device(tp, ent))
		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;

	err = tg3_phy_probe(tp);
	if (err) {
		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
		/* ... but do not return immediately ... */
		tg3_mdio_fini(tp);
	}

	tg3_read_vpd(tp);
	tg3_read_fw_ver(tp);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		else
			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	}
	/* 5700 {AX,BX} chips have a broken status block link
	 * change bit implementation, so we must use the
	 * status register in those cases.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5700)
		tg3_flag_set(tp, USE_LINKCHG_REG);
	else
		tg3_flag_clear(tp, USE_LINKCHG_REG);

	/* The led_ctrl is set during tg3_phy_probe, here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		tg3_flag_set(tp, USE_LINKCHG_REG);
	}

	/* For all SERDES we poll the MAC status register. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		tg3_flag_set(tp, POLL_SERDES);
	else
		tg3_flag_clear(tp, POLL_SERDES);

	if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
		tg3_flag_set(tp, POLL_CPMU_LINK);

	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
	    tg3_flag(tp, PCIX_MODE)) {
		tp->rx_offset = NET_SKB_PAD;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		tp->rx_copy_thresh = ~(u16)0;
#endif
	}

	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;

	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;

	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to workaround hw errata.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5755)
		tp->rx_std_max_post = 8;

	if (tg3_flag(tp, ASPM_WORKAROUND))
		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
				     PCIE_PWR_MGMT_L1_THRESH_MSK;

	return err;
}
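
/* tg3_get_device_address() below walks a priority list: a firmware or
 * platform supplied address first, then the SSB host (for embedded
 * cores), then the bootcode mailbox in SRAM (tagged with the ASCII
 * magic 0x484b, "HK"), then NVRAM, and as a last resort whatever is
 * currently latched in the MAC address registers.
 */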
static int tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;
	int err;

	if (!eth_platform_get_mac_address(&tp->pdev->dev, dev->dev_addr))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
			return 0;
	}

	mac_offset = 0x7c;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >> 8) & 0xff;
		dev->dev_addr[1] = (hi >> 0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >> 8) & 0xff;
		dev->dev_addr[5] = (lo >> 0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0]))
		return -EINVAL;
	return 0;
}

#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;
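
	/* PCI_CACHE_LINE_SIZE is specified in 32-bit words, hence the
	 * "* 4" in the read at the top of this function: a register
	 * value of 0x10 means a 64-byte cache line. Zero is treated as
	 * "unknown" and mapped to the most pessimistic 1024-byte setting.
	 */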
	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects. We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
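
/* tg3_calc_dma_bndry() only folds boundary bits into whatever value the
 * caller has already built up; tg3_test_dma() below uses it as:
 *
 *	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
 */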
static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
			   int size, bool to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}

#define TEST_BUFFER_SIZE	0x2000

static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};

static int tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17320 */ 17321 if (tg3_flag(tp, 40BIT_DMA_BUG) && 17322 tg3_asic_rev(tp) == ASIC_REV_5704) 17323 tp->dma_rwctrl |= 0x8000; 17324 else if (ccval == 0x6 || ccval == 0x7) 17325 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA; 17326 17327 if (tg3_asic_rev(tp) == ASIC_REV_5703) 17328 read_water = 4; 17329 /* Set bit 23 to enable PCIX hw bug fix */ 17330 tp->dma_rwctrl |= 17331 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) | 17332 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) | 17333 (1 << 23); 17334 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) { 17335 /* 5780 always in PCIX mode */ 17336 tp->dma_rwctrl |= 0x00144000; 17337 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) { 17338 /* 5714 always in PCIX mode */ 17339 tp->dma_rwctrl |= 0x00148000; 17340 } else { 17341 tp->dma_rwctrl |= 0x001b000f; 17342 } 17343 } 17344 if (tg3_flag(tp, ONE_DMA_AT_ONCE)) 17345 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA; 17346 17347 if (tg3_asic_rev(tp) == ASIC_REV_5703 || 17348 tg3_asic_rev(tp) == ASIC_REV_5704) 17349 tp->dma_rwctrl &= 0xfffffff0; 17350 17351 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 17352 tg3_asic_rev(tp) == ASIC_REV_5701) { 17353 /* Remove this if it causes problems for some boards. */ 17354 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT; 17355 17356 /* On 5700/5701 chips, we need to set this bit. 17357 * Otherwise the chip will issue cacheline transactions 17358 * to streamable DMA memory with not all the byte 17359 * enables turned on. This is an error on several 17360 * RISC PCI controllers, in particular sparc64. 17361 * 17362 * On 5703/5704 chips, this bit has been reassigned 17363 * a different meaning. In particular, it is used 17364 * on those chips to enable a PCI-X workaround. 17365 */ 17366 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE; 17367 } 17368 17369 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 17370 17371 17372 if (tg3_asic_rev(tp) != ASIC_REV_5700 && 17373 tg3_asic_rev(tp) != ASIC_REV_5701) 17374 goto out; 17375 17376 /* It is best to perform DMA test with maximum write burst size 17377 * to expose the 5700/5701 write DMA bug. 17378 */ 17379 saved_dma_rwctrl = tp->dma_rwctrl; 17380 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; 17381 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 17382 17383 while (1) { 17384 u32 *p = buf, i; 17385 17386 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) 17387 p[i] = i; 17388 17389 /* Send the buffer to the chip. */ 17390 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true); 17391 if (ret) { 17392 dev_err(&tp->pdev->dev, 17393 "%s: Buffer write failed. err = %d\n", 17394 __func__, ret); 17395 break; 17396 } 17397 17398 /* Now read it back. */ 17399 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false); 17400 if (ret) { 17401 dev_err(&tp->pdev->dev, "%s: Buffer read failed. " 17402 "err = %d\n", __func__, ret); 17403 break; 17404 } 17405 17406 /* Verify it. */ 17407 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) { 17408 if (p[i] == i) 17409 continue; 17410 17411 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != 17412 DMA_RWCTRL_WRITE_BNDRY_16) { 17413 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; 17414 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; 17415 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 17416 break; 17417 } else { 17418 dev_err(&tp->pdev->dev, 17419 "%s: Buffer corrupted on read back! " 17420 "(%d != %d)\n", __func__, p[i], i); 17421 ret = -ENODEV; 17422 goto out; 17423 } 17424 } 17425 17426 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) { 17427 /* Success. 
		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}

static void tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tg3_flag(tp, 57765_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
static char *tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400: return "5400";
	case TG3_PHY_ID_BCM5401: return "5401";
	case TG3_PHY_ID_BCM5411: return "5411";
	case TG3_PHY_ID_BCM5701: return "5701";
	case TG3_PHY_ID_BCM5703: return "5703";
	case TG3_PHY_ID_BCM5704: return "5704";
	case TG3_PHY_ID_BCM5705: return "5705";
	case TG3_PHY_ID_BCM5750: return "5750";
	case TG3_PHY_ID_BCM5752: return "5752";
	case TG3_PHY_ID_BCM5714: return "5714";
	case TG3_PHY_ID_BCM5780: return "5780";
	case TG3_PHY_ID_BCM5755: return "5755";
	case TG3_PHY_ID_BCM5787: return "5787";
	case TG3_PHY_ID_BCM5784: return "5784";
	case TG3_PHY_ID_BCM5756: return "5722/5756";
	case TG3_PHY_ID_BCM5906: return "5906";
	case TG3_PHY_ID_BCM5761: return "5761";
	case TG3_PHY_ID_BCM5718C: return "5718C";
	case TG3_PHY_ID_BCM5718S: return "5718S";
	case TG3_PHY_ID_BCM57765: return "57765";
	case TG3_PHY_ID_BCM5719C: return "5719C";
	case TG3_PHY_ID_BCM5720C: return "5720C";
	case TG3_PHY_ID_BCM5762: return "5762C";
	case TG3_PHY_ID_BCM8002: return "8002/serdes";
	case 0: return "serdes";
	default: return "unknown";
	}
}

static char *tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}

static void tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}
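
/* These are the defaults a user first sees from "ethtool -c"; note how
 * tg3_init_coal() above keys off the HOSTCC_MODE_CLRTICK_* bits chosen
 * in tg3_get_invariants() and zeroes the IRQ/stats values on 5705_PLUS
 * parts, presumably because the hardware does not implement them there.
 */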
aborting\n"); 17632 goto err_out_disable_pdev; 17633 } 17634 17635 pci_set_master(pdev); 17636 17637 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS); 17638 if (!dev) { 17639 err = -ENOMEM; 17640 goto err_out_free_res; 17641 } 17642 17643 SET_NETDEV_DEV(dev, &pdev->dev); 17644 17645 tp = netdev_priv(dev); 17646 tp->pdev = pdev; 17647 tp->dev = dev; 17648 tp->rx_mode = TG3_DEF_RX_MODE; 17649 tp->tx_mode = TG3_DEF_TX_MODE; 17650 tp->irq_sync = 1; 17651 tp->pcierr_recovery = false; 17652 17653 if (tg3_debug > 0) 17654 tp->msg_enable = tg3_debug; 17655 else 17656 tp->msg_enable = TG3_DEF_MSG_ENABLE; 17657 17658 if (pdev_is_ssb_gige_core(pdev)) { 17659 tg3_flag_set(tp, IS_SSB_CORE); 17660 if (ssb_gige_must_flush_posted_writes(pdev)) 17661 tg3_flag_set(tp, FLUSH_POSTED_WRITES); 17662 if (ssb_gige_one_dma_at_once(pdev)) 17663 tg3_flag_set(tp, ONE_DMA_AT_ONCE); 17664 if (ssb_gige_have_roboswitch(pdev)) { 17665 tg3_flag_set(tp, USE_PHYLIB); 17666 tg3_flag_set(tp, ROBOSWITCH); 17667 } 17668 if (ssb_gige_is_rgmii(pdev)) 17669 tg3_flag_set(tp, RGMII_MODE); 17670 } 17671 17672 /* The word/byte swap controls here control register access byte 17673 * swapping. DMA data byte swapping is controlled in the GRC_MODE 17674 * setting below. 17675 */ 17676 tp->misc_host_ctrl = 17677 MISC_HOST_CTRL_MASK_PCI_INT | 17678 MISC_HOST_CTRL_WORD_SWAP | 17679 MISC_HOST_CTRL_INDIR_ACCESS | 17680 MISC_HOST_CTRL_PCISTATE_RW; 17681 17682 /* The NONFRM (non-frame) byte/word swap controls take effect 17683 * on descriptor entries, anything which isn't packet data. 17684 * 17685 * The StrongARM chips on the board (one for tx, one for rx) 17686 * are running in big-endian mode. 17687 */ 17688 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA | 17689 GRC_MODE_WSWAP_NONFRM_DATA); 17690 #ifdef __BIG_ENDIAN 17691 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA; 17692 #endif 17693 spin_lock_init(&tp->lock); 17694 spin_lock_init(&tp->indirect_lock); 17695 INIT_WORK(&tp->reset_task, tg3_reset_task); 17696 17697 tp->regs = pci_ioremap_bar(pdev, BAR_0); 17698 if (!tp->regs) { 17699 dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); 17700 err = -ENOMEM; 17701 goto err_out_free_dev; 17702 } 17703 17704 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || 17705 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E || 17706 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S || 17707 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE || 17708 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || 17709 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C || 17710 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || 17711 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || 17712 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 || 17713 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 || 17714 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 || 17715 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 || 17716 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 || 17717 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 || 17718 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) { 17719 tg3_flag_set(tp, ENABLE_APE); 17720 tp->aperegs = pci_ioremap_bar(pdev, BAR_2); 17721 if (!tp->aperegs) { 17722 dev_err(&pdev->dev, 17723 "Cannot map APE registers, aborting\n"); 17724 err = -ENOMEM; 17725 goto err_out_iounmap; 17726 } 17727 } 17728 17729 tp->rx_pending = TG3_DEF_RX_RING_PENDING; 17730 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING; 17731 17732 dev->ethtool_ops = &tg3_ethtool_ops; 17733 dev->watchdog_timeo = TG3_TX_TIMEOUT; 17734 dev->netdev_ops = &tg3_netdev_ops; 17735 
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp, ent);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
		    tg3_asic_rev(tp) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
			 NETIF_F_HW_VLAN_CTAG_RX;
	dev->vlan_features |= features;
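	/* Note: dev->vlan_features governs which of these offloads are
	 * inherited by VLAN devices stacked on top of this interface.
	 */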
	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow
	 * INT-PHY loopback for the remaining devices.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;
	dev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 60 - 9000 or 1500, depending on hardware */
	dev->min_mtu = TG3_MIN_MTU;
	dev->max_mtu = TG3_MAX_MTU(tp);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we set
		 * up above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}
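	/* Whether MSI-X can actually be used is decided later, in
	 * tg3_ints_init() at open time; the vector-0 values written above
	 * remain valid if the driver has to fall back to MSI or INTx.
	 */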
	/*
	 * Reset chip in case UNDI or EFI driver did not shut down the
	 * device cleanly.  The DMA self test will enable WDMAC, and we
	 * would see (spurious) pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tg3_full_lock(tp, 0);
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, PTP_CAPABLE);

	tg3_timer_init(tp);

	tg3_carrier_off(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	if (tg3_flag(tp, PTP_CAPABLE)) {
		tg3_ptp_init(tp);
		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
						   &tp->pdev->dev);
		if (IS_ERR(tp->ptp_clock))
			tp->ptp_clock = NULL;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tg3_chip_rev_id(tp),
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
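	/* Save the PCI config space now; tg3_io_slot_reset() restores it
	 * with pci_restore_state() when recovering from a bus error.
	 */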
	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	return err;
}

static void tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		tg3_ptp_fini(tp);

		release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
	}
}

#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, true);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

unlock:
	rtnl_unlock();
	return err;
}

static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp,
			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

unlock:
	rtnl_unlock();
	return err;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
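/* SIMPLE_DEV_PM_OPS() only wires up the suspend/resume pair when
 * CONFIG_PM_SLEEP is set, matching the #ifdef guard around the two
 * callbacks above; otherwise tg3_pm_ops is effectively empty.
 */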
static void tg3_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (netif_running(dev))
		dev_close(dev);

	if (system_state == SYSTEM_POWER_OFF)
		tg3_power_down(tp);

	rtnl_unlock();
}

/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	/* We probably don't have netdev yet */
	if (!netdev || !netif_running(netdev))
		goto done;

	/* We needn't recover from permanent error */
	if (state == pci_channel_io_frozen)
		tp->pcierr_recovery = true;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure) {
		if (netdev) {
			tg3_napi_enable(tp);
			dev_close(netdev);
		}
		err = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_disable_device(pdev);
	}

	rtnl_unlock();

	return err;
}

/**
 * tg3_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netdev || !netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
		tg3_napi_enable(tp);
		dev_close(netdev);
	}
	rtnl_unlock();

	return rc;
}
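/* The PCI error recovery core invokes these handlers in order:
 * error_detected() first, then slot_reset() once the link has been
 * reset, and finally resume() below, which brings the interface back up.
 */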
/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netdev || !netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	tg3_phy_start(tp);

done:
	tp->pcierr_recovery = false;
	rtnl_unlock();
}

static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= &tg3_pm_ops,
	.shutdown	= tg3_shutdown,
};

module_pci_driver(tg3_driver);