/* tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2014 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)		\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)		\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
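
/* Note: routing the tg3_flag*() macros through the _tg3_flag*() inline
 * helpers above means each TG3_FLAG_##flag expansion is type-checked
 * against enum TG3_FLAGS; calling test_bit()/set_bit()/clear_bit()
 * directly from the macros would silently accept any integer.
 */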

#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		137
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"May 11, 2014"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
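
/* Worked example of the mask trick described above: with
 * TG3_TX_RING_SIZE = 512, NEXT_TX(510) is 511 and NEXT_TX(511) is
 * (512 & 511) == 0, so the index wraps with a single AND instead of a
 * hardware divide.  This only works because the ring size is a power
 * of two.
 */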

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
"tx_xoff_sent" }, 396 { "tx_flow_control" }, 397 { "tx_mac_errors" }, 398 { "tx_single_collisions" }, 399 { "tx_mult_collisions" }, 400 { "tx_deferred" }, 401 { "tx_excessive_collisions" }, 402 { "tx_late_collisions" }, 403 { "tx_collide_2times" }, 404 { "tx_collide_3times" }, 405 { "tx_collide_4times" }, 406 { "tx_collide_5times" }, 407 { "tx_collide_6times" }, 408 { "tx_collide_7times" }, 409 { "tx_collide_8times" }, 410 { "tx_collide_9times" }, 411 { "tx_collide_10times" }, 412 { "tx_collide_11times" }, 413 { "tx_collide_12times" }, 414 { "tx_collide_13times" }, 415 { "tx_collide_14times" }, 416 { "tx_collide_15times" }, 417 { "tx_ucast_packets" }, 418 { "tx_mcast_packets" }, 419 { "tx_bcast_packets" }, 420 { "tx_carrier_sense_errors" }, 421 { "tx_discards" }, 422 { "tx_errors" }, 423 424 { "dma_writeq_full" }, 425 { "dma_write_prioq_full" }, 426 { "rxbds_empty" }, 427 { "rx_discards" }, 428 { "rx_errors" }, 429 { "rx_threshold_hit" }, 430 431 { "dma_readq_full" }, 432 { "dma_read_prioq_full" }, 433 { "tx_comp_queue_full" }, 434 435 { "ring_set_send_prod_index" }, 436 { "ring_status_update" }, 437 { "nic_irqs" }, 438 { "nic_avoided_irqs" }, 439 { "nic_tx_threshold_hit" }, 440 441 { "mbuf_lwm_thresh_hit" }, 442 }; 443 444 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys) 445 #define TG3_NVRAM_TEST 0 446 #define TG3_LINK_TEST 1 447 #define TG3_REGISTER_TEST 2 448 #define TG3_MEMORY_TEST 3 449 #define TG3_MAC_LOOPB_TEST 4 450 #define TG3_PHY_LOOPB_TEST 5 451 #define TG3_EXT_LOOPB_TEST 6 452 #define TG3_INTERRUPT_TEST 7 453 454 455 static const struct { 456 const char string[ETH_GSTRING_LEN]; 457 } ethtool_test_keys[] = { 458 [TG3_NVRAM_TEST] = { "nvram test (online) " }, 459 [TG3_LINK_TEST] = { "link test (online) " }, 460 [TG3_REGISTER_TEST] = { "register test (offline)" }, 461 [TG3_MEMORY_TEST] = { "memory test (offline)" }, 462 [TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" }, 463 [TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" }, 464 [TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" }, 465 [TG3_INTERRUPT_TEST] = { "interrupt test (offline)" }, 466 }; 467 468 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys) 469 470 471 static void tg3_write32(struct tg3 *tp, u32 off, u32 val) 472 { 473 writel(val, tp->regs + off); 474 } 475 476 static u32 tg3_read32(struct tg3 *tp, u32 off) 477 { 478 return readl(tp->regs + off); 479 } 480 481 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val) 482 { 483 writel(val, tp->aperegs + off); 484 } 485 486 static u32 tg3_ape_read32(struct tg3 *tp, u32 off) 487 { 488 return readl(tp->aperegs + off); 489 } 490 491 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val) 492 { 493 unsigned long flags; 494 495 spin_lock_irqsave(&tp->indirect_lock, flags); 496 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); 497 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val); 498 spin_unlock_irqrestore(&tp->indirect_lock, flags); 499 } 500 501 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val) 502 { 503 writel(val, tp->regs + off); 504 readl(tp->regs + off); 505 } 506 507 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off) 508 { 509 unsigned long flags; 510 u32 val; 511 512 spin_lock_irqsave(&tp->indirect_lock, flags); 513 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); 514 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val); 515 spin_unlock_irqrestore(&tp->indirect_lock, flags); 516 return val; 517 } 518 519 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
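
/* Usage convention implied by the definitions above: tw32()/tr32() are
 * plain register accesses, tw32_f() additionally reads the register
 * back to flush a posted write, and tw32_wait_f() flushes and then
 * honors a per-register settle time (e.g. the 40 usec waits used for
 * TG3PCI_CLOCK_CTRL in tg3_switch_clocks() below).
 */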

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}
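
/* APE lock protocol, as implemented below: the driver writes its
 * request bit to the per-lock REQ register and polls the GRANT
 * register for up to 1 ms; if the grant never shows up, the same bit
 * is written to the GRANT register to revoke the request.  PHY locks
 * always use APE_LOCK_REQ_DRIVER, while the GPIO/GRC/MEM locks use a
 * per-function bit.
 */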
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}
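
/* Shared-memory handshake used by tg3_ape_scratchpad_read() below:
 * grab the MEM lock, post a DRIVER_EVNT with the scratchpad offset
 * and length in the message buffer, ring APE_EVENT_1, then wait for
 * the APE to clear EVENT_PENDING before copying the reply words out
 * of the message area.  Transfers longer than the message buffer are
 * split into buffer-sized chunks.
 */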
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS		5000
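
/* MII management frames below are built in MAC_MI_COM: the PHY and
 * register addresses are shifted into their fields, a read or write
 * command plus MI_COM_START is OR'ed in, and MI_COM_BUSY is then
 * polled (up to PHY_BUSY_LOOPS iterations of 10 usec) until the MAC
 * has clocked the frame out on the MDIO bus.  MAC autopolling is
 * paused around the access so the MAC does not issue its own frames.
 */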
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}
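
/* The cl45 helpers below reach clause-45 MMD registers through the
 * clause-22 indirect window: select the MMD device in
 * MII_TG3_MMD_CTRL, write the register address to MII_TG3_MMD_ADDRESS,
 * switch the control register to no-post-increment data mode, after
 * which the same address register acts as the data port for the read
 * or write.
 */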
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[tp->phy_addr];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[tp->phy_addr];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}
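
/* The UMP link report below packs four words for the firmware, each
 * holding two 16-bit MII registers (high:low): BMCR:BMSR,
 * ADVERTISE:LPA, CTRL1000:STAT1000 (left zero for MII serdes PHYs),
 * and PHYADDR in the high half of the last word.
 */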

/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}

/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			if (pci_channel_offline(tp->pdev))
				return -ENODEV;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		if (pci_channel_offline(tp->pdev)) {
			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
				tg3_flag_set(tp, NO_FWARE_REPORTED);
				netdev_info(tp->dev, "No firmware running\n");
			}

			break;
		}

		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}

	tp->link_up = netif_carrier_ok(tp->dev);
}
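
/* Pause resolution helpers: in the 802.3 pause advertisement encoding
 * decoded below, the plain PAUSE bit requests symmetric flow control
 * (RX, plus TX when ASYM is clear), while the ASYM bit alone requests
 * asymmetric TX-only pause.  The same rules apply to the 1000BASE-X
 * variants of the bits.
 */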
"enabled" : "disabled"); 1896 1897 tg3_ump_link_report(tp); 1898 } 1899 1900 tp->link_up = netif_carrier_ok(tp->dev); 1901 } 1902 1903 static u32 tg3_decode_flowctrl_1000T(u32 adv) 1904 { 1905 u32 flowctrl = 0; 1906 1907 if (adv & ADVERTISE_PAUSE_CAP) { 1908 flowctrl |= FLOW_CTRL_RX; 1909 if (!(adv & ADVERTISE_PAUSE_ASYM)) 1910 flowctrl |= FLOW_CTRL_TX; 1911 } else if (adv & ADVERTISE_PAUSE_ASYM) 1912 flowctrl |= FLOW_CTRL_TX; 1913 1914 return flowctrl; 1915 } 1916 1917 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl) 1918 { 1919 u16 miireg; 1920 1921 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX)) 1922 miireg = ADVERTISE_1000XPAUSE; 1923 else if (flow_ctrl & FLOW_CTRL_TX) 1924 miireg = ADVERTISE_1000XPSE_ASYM; 1925 else if (flow_ctrl & FLOW_CTRL_RX) 1926 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM; 1927 else 1928 miireg = 0; 1929 1930 return miireg; 1931 } 1932 1933 static u32 tg3_decode_flowctrl_1000X(u32 adv) 1934 { 1935 u32 flowctrl = 0; 1936 1937 if (adv & ADVERTISE_1000XPAUSE) { 1938 flowctrl |= FLOW_CTRL_RX; 1939 if (!(adv & ADVERTISE_1000XPSE_ASYM)) 1940 flowctrl |= FLOW_CTRL_TX; 1941 } else if (adv & ADVERTISE_1000XPSE_ASYM) 1942 flowctrl |= FLOW_CTRL_TX; 1943 1944 return flowctrl; 1945 } 1946 1947 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv) 1948 { 1949 u8 cap = 0; 1950 1951 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) { 1952 cap = FLOW_CTRL_TX | FLOW_CTRL_RX; 1953 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) { 1954 if (lcladv & ADVERTISE_1000XPAUSE) 1955 cap = FLOW_CTRL_RX; 1956 if (rmtadv & ADVERTISE_1000XPAUSE) 1957 cap = FLOW_CTRL_TX; 1958 } 1959 1960 return cap; 1961 } 1962 1963 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv) 1964 { 1965 u8 autoneg; 1966 u8 flowctrl = 0; 1967 u32 old_rx_mode = tp->rx_mode; 1968 u32 old_tx_mode = tp->tx_mode; 1969 1970 if (tg3_flag(tp, USE_PHYLIB)) 1971 autoneg = tp->mdio_bus->phy_map[tp->phy_addr]->autoneg; 1972 else 1973 autoneg = tp->link_config.autoneg; 1974 1975 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) { 1976 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) 1977 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv); 1978 else 1979 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv); 1980 } else 1981 flowctrl = tp->link_config.flowctrl; 1982 1983 tp->link_config.active_flowctrl = flowctrl; 1984 1985 if (flowctrl & FLOW_CTRL_RX) 1986 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE; 1987 else 1988 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE; 1989 1990 if (old_rx_mode != tp->rx_mode) 1991 tw32_f(MAC_RX_MODE, tp->rx_mode); 1992 1993 if (flowctrl & FLOW_CTRL_TX) 1994 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE; 1995 else 1996 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE; 1997 1998 if (old_tx_mode != tp->tx_mode) 1999 tw32_f(MAC_TX_MODE, tp->tx_mode); 2000 } 2001 2002 static void tg3_adjust_link(struct net_device *dev) 2003 { 2004 u8 oldflowctrl, linkmesg = 0; 2005 u32 mac_mode, lcl_adv, rmt_adv; 2006 struct tg3 *tp = netdev_priv(dev); 2007 struct phy_device *phydev = tp->mdio_bus->phy_map[tp->phy_addr]; 2008 2009 spin_lock_bh(&tp->lock); 2010 2011 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK | 2012 MAC_MODE_HALF_DUPLEX); 2013 2014 oldflowctrl = tp->link_config.active_flowctrl; 2015 2016 if (phydev->link) { 2017 lcl_adv = 0; 2018 rmt_adv = 0; 2019 2020 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10) 2021 mac_mode |= MAC_MODE_PORT_MODE_MII; 2022 else if (phydev->speed == SPEED_1000 || 2023 tg3_asic_rev(tp) != ASIC_REV_5785) 2024 mac_mode |= 
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[tp->phy_addr];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 tg3_asic_rev(tp) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
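
/* PHY lifecycle helpers for the phylib-attached case:
 * tg3_phy_init()/tg3_phy_fini() connect and disconnect the PHY device,
 * while tg3_phy_start()/tg3_phy_stop() bracket normal operation and
 * restore any settings saved across a low-power transition.
 */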
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[tp->phy_addr];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
			     tg3_adjust_link, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[tp->phy_addr];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
		phydev->autoneg = tp->link_config.autoneg;
		phydev->advertising = tp->link_config.advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[tp->phy_addr]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}

static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}

static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);

	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
}

static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}

static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	tg3_phy_toggle_auxctl_smdsp(tp, false);
}
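
/* Snapshot the current Energy Efficient Ethernet state from the PHY's
 * clause-45 EEE registers and the CPMU into @eee, or into tp->eee when
 * @eee is NULL.
 */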
static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
{
	u32 val;
	struct ethtool_eee *dest = &tp->eee;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	if (eee)
		dest = eee;

	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
		return;

	/* Pull eee_active */
	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
		dest->eee_active = 1;
	} else
		dest->eee_active = 0;

	/* Pull lp advertised settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
		return;
	dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Pull advertised and eee_enabled settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
		return;
	dest->eee_enabled = !!val;
	dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Pull tx_lpi_enabled */
	val = tr32(TG3_CPMU_EEE_MODE);
	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);

	/* Pull lpi timer value */
	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
}

static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_eee_pull_config(tp, NULL);
		if (tp->eee.eee_active)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		if (current_link_up &&
		    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}

static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_flag(tp, 57765_CLASS)) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}

static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
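
/* DSP test-pattern check used by tg3_phy_reset_5703_4_5(): write a
 * known pattern into each of the four channels and read it back to
 * detect PHYs that need another reset pass before they are usable.
 */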
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}

static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}

static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt. */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | BMCR_SPEED1000);

		/* Set to master mode. */
		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_CTRL1000,
			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);

		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
		if (err)
			return err;

		/* Block the PHY control access. */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	tg3_phy_toggle_auxctl_smdsp(tp, false);

	tg3_writephy(tp, MII_CTRL1000, phy9_orig);

	err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
	if (err)
		return err;

	reg32 &= ~0x3000;
	tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

	return 0;
}

static void tg3_carrier_off(struct tg3 *tp)
{
	netif_carrier_off(tp->dev);
	tp->link_up = false;
}

static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF))
		netdev_warn(tp->dev,
			    "Management side-band traffic will be interrupted during phy settings change\n");
}

/* This will reset the tigon3 PHY if there is no valid link. */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && tp->link_up) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that
	 * support jumbo frames.
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					     val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
		tg3_phydsp_write(tp, 0xffb, 0x4000);

	tg3_phy_toggle_automdix(tp, true);
	tg3_phy_set_wirespeed(tp);
	return 0;
}

#define TG3_GPIO_MSG_DRVR_PRES		0x00000001
#define TG3_GPIO_MSG_NEED_VAUX		0x00000002
#define TG3_GPIO_MSG_MASK		(TG3_GPIO_MSG_DRVR_PRES | \
					 TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 12))

static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
{
	u32 status, shift;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719)
		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
	else
		status = tr32(TG3_CPMU_DRV_STATUS);

	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
	status &= ~(TG3_GPIO_MSG_MASK << shift);
	status |= (newstat << shift);

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719)
		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
	else
		tw32(TG3_CPMU_DRV_STATUS, status);

	return status >> TG3_APE_GPIO_MSG_SHIFT;
}

static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return 0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
			return -EIO;

		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);

		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
	} else {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	}

	return 0;
}

static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
{
	u32 grc_local_ctrl;

	if (!tg3_flag(tp, IS_NIC) ||
	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701)
		return;

	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);
}

static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}

static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
{
	u32 msg = 0;

	/* Serialize power state transitions */
	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
		return;

	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
		msg = TG3_GPIO_MSG_NEED_VAUX;

	msg = tg3_set_function_status(tp, msg);

	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
		goto done;

	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);

done:
	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
}

static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}

static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
		return 1;
	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
		if (speed != SPEED_10)
			return 1;
	} else if (speed == SPEED_10)
		return 1;

	return 0;
}

static bool tg3_phy_power_bug(struct tg3 *tp)
{
	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5700:
	case ASIC_REV_5704:
		return true;
	case ASIC_REV_5780:
		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			return true;
		return false;
	case ASIC_REV_5717:
		if (!tp->pci_fn)
			return true;
		return false;
	case ASIC_REV_5719:
	case ASIC_REV_5720:
		if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
		    !tp->pci_fn)
			return true;
		return false;
	}

	return false;
}

static bool tg3_phy_led_bug(struct tg3 *tp)
{
	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5719:
	case ASIC_REV_5720:
		if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
		    !tp->pci_fn)
			return true;
		return false;
	}

	return false;
}

static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
		return;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		if (!tg3_phy_led_bug(tp))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (tg3_phy_power_bug(tp))
		return;

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}

/* tp->lock is held. */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}

/* tp->lock is held. */
static void tg3_nvram_unlock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		if (tp->nvram_lock_cnt > 0)
			tp->nvram_lock_cnt--;
		if (tp->nvram_lock_cnt == 0)
			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
	}
}

/* tp->lock is held. */
static void tg3_enable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
	}
}

/* tp->lock is held. */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
	}
}
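
/* Fallback for parts without the NVRAM interface: drive the legacy
 * serial-EEPROM state machine through GRC_EEPROM_ADDR/GRC_EEPROM_DATA,
 * one 32-bit word at a time.
 */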
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}

#define NVRAM_CMD_TIMEOUT 5000

static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		usleep_range(10, 40);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}

static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr / tp->nvram_pagesize) <<
			ATMEL_AT45DB0X1B_PAGE_POS) +
		       (addr % tp->nvram_pagesize);

	return addr;
}

static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
			tp->nvram_pagesize) +
		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));

	return addr;
}

/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}

/* Ensures NVRAM data is in bytestream format. */
static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
{
	u32 v;
	int res = tg3_nvram_read(tp, offset, &v);
	if (!res)
		*val = cpu_to_be32(v);
	return res;
}
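
/* Write-side counterparts to the read helpers above.  Offsets and
 * lengths are assumed dword aligned; the EEPROM path reverses the
 * blind byteswap performed by tg3_nvram_read_using_eeprom().
 */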
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
					      u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			 EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
		     (0 << EEPROM_ADDR_DEVID_SHIFT) |
		     (addr & EEPROM_ADDR_ADDR_MASK) |
		     EEPROM_ADDR_START |
		     EEPROM_ADDR_WRITE);

		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}

/* offset and length are dword aligned */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
					    u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		phy_addr = offset & ~pagemask;

		for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));
			if (ret)
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));

			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
			if (ret)
				break;
		}
		if (ret)
			break;
	}

	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}

/* offset and length are dword aligned */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
					  u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
		    !tg3_flag(tp, FLASH) ||
		    !tg3_flag(tp, 57765_PLUS))
			tw32(NVRAM_ADDR, phy_addr);

		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			u32 cmd;

			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
			ret = tg3_nvram_exec_cmd(tp, cmd);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}

/* offset and length are dword aligned */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!tg3_flag(tp, NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	} else {
		u32 grc_mode;

		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
			ret = tg3_nvram_write_block_buffered(tp, offset, len,
							     buf);
		} else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
							       buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}

#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000

/* tp->lock is held. */
static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
{
	int i;
	const int iters = 10000;

	for (i = 0; i < iters; i++) {
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
			break;
		if (pci_channel_offline(tp->pdev))
			return -EBUSY;
	}

	return (i == iters) ? -EBUSY : 0;
}

/* tp->lock is held. */
static int tg3_rxcpu_pause(struct tg3 *tp)
{
	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);

	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
	udelay(10);

	return rc;
}

/* tp->lock is held. */
static int tg3_txcpu_pause(struct tg3 *tp)
{
	return tg3_pause_cpu(tp, TX_CPU_BASE);
}

/* tp->lock is held. */
static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
{
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE, 0x00000000);
}

/* tp->lock is held. */
static void tg3_rxcpu_resume(struct tg3 *tp)
{
	tg3_resume_cpu(tp, RX_CPU_BASE);
}
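
/* Halt one of the chip's embedded firmware CPUs.  5705 and later
 * parts have no separate TX CPU (hence the BUG_ON below), and the
 * 5906 exposes a single VCPU halted through GRC_VCPU_EXT_CTRL instead.
 */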
3652 */ 3653 if (tg3_flag(tp, IS_SSB_CORE)) 3654 return 0; 3655 3656 rc = tg3_txcpu_pause(tp); 3657 } 3658 3659 if (rc) { 3660 netdev_err(tp->dev, "%s timed out, %s CPU\n", 3661 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX"); 3662 return -ENODEV; 3663 } 3664 3665 /* Clear firmware's nvram arbitration. */ 3666 if (tg3_flag(tp, NVRAM)) 3667 tw32(NVRAM_SWARB, SWARB_REQ_CLR0); 3668 return 0; 3669 } 3670 3671 static int tg3_fw_data_len(struct tg3 *tp, 3672 const struct tg3_firmware_hdr *fw_hdr) 3673 { 3674 int fw_len; 3675 3676 /* Non fragmented firmware have one firmware header followed by a 3677 * contiguous chunk of data to be written. The length field in that 3678 * header is not the length of data to be written but the complete 3679 * length of the bss. The data length is determined based on 3680 * tp->fw->size minus headers. 3681 * 3682 * Fragmented firmware have a main header followed by multiple 3683 * fragments. Each fragment is identical to non fragmented firmware 3684 * with a firmware header followed by a contiguous chunk of data. In 3685 * the main header, the length field is unused and set to 0xffffffff. 3686 * In each fragment header the length is the entire size of that 3687 * fragment i.e. fragment data + header length. Data length is 3688 * therefore length field in the header minus TG3_FW_HDR_LEN. 3689 */ 3690 if (tp->fw_len == 0xffffffff) 3691 fw_len = be32_to_cpu(fw_hdr->len); 3692 else 3693 fw_len = tp->fw->size; 3694 3695 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32); 3696 } 3697 3698 /* tp->lock is held. */ 3699 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, 3700 u32 cpu_scratch_base, int cpu_scratch_size, 3701 const struct tg3_firmware_hdr *fw_hdr) 3702 { 3703 int err, i; 3704 void (*write_op)(struct tg3 *, u32, u32); 3705 int total_len = tp->fw->size; 3706 3707 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) { 3708 netdev_err(tp->dev, 3709 "%s: Trying to load TX cpu firmware which is 5705\n", 3710 __func__); 3711 return -EINVAL; 3712 } 3713 3714 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766) 3715 write_op = tg3_write_mem; 3716 else 3717 write_op = tg3_write_indirect_reg32; 3718 3719 if (tg3_asic_rev(tp) != ASIC_REV_57766) { 3720 /* It is possible that bootcode is still loading at this point. 3721 * Get the nvram lock first before halting the cpu. 3722 */ 3723 int lock_err = tg3_nvram_lock(tp); 3724 err = tg3_halt_cpu(tp, cpu_base); 3725 if (!lock_err) 3726 tg3_nvram_unlock(tp); 3727 if (err) 3728 goto out; 3729 3730 for (i = 0; i < cpu_scratch_size; i += sizeof(u32)) 3731 write_op(tp, cpu_scratch_base + i, 0); 3732 tw32(cpu_base + CPU_STATE, 0xffffffff); 3733 tw32(cpu_base + CPU_MODE, 3734 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT); 3735 } else { 3736 /* Subtract additional main header for fragmented firmware and 3737 * advance to the first fragment 3738 */ 3739 total_len -= TG3_FW_HDR_LEN; 3740 fw_hdr++; 3741 } 3742 3743 do { 3744 u32 *fw_data = (u32 *)(fw_hdr + 1); 3745 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++) 3746 write_op(tp, cpu_scratch_base + 3747 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) + 3748 (i * sizeof(u32)), 3749 be32_to_cpu(fw_data[i])); 3750 3751 total_len -= be32_to_cpu(fw_hdr->len); 3752 3753 /* Advance to next fragment */ 3754 fw_hdr = (struct tg3_firmware_hdr *) 3755 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len)); 3756 } while (total_len > 0); 3757 3758 err = 0; 3759 3760 out: 3761 return err; 3762 } 3763 3764 /* tp->lock is held. 
/* tp->lock is held. */
static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
{
	int i;
	const int iters = 5;

	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, pc);

	for (i = 0; i < iters; i++) {
		if (tr32(cpu_base + CPU_PC) == pc)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, pc);
		udelay(1000);
	}

	return (i == iters) ? -EBUSY : 0;
}

/* tp->lock is held. */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	int err;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and length.  We are setting complete length.
	 * length = end_address_of_bss - start_address_of_text.
	 * Remainder is the blob to be loaded contiguously
	 * from start address.
	 */

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC),
			   be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	tg3_rxcpu_resume(tp);

	return 0;
}

static int tg3_validate_rxcpu_state(struct tg3 *tp)
{
	const int iters = 1000;
	int i;
	u32 val;

	/* Wait for boot code to complete initialization and enter service
	 * loop.  It is then safe to download service patches
	 */
	for (i = 0; i < iters; i++) {
		if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
			break;

		udelay(10);
	}

	if (i == iters) {
		netdev_err(tp->dev, "Boot code not ready for service patches\n");
		return -EBUSY;
	}

	val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
	if (val & 0xff) {
		netdev_warn(tp->dev,
			    "Other patches exist. Not downloading EEE patch\n");
		return -EEXIST;
	}

	return 0;
}
3884 */ 3885 3886 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; 3887 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR) 3888 return; 3889 3890 if (tg3_rxcpu_pause(tp)) 3891 return; 3892 3893 /* tg3_load_firmware_cpu() will always succeed for the 57766 */ 3894 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr); 3895 3896 tg3_rxcpu_resume(tp); 3897 } 3898 3899 /* tp->lock is held. */ 3900 static int tg3_load_tso_firmware(struct tg3 *tp) 3901 { 3902 const struct tg3_firmware_hdr *fw_hdr; 3903 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size; 3904 int err; 3905 3906 if (!tg3_flag(tp, FW_TSO)) 3907 return 0; 3908 3909 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; 3910 3911 /* Firmware blob starts with version numbers, followed by 3912 start address and length. We are setting complete length. 3913 length = end_address_of_bss - start_address_of_text. 3914 Remainder is the blob to be loaded contiguously 3915 from start address. */ 3916 3917 cpu_scratch_size = tp->fw_len; 3918 3919 if (tg3_asic_rev(tp) == ASIC_REV_5705) { 3920 cpu_base = RX_CPU_BASE; 3921 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705; 3922 } else { 3923 cpu_base = TX_CPU_BASE; 3924 cpu_scratch_base = TX_CPU_SCRATCH_BASE; 3925 cpu_scratch_size = TX_CPU_SCRATCH_SIZE; 3926 } 3927 3928 err = tg3_load_firmware_cpu(tp, cpu_base, 3929 cpu_scratch_base, cpu_scratch_size, 3930 fw_hdr); 3931 if (err) 3932 return err; 3933 3934 /* Now startup the cpu. */ 3935 err = tg3_pause_cpu_and_set_pc(tp, cpu_base, 3936 be32_to_cpu(fw_hdr->base_addr)); 3937 if (err) { 3938 netdev_err(tp->dev, 3939 "%s fails to set CPU PC, is %08x should be %08x\n", 3940 __func__, tr32(cpu_base + CPU_PC), 3941 be32_to_cpu(fw_hdr->base_addr)); 3942 return -ENODEV; 3943 } 3944 3945 tg3_resume_cpu(tp, cpu_base); 3946 return 0; 3947 } 3948 3949 /* tp->lock is held. */ 3950 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index) 3951 { 3952 u32 addr_high, addr_low; 3953 3954 addr_high = ((mac_addr[0] << 8) | mac_addr[1]); 3955 addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) | 3956 (mac_addr[4] << 8) | mac_addr[5]); 3957 3958 if (index < 4) { 3959 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high); 3960 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low); 3961 } else { 3962 index -= 4; 3963 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high); 3964 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low); 3965 } 3966 } 3967 3968 /* tp->lock is held. */ 3969 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1) 3970 { 3971 u32 addr_high; 3972 int i; 3973 3974 for (i = 0; i < 4; i++) { 3975 if (i == 1 && skip_mac_1) 3976 continue; 3977 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i); 3978 } 3979 3980 if (tg3_asic_rev(tp) == ASIC_REV_5703 || 3981 tg3_asic_rev(tp) == ASIC_REV_5704) { 3982 for (i = 4; i < 16; i++) 3983 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i); 3984 } 3985 3986 addr_high = (tp->dev->dev_addr[0] + 3987 tp->dev->dev_addr[1] + 3988 tp->dev->dev_addr[2] + 3989 tp->dev->dev_addr[3] + 3990 tp->dev->dev_addr[4] + 3991 tp->dev->dev_addr[5]) & 3992 TX_BACKOFF_SEED_MASK; 3993 tw32(MAC_TX_BACKOFF_SEED, addr_high); 3994 } 3995 3996 static void tg3_enable_register_access(struct tg3 *tp) 3997 { 3998 /* 3999 * Make sure register accesses (indirect or otherwise) will function 4000 * correctly. 
4001 */ 4002 pci_write_config_dword(tp->pdev, 4003 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl); 4004 } 4005 4006 static int tg3_power_up(struct tg3 *tp) 4007 { 4008 int err; 4009 4010 tg3_enable_register_access(tp); 4011 4012 err = pci_set_power_state(tp->pdev, PCI_D0); 4013 if (!err) { 4014 /* Switch out of Vaux if it is a NIC */ 4015 tg3_pwrsrc_switch_to_vmain(tp); 4016 } else { 4017 netdev_err(tp->dev, "Transition to D0 failed\n"); 4018 } 4019 4020 return err; 4021 } 4022 4023 static int tg3_setup_phy(struct tg3 *, bool); 4024 4025 static int tg3_power_down_prepare(struct tg3 *tp) 4026 { 4027 u32 misc_host_ctrl; 4028 bool device_should_wake, do_low_power; 4029 4030 tg3_enable_register_access(tp); 4031 4032 /* Restore the CLKREQ setting. */ 4033 if (tg3_flag(tp, CLKREQ_BUG)) 4034 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL, 4035 PCI_EXP_LNKCTL_CLKREQ_EN); 4036 4037 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); 4038 tw32(TG3PCI_MISC_HOST_CTRL, 4039 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT); 4040 4041 device_should_wake = device_may_wakeup(&tp->pdev->dev) && 4042 tg3_flag(tp, WOL_ENABLE); 4043 4044 if (tg3_flag(tp, USE_PHYLIB)) { 4045 do_low_power = false; 4046 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) && 4047 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { 4048 struct phy_device *phydev; 4049 u32 phyid, advertising; 4050 4051 phydev = tp->mdio_bus->phy_map[tp->phy_addr]; 4052 4053 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; 4054 4055 tp->link_config.speed = phydev->speed; 4056 tp->link_config.duplex = phydev->duplex; 4057 tp->link_config.autoneg = phydev->autoneg; 4058 tp->link_config.advertising = phydev->advertising; 4059 4060 advertising = ADVERTISED_TP | 4061 ADVERTISED_Pause | 4062 ADVERTISED_Autoneg | 4063 ADVERTISED_10baseT_Half; 4064 4065 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) { 4066 if (tg3_flag(tp, WOL_SPEED_100MB)) 4067 advertising |= 4068 ADVERTISED_100baseT_Half | 4069 ADVERTISED_100baseT_Full | 4070 ADVERTISED_10baseT_Full; 4071 else 4072 advertising |= ADVERTISED_10baseT_Full; 4073 } 4074 4075 phydev->advertising = advertising; 4076 4077 phy_start_aneg(phydev); 4078 4079 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask; 4080 if (phyid != PHY_ID_BCMAC131) { 4081 phyid &= PHY_BCM_OUI_MASK; 4082 if (phyid == PHY_BCM_OUI_1 || 4083 phyid == PHY_BCM_OUI_2 || 4084 phyid == PHY_BCM_OUI_3) 4085 do_low_power = true; 4086 } 4087 } 4088 } else { 4089 do_low_power = true; 4090 4091 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) 4092 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; 4093 4094 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 4095 tg3_setup_phy(tp, false); 4096 } 4097 4098 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 4099 u32 val; 4100 4101 val = tr32(GRC_VCPU_EXT_CTRL); 4102 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL); 4103 } else if (!tg3_flag(tp, ENABLE_ASF)) { 4104 int i; 4105 u32 val; 4106 4107 for (i = 0; i < 200; i++) { 4108 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val); 4109 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) 4110 break; 4111 msleep(1); 4112 } 4113 } 4114 if (tg3_flag(tp, WOL_CAP)) 4115 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE | 4116 WOL_DRV_STATE_SHUTDOWN | 4117 WOL_DRV_WOL | 4118 WOL_SET_MAGIC_PKT); 4119 4120 if (device_should_wake) { 4121 u32 mac_mode; 4122 4123 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { 4124 if (do_low_power && 4125 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) { 4126 tg3_phy_auxctl_write(tp, 4127 MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 4128 MII_TG3_AUXCTL_PCTL_WOL_EN | 4129 
MII_TG3_AUXCTL_PCTL_100TX_LPWR | 4130 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC); 4131 udelay(40); 4132 } 4133 4134 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 4135 mac_mode = MAC_MODE_PORT_MODE_GMII; 4136 else if (tp->phy_flags & 4137 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) { 4138 if (tp->link_config.active_speed == SPEED_1000) 4139 mac_mode = MAC_MODE_PORT_MODE_GMII; 4140 else 4141 mac_mode = MAC_MODE_PORT_MODE_MII; 4142 } else 4143 mac_mode = MAC_MODE_PORT_MODE_MII; 4144 4145 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY; 4146 if (tg3_asic_rev(tp) == ASIC_REV_5700) { 4147 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ? 4148 SPEED_100 : SPEED_10; 4149 if (tg3_5700_link_polarity(tp, speed)) 4150 mac_mode |= MAC_MODE_LINK_POLARITY; 4151 else 4152 mac_mode &= ~MAC_MODE_LINK_POLARITY; 4153 } 4154 } else { 4155 mac_mode = MAC_MODE_PORT_MODE_TBI; 4156 } 4157 4158 if (!tg3_flag(tp, 5750_PLUS)) 4159 tw32(MAC_LED_CTRL, tp->led_ctrl); 4160 4161 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE; 4162 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) && 4163 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE))) 4164 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL; 4165 4166 if (tg3_flag(tp, ENABLE_APE)) 4167 mac_mode |= MAC_MODE_APE_TX_EN | 4168 MAC_MODE_APE_RX_EN | 4169 MAC_MODE_TDE_ENABLE; 4170 4171 tw32_f(MAC_MODE, mac_mode); 4172 udelay(100); 4173 4174 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE); 4175 udelay(10); 4176 } 4177 4178 if (!tg3_flag(tp, WOL_SPEED_100MB) && 4179 (tg3_asic_rev(tp) == ASIC_REV_5700 || 4180 tg3_asic_rev(tp) == ASIC_REV_5701)) { 4181 u32 base_val; 4182 4183 base_val = tp->pci_clock_ctrl; 4184 base_val |= (CLOCK_CTRL_RXCLK_DISABLE | 4185 CLOCK_CTRL_TXCLK_DISABLE); 4186 4187 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK | 4188 CLOCK_CTRL_PWRDOWN_PLL133, 40); 4189 } else if (tg3_flag(tp, 5780_CLASS) || 4190 tg3_flag(tp, CPMU_PRESENT) || 4191 tg3_asic_rev(tp) == ASIC_REV_5906) { 4192 /* do nothing */ 4193 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) { 4194 u32 newbits1, newbits2; 4195 4196 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 4197 tg3_asic_rev(tp) == ASIC_REV_5701) { 4198 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE | 4199 CLOCK_CTRL_TXCLK_DISABLE | 4200 CLOCK_CTRL_ALTCLK); 4201 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; 4202 } else if (tg3_flag(tp, 5705_PLUS)) { 4203 newbits1 = CLOCK_CTRL_625_CORE; 4204 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK; 4205 } else { 4206 newbits1 = CLOCK_CTRL_ALTCLK; 4207 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; 4208 } 4209 4210 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1, 4211 40); 4212 4213 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2, 4214 40); 4215 4216 if (!tg3_flag(tp, 5705_PLUS)) { 4217 u32 newbits3; 4218 4219 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 4220 tg3_asic_rev(tp) == ASIC_REV_5701) { 4221 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE | 4222 CLOCK_CTRL_TXCLK_DISABLE | 4223 CLOCK_CTRL_44MHZ_CORE); 4224 } else { 4225 newbits3 = CLOCK_CTRL_44MHZ_CORE; 4226 } 4227 4228 tw32_wait_f(TG3PCI_CLOCK_CTRL, 4229 tp->pci_clock_ctrl | newbits3, 40); 4230 } 4231 } 4232 4233 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF)) 4234 tg3_power_down_phy(tp, do_low_power); 4235 4236 tg3_frob_aux_power(tp, true); 4237 4238 /* Workaround for unstable PLL clock */ 4239 if ((!tg3_flag(tp, IS_SSB_CORE)) && 4240 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) || 4241 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) { 4242 u32 val = tr32(0x7d00); 4243 4244 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1); 4245 tw32(0x7d00, val); 4246 if 
(!tg3_flag(tp, ENABLE_ASF)) { 4247 int err; 4248 4249 err = tg3_nvram_lock(tp); 4250 tg3_halt_cpu(tp, RX_CPU_BASE); 4251 if (!err) 4252 tg3_nvram_unlock(tp); 4253 } 4254 } 4255 4256 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN); 4257 4258 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN); 4259 4260 return 0; 4261 } 4262 4263 static void tg3_power_down(struct tg3 *tp) 4264 { 4265 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE)); 4266 pci_set_power_state(tp->pdev, PCI_D3hot); 4267 } 4268 4269 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex) 4270 { 4271 switch (val & MII_TG3_AUX_STAT_SPDMASK) { 4272 case MII_TG3_AUX_STAT_10HALF: 4273 *speed = SPEED_10; 4274 *duplex = DUPLEX_HALF; 4275 break; 4276 4277 case MII_TG3_AUX_STAT_10FULL: 4278 *speed = SPEED_10; 4279 *duplex = DUPLEX_FULL; 4280 break; 4281 4282 case MII_TG3_AUX_STAT_100HALF: 4283 *speed = SPEED_100; 4284 *duplex = DUPLEX_HALF; 4285 break; 4286 4287 case MII_TG3_AUX_STAT_100FULL: 4288 *speed = SPEED_100; 4289 *duplex = DUPLEX_FULL; 4290 break; 4291 4292 case MII_TG3_AUX_STAT_1000HALF: 4293 *speed = SPEED_1000; 4294 *duplex = DUPLEX_HALF; 4295 break; 4296 4297 case MII_TG3_AUX_STAT_1000FULL: 4298 *speed = SPEED_1000; 4299 *duplex = DUPLEX_FULL; 4300 break; 4301 4302 default: 4303 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 4304 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 : 4305 SPEED_10; 4306 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL : 4307 DUPLEX_HALF; 4308 break; 4309 } 4310 *speed = SPEED_UNKNOWN; 4311 *duplex = DUPLEX_UNKNOWN; 4312 break; 4313 } 4314 } 4315 4316 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl) 4317 { 4318 int err = 0; 4319 u32 val, new_adv; 4320 4321 new_adv = ADVERTISE_CSMA; 4322 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL; 4323 new_adv |= mii_advertise_flowctrl(flowctrl); 4324 4325 err = tg3_writephy(tp, MII_ADVERTISE, new_adv); 4326 if (err) 4327 goto done; 4328 4329 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4330 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise); 4331 4332 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || 4333 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) 4334 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER; 4335 4336 err = tg3_writephy(tp, MII_CTRL1000, new_adv); 4337 if (err) 4338 goto done; 4339 } 4340 4341 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) 4342 goto done; 4343 4344 tw32(TG3_CPMU_EEE_MODE, 4345 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE); 4346 4347 err = tg3_phy_toggle_auxctl_smdsp(tp, true); 4348 if (!err) { 4349 u32 err2; 4350 4351 val = 0; 4352 /* Advertise 100-BaseTX EEE ability */ 4353 if (advertise & ADVERTISED_100baseT_Full) 4354 val |= MDIO_AN_EEE_ADV_100TX; 4355 /* Advertise 1000-BaseT EEE ability */ 4356 if (advertise & ADVERTISED_1000baseT_Full) 4357 val |= MDIO_AN_EEE_ADV_1000T; 4358 4359 if (!tp->eee.eee_enabled) { 4360 val = 0; 4361 tp->eee.advertised = 0; 4362 } else { 4363 tp->eee.advertised = advertise & 4364 (ADVERTISED_100baseT_Full | 4365 ADVERTISED_1000baseT_Full); 4366 } 4367 4368 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val); 4369 if (err) 4370 val = 0; 4371 4372 switch (tg3_asic_rev(tp)) { 4373 case ASIC_REV_5717: 4374 case ASIC_REV_57765: 4375 case ASIC_REV_57766: 4376 case ASIC_REV_5719: 4377 /* If we advertised any eee advertisements above... 
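* (val is non-zero), replace it with the DSP TAP26 work-around bits
* below; otherwise the zero write clears them.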
*/ 4378 if (val) 4379 val = MII_TG3_DSP_TAP26_ALNOKO | 4380 MII_TG3_DSP_TAP26_RMRXSTO | 4381 MII_TG3_DSP_TAP26_OPCSINPT; 4382 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); 4383 /* Fall through */ 4384 case ASIC_REV_5720: 4385 case ASIC_REV_5762: 4386 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val)) 4387 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val | 4388 MII_TG3_DSP_CH34TP2_HIBW01); 4389 } 4390 4391 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false); 4392 if (!err) 4393 err = err2; 4394 } 4395 4396 done: 4397 return err; 4398 } 4399 4400 static void tg3_phy_copper_begin(struct tg3 *tp) 4401 { 4402 if (tp->link_config.autoneg == AUTONEG_ENABLE || 4403 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { 4404 u32 adv, fc; 4405 4406 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) && 4407 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) { 4408 adv = ADVERTISED_10baseT_Half | 4409 ADVERTISED_10baseT_Full; 4410 if (tg3_flag(tp, WOL_SPEED_100MB)) 4411 adv |= ADVERTISED_100baseT_Half | 4412 ADVERTISED_100baseT_Full; 4413 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) { 4414 if (!(tp->phy_flags & 4415 TG3_PHYFLG_DISABLE_1G_HD_ADV)) 4416 adv |= ADVERTISED_1000baseT_Half; 4417 adv |= ADVERTISED_1000baseT_Full; 4418 } 4419 4420 fc = FLOW_CTRL_TX | FLOW_CTRL_RX; 4421 } else { 4422 adv = tp->link_config.advertising; 4423 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) 4424 adv &= ~(ADVERTISED_1000baseT_Half | 4425 ADVERTISED_1000baseT_Full); 4426 4427 fc = tp->link_config.flowctrl; 4428 } 4429 4430 tg3_phy_autoneg_cfg(tp, adv, fc); 4431 4432 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) && 4433 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) { 4434 /* Normally during power down we want to autonegotiate 4435 * the lowest possible speed for WOL. However, to avoid 4436 * link flap, we leave it untouched. 4437 */ 4438 return; 4439 } 4440 4441 tg3_writephy(tp, MII_BMCR, 4442 BMCR_ANENABLE | BMCR_ANRESTART); 4443 } else { 4444 int i; 4445 u32 bmcr, orig_bmcr; 4446 4447 tp->link_config.active_speed = tp->link_config.speed; 4448 tp->link_config.active_duplex = tp->link_config.duplex; 4449 4450 if (tg3_asic_rev(tp) == ASIC_REV_5714) { 4451 /* With autoneg disabled, 5715 only links up when the 4452 * advertisement register has the configured speed 4453 * enabled. 
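* So make sure every 10/100 mode stays advertised (ADVERTISE_ALL
* below).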
4454 */ 4455 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL); 4456 } 4457 4458 bmcr = 0; 4459 switch (tp->link_config.speed) { 4460 default: 4461 case SPEED_10: 4462 break; 4463 4464 case SPEED_100: 4465 bmcr |= BMCR_SPEED100; 4466 break; 4467 4468 case SPEED_1000: 4469 bmcr |= BMCR_SPEED1000; 4470 break; 4471 } 4472 4473 if (tp->link_config.duplex == DUPLEX_FULL) 4474 bmcr |= BMCR_FULLDPLX; 4475 4476 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) && 4477 (bmcr != orig_bmcr)) { 4478 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK); 4479 for (i = 0; i < 1500; i++) { 4480 u32 tmp; 4481 4482 udelay(10); 4483 if (tg3_readphy(tp, MII_BMSR, &tmp) || 4484 tg3_readphy(tp, MII_BMSR, &tmp)) 4485 continue; 4486 if (!(tmp & BMSR_LSTATUS)) { 4487 udelay(40); 4488 break; 4489 } 4490 } 4491 tg3_writephy(tp, MII_BMCR, bmcr); 4492 udelay(40); 4493 } 4494 } 4495 } 4496 4497 static int tg3_phy_pull_config(struct tg3 *tp) 4498 { 4499 int err; 4500 u32 val; 4501 4502 err = tg3_readphy(tp, MII_BMCR, &val); 4503 if (err) 4504 goto done; 4505 4506 if (!(val & BMCR_ANENABLE)) { 4507 tp->link_config.autoneg = AUTONEG_DISABLE; 4508 tp->link_config.advertising = 0; 4509 tg3_flag_clear(tp, PAUSE_AUTONEG); 4510 4511 err = -EIO; 4512 4513 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) { 4514 case 0: 4515 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) 4516 goto done; 4517 4518 tp->link_config.speed = SPEED_10; 4519 break; 4520 case BMCR_SPEED100: 4521 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) 4522 goto done; 4523 4524 tp->link_config.speed = SPEED_100; 4525 break; 4526 case BMCR_SPEED1000: 4527 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4528 tp->link_config.speed = SPEED_1000; 4529 break; 4530 } 4531 /* Fall through */ 4532 default: 4533 goto done; 4534 } 4535 4536 if (val & BMCR_FULLDPLX) 4537 tp->link_config.duplex = DUPLEX_FULL; 4538 else 4539 tp->link_config.duplex = DUPLEX_HALF; 4540 4541 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX; 4542 4543 err = 0; 4544 goto done; 4545 } 4546 4547 tp->link_config.autoneg = AUTONEG_ENABLE; 4548 tp->link_config.advertising = ADVERTISED_Autoneg; 4549 tg3_flag_set(tp, PAUSE_AUTONEG); 4550 4551 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { 4552 u32 adv; 4553 4554 err = tg3_readphy(tp, MII_ADVERTISE, &val); 4555 if (err) 4556 goto done; 4557 4558 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL); 4559 tp->link_config.advertising |= adv | ADVERTISED_TP; 4560 4561 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val); 4562 } else { 4563 tp->link_config.advertising |= ADVERTISED_FIBRE; 4564 } 4565 4566 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4567 u32 adv; 4568 4569 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { 4570 err = tg3_readphy(tp, MII_CTRL1000, &val); 4571 if (err) 4572 goto done; 4573 4574 adv = mii_ctrl1000_to_ethtool_adv_t(val); 4575 } else { 4576 err = tg3_readphy(tp, MII_ADVERTISE, &val); 4577 if (err) 4578 goto done; 4579 4580 adv = tg3_decode_flowctrl_1000X(val); 4581 tp->link_config.flowctrl = adv; 4582 4583 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL); 4584 adv = mii_adv_to_ethtool_adv_x(val); 4585 } 4586 4587 tp->link_config.advertising |= adv; 4588 } 4589 4590 done: 4591 return err; 4592 } 4593 4594 static int tg3_init_5401phy_dsp(struct tg3 *tp) 4595 { 4596 int err; 4597 4598 /* Turn off tap power management. 
*/ 4599 /* Set Extended packet length bit */ 4600 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20); 4601 4602 err |= tg3_phydsp_write(tp, 0x0012, 0x1804); 4603 err |= tg3_phydsp_write(tp, 0x0013, 0x1204); 4604 err |= tg3_phydsp_write(tp, 0x8006, 0x0132); 4605 err |= tg3_phydsp_write(tp, 0x8006, 0x0232); 4606 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20); 4607 4608 udelay(40); 4609 4610 return err; 4611 } 4612 4613 static bool tg3_phy_eee_config_ok(struct tg3 *tp) 4614 { 4615 struct ethtool_eee eee; 4616 4617 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) 4618 return true; 4619 4620 tg3_eee_pull_config(tp, &eee); 4621 4622 if (tp->eee.eee_enabled) { 4623 if (tp->eee.advertised != eee.advertised || 4624 tp->eee.tx_lpi_timer != eee.tx_lpi_timer || 4625 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled) 4626 return false; 4627 } else { 4628 /* EEE is disabled but we're advertising */ 4629 if (eee.advertised) 4630 return false; 4631 } 4632 4633 return true; 4634 } 4635 4636 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv) 4637 { 4638 u32 advmsk, tgtadv, advertising; 4639 4640 advertising = tp->link_config.advertising; 4641 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL; 4642 4643 advmsk = ADVERTISE_ALL; 4644 if (tp->link_config.active_duplex == DUPLEX_FULL) { 4645 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl); 4646 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 4647 } 4648 4649 if (tg3_readphy(tp, MII_ADVERTISE, lcladv)) 4650 return false; 4651 4652 if ((*lcladv & advmsk) != tgtadv) 4653 return false; 4654 4655 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4656 u32 tg3_ctrl; 4657 4658 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising); 4659 4660 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl)) 4661 return false; 4662 4663 if (tgtadv && 4664 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || 4665 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) { 4666 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER; 4667 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL | 4668 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER); 4669 } else { 4670 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL); 4671 } 4672 4673 if (tg3_ctrl != tgtadv) 4674 return false; 4675 } 4676 4677 return true; 4678 } 4679 4680 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv) 4681 { 4682 u32 lpeth = 0; 4683 4684 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4685 u32 val; 4686 4687 if (tg3_readphy(tp, MII_STAT1000, &val)) 4688 return false; 4689 4690 lpeth = mii_stat1000_to_ethtool_lpa_t(val); 4691 } 4692 4693 if (tg3_readphy(tp, MII_LPA, rmtadv)) 4694 return false; 4695 4696 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv); 4697 tp->link_config.rmt_adv = lpeth; 4698 4699 return true; 4700 } 4701 4702 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up) 4703 { 4704 if (curr_link_up != tp->link_up) { 4705 if (curr_link_up) { 4706 netif_carrier_on(tp->dev); 4707 } else { 4708 netif_carrier_off(tp->dev); 4709 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 4710 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 4711 } 4712 4713 tg3_link_report(tp); 4714 return true; 4715 } 4716 4717 return false; 4718 } 4719 4720 static void tg3_clear_mac_status(struct tg3 *tp) 4721 { 4722 tw32(MAC_EVENT, 0); 4723 4724 tw32_f(MAC_STATUS, 4725 MAC_STATUS_SYNC_CHANGED | 4726 MAC_STATUS_CFG_CHANGED | 4727 MAC_STATUS_MI_COMPLETION | 4728 MAC_STATUS_LNKSTATE_CHANGED); 4729 udelay(40); 4730 } 4731 4732 static void tg3_setup_eee(struct tg3 *tp) 4733 { 4734 u32 val; 4735 4736 
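/* Program the CPMU EEE block below: select the link-idle detection
 * inputs, set the LPI exit timer, build the EEE mode word (applied
 * only while EEE is enabled), then load the two debounce timers.
 */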
val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 | 4737 TG3_CPMU_EEE_LNKIDL_UART_IDL; 4738 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) 4739 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT; 4740 4741 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val); 4742 4743 tw32_f(TG3_CPMU_EEE_CTRL, 4744 TG3_CPMU_EEE_CTRL_EXIT_20_1_US); 4745 4746 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET | 4747 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) | 4748 TG3_CPMU_EEEMD_LPI_IN_RX | 4749 TG3_CPMU_EEEMD_EEE_ENABLE; 4750 4751 if (tg3_asic_rev(tp) != ASIC_REV_5717) 4752 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN; 4753 4754 if (tg3_flag(tp, ENABLE_APE)) 4755 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN; 4756 4757 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0); 4758 4759 tw32_f(TG3_CPMU_EEE_DBTMR1, 4760 TG3_CPMU_DBTMR1_PCIEXIT_2047US | 4761 (tp->eee.tx_lpi_timer & 0xffff)); 4762 4763 tw32_f(TG3_CPMU_EEE_DBTMR2, 4764 TG3_CPMU_DBTMR2_APE_TX_2047US | 4765 TG3_CPMU_DBTMR2_TXIDXEQ_2047US); 4766 } 4767 4768 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset) 4769 { 4770 bool current_link_up; 4771 u32 bmsr, val; 4772 u32 lcl_adv, rmt_adv; 4773 u16 current_speed; 4774 u8 current_duplex; 4775 int i, err; 4776 4777 tg3_clear_mac_status(tp); 4778 4779 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { 4780 tw32_f(MAC_MI_MODE, 4781 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); 4782 udelay(80); 4783 } 4784 4785 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0); 4786 4787 /* Some third-party PHYs need to be reset on link going 4788 * down. 4789 */ 4790 if ((tg3_asic_rev(tp) == ASIC_REV_5703 || 4791 tg3_asic_rev(tp) == ASIC_REV_5704 || 4792 tg3_asic_rev(tp) == ASIC_REV_5705) && 4793 tp->link_up) { 4794 tg3_readphy(tp, MII_BMSR, &bmsr); 4795 if (!tg3_readphy(tp, MII_BMSR, &bmsr) && 4796 !(bmsr & BMSR_LSTATUS)) 4797 force_reset = true; 4798 } 4799 if (force_reset) 4800 tg3_phy_reset(tp); 4801 4802 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { 4803 tg3_readphy(tp, MII_BMSR, &bmsr); 4804 if (tg3_readphy(tp, MII_BMSR, &bmsr) || 4805 !tg3_flag(tp, INIT_COMPLETE)) 4806 bmsr = 0; 4807 4808 if (!(bmsr & BMSR_LSTATUS)) { 4809 err = tg3_init_5401phy_dsp(tp); 4810 if (err) 4811 return err; 4812 4813 tg3_readphy(tp, MII_BMSR, &bmsr); 4814 for (i = 0; i < 1000; i++) { 4815 udelay(10); 4816 if (!tg3_readphy(tp, MII_BMSR, &bmsr) && 4817 (bmsr & BMSR_LSTATUS)) { 4818 udelay(40); 4819 break; 4820 } 4821 } 4822 4823 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) == 4824 TG3_PHY_REV_BCM5401_B0 && 4825 !(bmsr & BMSR_LSTATUS) && 4826 tp->link_config.active_speed == SPEED_1000) { 4827 err = tg3_phy_reset(tp); 4828 if (!err) 4829 err = tg3_init_5401phy_dsp(tp); 4830 if (err) 4831 return err; 4832 } 4833 } 4834 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || 4835 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) { 4836 /* 5701 {A0,B0} CRC bug workaround */ 4837 tg3_writephy(tp, 0x15, 0x0a75); 4838 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68); 4839 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); 4840 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68); 4841 } 4842 4843 /* Clear pending interrupts... 
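* MII_TG3_ISTAT is read-to-clear, so read it back twice.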
*/ 4844 tg3_readphy(tp, MII_TG3_ISTAT, &val); 4845 tg3_readphy(tp, MII_TG3_ISTAT, &val); 4846 4847 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) 4848 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG); 4849 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) 4850 tg3_writephy(tp, MII_TG3_IMASK, ~0); 4851 4852 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 4853 tg3_asic_rev(tp) == ASIC_REV_5701) { 4854 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1) 4855 tg3_writephy(tp, MII_TG3_EXT_CTRL, 4856 MII_TG3_EXT_CTRL_LNK3_LED_MODE); 4857 else 4858 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0); 4859 } 4860 4861 current_link_up = false; 4862 current_speed = SPEED_UNKNOWN; 4863 current_duplex = DUPLEX_UNKNOWN; 4864 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE; 4865 tp->link_config.rmt_adv = 0; 4866 4867 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) { 4868 err = tg3_phy_auxctl_read(tp, 4869 MII_TG3_AUXCTL_SHDWSEL_MISCTEST, 4870 &val); 4871 if (!err && !(val & (1 << 10))) { 4872 tg3_phy_auxctl_write(tp, 4873 MII_TG3_AUXCTL_SHDWSEL_MISCTEST, 4874 val | (1 << 10)); 4875 goto relink; 4876 } 4877 } 4878 4879 bmsr = 0; 4880 for (i = 0; i < 100; i++) { 4881 tg3_readphy(tp, MII_BMSR, &bmsr); 4882 if (!tg3_readphy(tp, MII_BMSR, &bmsr) && 4883 (bmsr & BMSR_LSTATUS)) 4884 break; 4885 udelay(40); 4886 } 4887 4888 if (bmsr & BMSR_LSTATUS) { 4889 u32 aux_stat, bmcr; 4890 4891 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat); 4892 for (i = 0; i < 2000; i++) { 4893 udelay(10); 4894 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) && 4895 aux_stat) 4896 break; 4897 } 4898 4899 tg3_aux_stat_to_speed_duplex(tp, aux_stat, 4900 &current_speed, 4901 &current_duplex); 4902 4903 bmcr = 0; 4904 for (i = 0; i < 200; i++) { 4905 tg3_readphy(tp, MII_BMCR, &bmcr); 4906 if (tg3_readphy(tp, MII_BMCR, &bmcr)) 4907 continue; 4908 if (bmcr && bmcr != 0x7fff) 4909 break; 4910 udelay(10); 4911 } 4912 4913 lcl_adv = 0; 4914 rmt_adv = 0; 4915 4916 tp->link_config.active_speed = current_speed; 4917 tp->link_config.active_duplex = current_duplex; 4918 4919 if (tp->link_config.autoneg == AUTONEG_ENABLE) { 4920 bool eee_config_ok = tg3_phy_eee_config_ok(tp); 4921 4922 if ((bmcr & BMCR_ANENABLE) && 4923 eee_config_ok && 4924 tg3_phy_copper_an_config_ok(tp, &lcl_adv) && 4925 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv)) 4926 current_link_up = true; 4927 4928 /* EEE settings changes take effect only after a phy 4929 * reset. If we have skipped a reset due to Link Flap 4930 * Avoidance being enabled, do it now.
4931 */ 4932 if (!eee_config_ok && 4933 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && 4934 !force_reset) { 4935 tg3_setup_eee(tp); 4936 tg3_phy_reset(tp); 4937 } 4938 } else { 4939 if (!(bmcr & BMCR_ANENABLE) && 4940 tp->link_config.speed == current_speed && 4941 tp->link_config.duplex == current_duplex) { 4942 current_link_up = true; 4943 } 4944 } 4945 4946 if (current_link_up && 4947 tp->link_config.active_duplex == DUPLEX_FULL) { 4948 u32 reg, bit; 4949 4950 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 4951 reg = MII_TG3_FET_GEN_STAT; 4952 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT; 4953 } else { 4954 reg = MII_TG3_EXT_STAT; 4955 bit = MII_TG3_EXT_STAT_MDIX; 4956 } 4957 4958 if (!tg3_readphy(tp, reg, &val) && (val & bit)) 4959 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE; 4960 4961 tg3_setup_flow_control(tp, lcl_adv, rmt_adv); 4962 } 4963 } 4964 4965 relink: 4966 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { 4967 tg3_phy_copper_begin(tp); 4968 4969 if (tg3_flag(tp, ROBOSWITCH)) { 4970 current_link_up = true; 4971 /* FIXME: when BCM5325 switch is used use 100 MBit/s */ 4972 current_speed = SPEED_1000; 4973 current_duplex = DUPLEX_FULL; 4974 tp->link_config.active_speed = current_speed; 4975 tp->link_config.active_duplex = current_duplex; 4976 } 4977 4978 tg3_readphy(tp, MII_BMSR, &bmsr); 4979 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) || 4980 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)) 4981 current_link_up = true; 4982 } 4983 4984 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK; 4985 if (current_link_up) { 4986 if (tp->link_config.active_speed == SPEED_100 || 4987 tp->link_config.active_speed == SPEED_10) 4988 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 4989 else 4990 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 4991 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) 4992 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 4993 else 4994 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 4995 4996 /* In order for the 5750 core in BCM4785 chip to work properly 4997 * in RGMII mode, the Led Control Register must be set up. 4998 */ 4999 if (tg3_flag(tp, RGMII_MODE)) { 5000 u32 led_ctrl = tr32(MAC_LED_CTRL); 5001 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON); 5002 5003 if (tp->link_config.active_speed == SPEED_10) 5004 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE; 5005 else if (tp->link_config.active_speed == SPEED_100) 5006 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE | 5007 LED_CTRL_100MBPS_ON); 5008 else if (tp->link_config.active_speed == SPEED_1000) 5009 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE | 5010 LED_CTRL_1000MBPS_ON); 5011 5012 tw32(MAC_LED_CTRL, led_ctrl); 5013 udelay(40); 5014 } 5015 5016 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; 5017 if (tp->link_config.active_duplex == DUPLEX_HALF) 5018 tp->mac_mode |= MAC_MODE_HALF_DUPLEX; 5019 5020 if (tg3_asic_rev(tp) == ASIC_REV_5700) { 5021 if (current_link_up && 5022 tg3_5700_link_polarity(tp, tp->link_config.active_speed)) 5023 tp->mac_mode |= MAC_MODE_LINK_POLARITY; 5024 else 5025 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; 5026 } 5027 5028 /* ??? Without this setting Netgear GA302T PHY does not 5029 * ??? send/receive packets... 5030 */ 5031 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 && 5032 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) { 5033 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL; 5034 tw32_f(MAC_MI_MODE, tp->mi_mode); 5035 udelay(80); 5036 } 5037 5038 tw32_f(MAC_MODE, tp->mac_mode); 5039 udelay(40); 5040 5041 tg3_phy_eee_adjust(tp, current_link_up); 5042 5043 if (tg3_flag(tp, USE_LINKCHG_REG)) { 5044 /* Polled via timer. 
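* Writing 0 to MAC_EVENT masks the link-change attention; the
* periodic timer picks up link transitions instead.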
*/ 5045 tw32_f(MAC_EVENT, 0); 5046 } else { 5047 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); 5048 } 5049 udelay(40); 5050 5051 if (tg3_asic_rev(tp) == ASIC_REV_5700 && 5052 current_link_up && 5053 tp->link_config.active_speed == SPEED_1000 && 5054 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) { 5055 udelay(120); 5056 tw32_f(MAC_STATUS, 5057 (MAC_STATUS_SYNC_CHANGED | 5058 MAC_STATUS_CFG_CHANGED)); 5059 udelay(40); 5060 tg3_write_mem(tp, 5061 NIC_SRAM_FIRMWARE_MBOX, 5062 NIC_SRAM_FIRMWARE_MBOX_MAGIC2); 5063 } 5064 5065 /* Prevent send BD corruption. */ 5066 if (tg3_flag(tp, CLKREQ_BUG)) { 5067 if (tp->link_config.active_speed == SPEED_100 || 5068 tp->link_config.active_speed == SPEED_10) 5069 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL, 5070 PCI_EXP_LNKCTL_CLKREQ_EN); 5071 else 5072 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL, 5073 PCI_EXP_LNKCTL_CLKREQ_EN); 5074 } 5075 5076 tg3_test_and_report_link_chg(tp, current_link_up); 5077 5078 return 0; 5079 } 5080 5081 struct tg3_fiber_aneginfo { 5082 int state; 5083 #define ANEG_STATE_UNKNOWN 0 5084 #define ANEG_STATE_AN_ENABLE 1 5085 #define ANEG_STATE_RESTART_INIT 2 5086 #define ANEG_STATE_RESTART 3 5087 #define ANEG_STATE_DISABLE_LINK_OK 4 5088 #define ANEG_STATE_ABILITY_DETECT_INIT 5 5089 #define ANEG_STATE_ABILITY_DETECT 6 5090 #define ANEG_STATE_ACK_DETECT_INIT 7 5091 #define ANEG_STATE_ACK_DETECT 8 5092 #define ANEG_STATE_COMPLETE_ACK_INIT 9 5093 #define ANEG_STATE_COMPLETE_ACK 10 5094 #define ANEG_STATE_IDLE_DETECT_INIT 11 5095 #define ANEG_STATE_IDLE_DETECT 12 5096 #define ANEG_STATE_LINK_OK 13 5097 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14 5098 #define ANEG_STATE_NEXT_PAGE_WAIT 15 5099 5100 u32 flags; 5101 #define MR_AN_ENABLE 0x00000001 5102 #define MR_RESTART_AN 0x00000002 5103 #define MR_AN_COMPLETE 0x00000004 5104 #define MR_PAGE_RX 0x00000008 5105 #define MR_NP_LOADED 0x00000010 5106 #define MR_TOGGLE_TX 0x00000020 5107 #define MR_LP_ADV_FULL_DUPLEX 0x00000040 5108 #define MR_LP_ADV_HALF_DUPLEX 0x00000080 5109 #define MR_LP_ADV_SYM_PAUSE 0x00000100 5110 #define MR_LP_ADV_ASYM_PAUSE 0x00000200 5111 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400 5112 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800 5113 #define MR_LP_ADV_NEXT_PAGE 0x00001000 5114 #define MR_TOGGLE_RX 0x00002000 5115 #define MR_NP_RX 0x00004000 5116 5117 #define MR_LINK_OK 0x80000000 5118 5119 unsigned long link_time, cur_time; 5120 5121 u32 ability_match_cfg; 5122 int ability_match_count; 5123 5124 char ability_match, idle_match, ack_match; 5125 5126 u32 txconfig, rxconfig; 5127 #define ANEG_CFG_NP 0x00000080 5128 #define ANEG_CFG_ACK 0x00000040 5129 #define ANEG_CFG_RF2 0x00000020 5130 #define ANEG_CFG_RF1 0x00000010 5131 #define ANEG_CFG_PS2 0x00000001 5132 #define ANEG_CFG_PS1 0x00008000 5133 #define ANEG_CFG_HD 0x00004000 5134 #define ANEG_CFG_FD 0x00002000 5135 #define ANEG_CFG_INVAL 0x00001f06 5136 5137 }; 5138 #define ANEG_OK 0 5139 #define ANEG_DONE 1 5140 #define ANEG_TIMER_ENAB 2 5141 #define ANEG_FAILED -1 5142 5143 #define ANEG_STATE_SETTLE_TIME 10000 5144 5145 static int tg3_fiber_aneg_smachine(struct tg3 *tp, 5146 struct tg3_fiber_aneginfo *ap) 5147 { 5148 u16 flowctrl; 5149 unsigned long delta; 5150 u32 rx_cfg_reg; 5151 int ret; 5152 5153 if (ap->state == ANEG_STATE_UNKNOWN) { 5154 ap->rxconfig = 0; 5155 ap->link_time = 0; 5156 ap->cur_time = 0; 5157 ap->ability_match_cfg = 0; 5158 ap->ability_match_count = 0; 5159 ap->ability_match = 0; 5160 ap->idle_match = 0; 5161 ap->ack_match = 0; 5162 } 5163 ap->cur_time++; 5164 5165 if 
(tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) { 5166 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG); 5167 5168 if (rx_cfg_reg != ap->ability_match_cfg) { 5169 ap->ability_match_cfg = rx_cfg_reg; 5170 ap->ability_match = 0; 5171 ap->ability_match_count = 0; 5172 } else { 5173 if (++ap->ability_match_count > 1) { 5174 ap->ability_match = 1; 5175 ap->ability_match_cfg = rx_cfg_reg; 5176 } 5177 } 5178 if (rx_cfg_reg & ANEG_CFG_ACK) 5179 ap->ack_match = 1; 5180 else 5181 ap->ack_match = 0; 5182 5183 ap->idle_match = 0; 5184 } else { 5185 ap->idle_match = 1; 5186 ap->ability_match_cfg = 0; 5187 ap->ability_match_count = 0; 5188 ap->ability_match = 0; 5189 ap->ack_match = 0; 5190 5191 rx_cfg_reg = 0; 5192 } 5193 5194 ap->rxconfig = rx_cfg_reg; 5195 ret = ANEG_OK; 5196 5197 switch (ap->state) { 5198 case ANEG_STATE_UNKNOWN: 5199 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN)) 5200 ap->state = ANEG_STATE_AN_ENABLE; 5201 5202 /* fallthru */ 5203 case ANEG_STATE_AN_ENABLE: 5204 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX); 5205 if (ap->flags & MR_AN_ENABLE) { 5206 ap->link_time = 0; 5207 ap->cur_time = 0; 5208 ap->ability_match_cfg = 0; 5209 ap->ability_match_count = 0; 5210 ap->ability_match = 0; 5211 ap->idle_match = 0; 5212 ap->ack_match = 0; 5213 5214 ap->state = ANEG_STATE_RESTART_INIT; 5215 } else { 5216 ap->state = ANEG_STATE_DISABLE_LINK_OK; 5217 } 5218 break; 5219 5220 case ANEG_STATE_RESTART_INIT: 5221 ap->link_time = ap->cur_time; 5222 ap->flags &= ~(MR_NP_LOADED); 5223 ap->txconfig = 0; 5224 tw32(MAC_TX_AUTO_NEG, 0); 5225 tp->mac_mode |= MAC_MODE_SEND_CONFIGS; 5226 tw32_f(MAC_MODE, tp->mac_mode); 5227 udelay(40); 5228 5229 ret = ANEG_TIMER_ENAB; 5230 ap->state = ANEG_STATE_RESTART; 5231 5232 /* fallthru */ 5233 case ANEG_STATE_RESTART: 5234 delta = ap->cur_time - ap->link_time; 5235 if (delta > ANEG_STATE_SETTLE_TIME) 5236 ap->state = ANEG_STATE_ABILITY_DETECT_INIT; 5237 else 5238 ret = ANEG_TIMER_ENAB; 5239 break; 5240 5241 case ANEG_STATE_DISABLE_LINK_OK: 5242 ret = ANEG_DONE; 5243 break; 5244 5245 case ANEG_STATE_ABILITY_DETECT_INIT: 5246 ap->flags &= ~(MR_TOGGLE_TX); 5247 ap->txconfig = ANEG_CFG_FD; 5248 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); 5249 if (flowctrl & ADVERTISE_1000XPAUSE) 5250 ap->txconfig |= ANEG_CFG_PS1; 5251 if (flowctrl & ADVERTISE_1000XPSE_ASYM) 5252 ap->txconfig |= ANEG_CFG_PS2; 5253 tw32(MAC_TX_AUTO_NEG, ap->txconfig); 5254 tp->mac_mode |= MAC_MODE_SEND_CONFIGS; 5255 tw32_f(MAC_MODE, tp->mac_mode); 5256 udelay(40); 5257 5258 ap->state = ANEG_STATE_ABILITY_DETECT; 5259 break; 5260 5261 case ANEG_STATE_ABILITY_DETECT: 5262 if (ap->ability_match != 0 && ap->rxconfig != 0) 5263 ap->state = ANEG_STATE_ACK_DETECT_INIT; 5264 break; 5265 5266 case ANEG_STATE_ACK_DETECT_INIT: 5267 ap->txconfig |= ANEG_CFG_ACK; 5268 tw32(MAC_TX_AUTO_NEG, ap->txconfig); 5269 tp->mac_mode |= MAC_MODE_SEND_CONFIGS; 5270 tw32_f(MAC_MODE, tp->mac_mode); 5271 udelay(40); 5272 5273 ap->state = ANEG_STATE_ACK_DETECT; 5274 5275 /* fallthru */ 5276 case ANEG_STATE_ACK_DETECT: 5277 if (ap->ack_match != 0) { 5278 if ((ap->rxconfig & ~ANEG_CFG_ACK) == 5279 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) { 5280 ap->state = ANEG_STATE_COMPLETE_ACK_INIT; 5281 } else { 5282 ap->state = ANEG_STATE_AN_ENABLE; 5283 } 5284 } else if (ap->ability_match != 0 && 5285 ap->rxconfig == 0) { 5286 ap->state = ANEG_STATE_AN_ENABLE; 5287 } 5288 break; 5289 5290 case ANEG_STATE_COMPLETE_ACK_INIT: 5291 if (ap->rxconfig & ANEG_CFG_INVAL) { 5292 ret = ANEG_FAILED; 5293 break; 5294 } 5295 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX | 
5296 MR_LP_ADV_HALF_DUPLEX | 5297 MR_LP_ADV_SYM_PAUSE | 5298 MR_LP_ADV_ASYM_PAUSE | 5299 MR_LP_ADV_REMOTE_FAULT1 | 5300 MR_LP_ADV_REMOTE_FAULT2 | 5301 MR_LP_ADV_NEXT_PAGE | 5302 MR_TOGGLE_RX | 5303 MR_NP_RX); 5304 if (ap->rxconfig & ANEG_CFG_FD) 5305 ap->flags |= MR_LP_ADV_FULL_DUPLEX; 5306 if (ap->rxconfig & ANEG_CFG_HD) 5307 ap->flags |= MR_LP_ADV_HALF_DUPLEX; 5308 if (ap->rxconfig & ANEG_CFG_PS1) 5309 ap->flags |= MR_LP_ADV_SYM_PAUSE; 5310 if (ap->rxconfig & ANEG_CFG_PS2) 5311 ap->flags |= MR_LP_ADV_ASYM_PAUSE; 5312 if (ap->rxconfig & ANEG_CFG_RF1) 5313 ap->flags |= MR_LP_ADV_REMOTE_FAULT1; 5314 if (ap->rxconfig & ANEG_CFG_RF2) 5315 ap->flags |= MR_LP_ADV_REMOTE_FAULT2; 5316 if (ap->rxconfig & ANEG_CFG_NP) 5317 ap->flags |= MR_LP_ADV_NEXT_PAGE; 5318 5319 ap->link_time = ap->cur_time; 5320 5321 ap->flags ^= (MR_TOGGLE_TX); 5322 if (ap->rxconfig & 0x0008) 5323 ap->flags |= MR_TOGGLE_RX; 5324 if (ap->rxconfig & ANEG_CFG_NP) 5325 ap->flags |= MR_NP_RX; 5326 ap->flags |= MR_PAGE_RX; 5327 5328 ap->state = ANEG_STATE_COMPLETE_ACK; 5329 ret = ANEG_TIMER_ENAB; 5330 break; 5331 5332 case ANEG_STATE_COMPLETE_ACK: 5333 if (ap->ability_match != 0 && 5334 ap->rxconfig == 0) { 5335 ap->state = ANEG_STATE_AN_ENABLE; 5336 break; 5337 } 5338 delta = ap->cur_time - ap->link_time; 5339 if (delta > ANEG_STATE_SETTLE_TIME) { 5340 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) { 5341 ap->state = ANEG_STATE_IDLE_DETECT_INIT; 5342 } else { 5343 if ((ap->txconfig & ANEG_CFG_NP) == 0 && 5344 !(ap->flags & MR_NP_RX)) { 5345 ap->state = ANEG_STATE_IDLE_DETECT_INIT; 5346 } else { 5347 ret = ANEG_FAILED; 5348 } 5349 } 5350 } 5351 break; 5352 5353 case ANEG_STATE_IDLE_DETECT_INIT: 5354 ap->link_time = ap->cur_time; 5355 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; 5356 tw32_f(MAC_MODE, tp->mac_mode); 5357 udelay(40); 5358 5359 ap->state = ANEG_STATE_IDLE_DETECT; 5360 ret = ANEG_TIMER_ENAB; 5361 break; 5362 5363 case ANEG_STATE_IDLE_DETECT: 5364 if (ap->ability_match != 0 && 5365 ap->rxconfig == 0) { 5366 ap->state = ANEG_STATE_AN_ENABLE; 5367 break; 5368 } 5369 delta = ap->cur_time - ap->link_time; 5370 if (delta > ANEG_STATE_SETTLE_TIME) { 5371 /* XXX another gem from the Broadcom driver :( */ 5372 ap->state = ANEG_STATE_LINK_OK; 5373 } 5374 break; 5375 5376 case ANEG_STATE_LINK_OK: 5377 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK); 5378 ret = ANEG_DONE; 5379 break; 5380 5381 case ANEG_STATE_NEXT_PAGE_WAIT_INIT: 5382 /* ??? unimplemented */ 5383 break; 5384 5385 case ANEG_STATE_NEXT_PAGE_WAIT: 5386 /* ??? 
unimplemented */ 5387 break; 5388 5389 default: 5390 ret = ANEG_FAILED; 5391 break; 5392 } 5393 5394 return ret; 5395 } 5396 5397 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags) 5398 { 5399 int res = 0; 5400 struct tg3_fiber_aneginfo aninfo; 5401 int status = ANEG_FAILED; 5402 unsigned int tick; 5403 u32 tmp; 5404 5405 tw32_f(MAC_TX_AUTO_NEG, 0); 5406 5407 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; 5408 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII); 5409 udelay(40); 5410 5411 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS); 5412 udelay(40); 5413 5414 memset(&aninfo, 0, sizeof(aninfo)); 5415 aninfo.flags |= MR_AN_ENABLE; 5416 aninfo.state = ANEG_STATE_UNKNOWN; 5417 aninfo.cur_time = 0; 5418 tick = 0; 5419 while (++tick < 195000) { 5420 status = tg3_fiber_aneg_smachine(tp, &aninfo); 5421 if (status == ANEG_DONE || status == ANEG_FAILED) 5422 break; 5423 5424 udelay(1); 5425 } 5426 5427 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; 5428 tw32_f(MAC_MODE, tp->mac_mode); 5429 udelay(40); 5430 5431 *txflags = aninfo.txconfig; 5432 *rxflags = aninfo.flags; 5433 5434 if (status == ANEG_DONE && 5435 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK | 5436 MR_LP_ADV_FULL_DUPLEX))) 5437 res = 1; 5438 5439 return res; 5440 } 5441 5442 static void tg3_init_bcm8002(struct tg3 *tp) 5443 { 5444 u32 mac_status = tr32(MAC_STATUS); 5445 int i; 5446 5447 /* Reset when initting first time or we have a link. */ 5448 if (tg3_flag(tp, INIT_COMPLETE) && 5449 !(mac_status & MAC_STATUS_PCS_SYNCED)) 5450 return; 5451 5452 /* Set PLL lock range. */ 5453 tg3_writephy(tp, 0x16, 0x8007); 5454 5455 /* SW reset */ 5456 tg3_writephy(tp, MII_BMCR, BMCR_RESET); 5457 5458 /* Wait for reset to complete. */ 5459 /* XXX schedule_timeout() ... */ 5460 for (i = 0; i < 500; i++) 5461 udelay(10); 5462 5463 /* Config mode; select PMA/Ch 1 regs. */ 5464 tg3_writephy(tp, 0x10, 0x8411); 5465 5466 /* Enable auto-lock and comdet, select txclk for tx. */ 5467 tg3_writephy(tp, 0x11, 0x0a10); 5468 5469 tg3_writephy(tp, 0x18, 0x00a0); 5470 tg3_writephy(tp, 0x16, 0x41ff); 5471 5472 /* Assert and deassert POR. */ 5473 tg3_writephy(tp, 0x13, 0x0400); 5474 udelay(40); 5475 tg3_writephy(tp, 0x13, 0x0000); 5476 5477 tg3_writephy(tp, 0x11, 0x0a50); 5478 udelay(40); 5479 tg3_writephy(tp, 0x11, 0x0a10); 5480 5481 /* Wait for signal to stabilize */ 5482 /* XXX schedule_timeout() ... */ 5483 for (i = 0; i < 15000; i++) 5484 udelay(10); 5485 5486 /* Deselect the channel register so we can read the PHYID 5487 * later. 
5488 */ 5489 tg3_writephy(tp, 0x10, 0x8011); 5490 } 5491 5492 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status) 5493 { 5494 u16 flowctrl; 5495 bool current_link_up; 5496 u32 sg_dig_ctrl, sg_dig_status; 5497 u32 serdes_cfg, expected_sg_dig_ctrl; 5498 int workaround, port_a; 5499 5500 serdes_cfg = 0; 5501 expected_sg_dig_ctrl = 0; 5502 workaround = 0; 5503 port_a = 1; 5504 current_link_up = false; 5505 5506 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 && 5507 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) { 5508 workaround = 1; 5509 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) 5510 port_a = 0; 5511 5512 /* preserve bits 0-11,13,14 for signal pre-emphasis */ 5513 /* preserve bits 20-23 for voltage regulator */ 5514 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff; 5515 } 5516 5517 sg_dig_ctrl = tr32(SG_DIG_CTRL); 5518 5519 if (tp->link_config.autoneg != AUTONEG_ENABLE) { 5520 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) { 5521 if (workaround) { 5522 u32 val = serdes_cfg; 5523 5524 if (port_a) 5525 val |= 0xc010000; 5526 else 5527 val |= 0x4010000; 5528 tw32_f(MAC_SERDES_CFG, val); 5529 } 5530 5531 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP); 5532 } 5533 if (mac_status & MAC_STATUS_PCS_SYNCED) { 5534 tg3_setup_flow_control(tp, 0, 0); 5535 current_link_up = true; 5536 } 5537 goto out; 5538 } 5539 5540 /* Want auto-negotiation. */ 5541 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP; 5542 5543 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); 5544 if (flowctrl & ADVERTISE_1000XPAUSE) 5545 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP; 5546 if (flowctrl & ADVERTISE_1000XPSE_ASYM) 5547 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE; 5548 5549 if (sg_dig_ctrl != expected_sg_dig_ctrl) { 5550 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) && 5551 tp->serdes_counter && 5552 ((mac_status & (MAC_STATUS_PCS_SYNCED | 5553 MAC_STATUS_RCVD_CFG)) == 5554 MAC_STATUS_PCS_SYNCED)) { 5555 tp->serdes_counter--; 5556 current_link_up = true; 5557 goto out; 5558 } 5559 restart_autoneg: 5560 if (workaround) 5561 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000); 5562 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET); 5563 udelay(5); 5564 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl); 5565 5566 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; 5567 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5568 } else if (mac_status & (MAC_STATUS_PCS_SYNCED | 5569 MAC_STATUS_SIGNAL_DET)) { 5570 sg_dig_status = tr32(SG_DIG_STATUS); 5571 mac_status = tr32(MAC_STATUS); 5572 5573 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) && 5574 (mac_status & MAC_STATUS_PCS_SYNCED)) { 5575 u32 local_adv = 0, remote_adv = 0; 5576 5577 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP) 5578 local_adv |= ADVERTISE_1000XPAUSE; 5579 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE) 5580 local_adv |= ADVERTISE_1000XPSE_ASYM; 5581 5582 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE) 5583 remote_adv |= LPA_1000XPAUSE; 5584 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE) 5585 remote_adv |= LPA_1000XPAUSE_ASYM; 5586 5587 tp->link_config.rmt_adv = 5588 mii_adv_to_ethtool_adv_x(remote_adv); 5589 5590 tg3_setup_flow_control(tp, local_adv, remote_adv); 5591 current_link_up = true; 5592 tp->serdes_counter = 0; 5593 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5594 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) { 5595 if (tp->serdes_counter) 5596 tp->serdes_counter--; 5597 else { 5598 if (workaround) { 5599 u32 val = serdes_cfg; 5600 5601 if (port_a) 5602 val |= 0xc010000; 5603 else 5604 val |= 0x4010000; 5605 5606 
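/* Write back the preserved SERDES config with the
 * per-port setup bits.
 */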
tw32_f(MAC_SERDES_CFG, val); 5607 } 5608 5609 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP); 5610 udelay(40); 5611 5612 /* Link parallel detection - link is up */ 5613 /* only if we have PCS_SYNC and not */ 5614 /* receiving config code words */ 5615 mac_status = tr32(MAC_STATUS); 5616 if ((mac_status & MAC_STATUS_PCS_SYNCED) && 5617 !(mac_status & MAC_STATUS_RCVD_CFG)) { 5618 tg3_setup_flow_control(tp, 0, 0); 5619 current_link_up = true; 5620 tp->phy_flags |= 5621 TG3_PHYFLG_PARALLEL_DETECT; 5622 tp->serdes_counter = 5623 SERDES_PARALLEL_DET_TIMEOUT; 5624 } else 5625 goto restart_autoneg; 5626 } 5627 } 5628 } else { 5629 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; 5630 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5631 } 5632 5633 out: 5634 return current_link_up; 5635 } 5636 5637 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status) 5638 { 5639 bool current_link_up = false; 5640 5641 if (!(mac_status & MAC_STATUS_PCS_SYNCED)) 5642 goto out; 5643 5644 if (tp->link_config.autoneg == AUTONEG_ENABLE) { 5645 u32 txflags, rxflags; 5646 int i; 5647 5648 if (fiber_autoneg(tp, &txflags, &rxflags)) { 5649 u32 local_adv = 0, remote_adv = 0; 5650 5651 if (txflags & ANEG_CFG_PS1) 5652 local_adv |= ADVERTISE_1000XPAUSE; 5653 if (txflags & ANEG_CFG_PS2) 5654 local_adv |= ADVERTISE_1000XPSE_ASYM; 5655 5656 if (rxflags & MR_LP_ADV_SYM_PAUSE) 5657 remote_adv |= LPA_1000XPAUSE; 5658 if (rxflags & MR_LP_ADV_ASYM_PAUSE) 5659 remote_adv |= LPA_1000XPAUSE_ASYM; 5660 5661 tp->link_config.rmt_adv = 5662 mii_adv_to_ethtool_adv_x(remote_adv); 5663 5664 tg3_setup_flow_control(tp, local_adv, remote_adv); 5665 5666 current_link_up = true; 5667 } 5668 for (i = 0; i < 30; i++) { 5669 udelay(20); 5670 tw32_f(MAC_STATUS, 5671 (MAC_STATUS_SYNC_CHANGED | 5672 MAC_STATUS_CFG_CHANGED)); 5673 udelay(40); 5674 if ((tr32(MAC_STATUS) & 5675 (MAC_STATUS_SYNC_CHANGED | 5676 MAC_STATUS_CFG_CHANGED)) == 0) 5677 break; 5678 } 5679 5680 mac_status = tr32(MAC_STATUS); 5681 if (!current_link_up && 5682 (mac_status & MAC_STATUS_PCS_SYNCED) && 5683 !(mac_status & MAC_STATUS_RCVD_CFG)) 5684 current_link_up = true; 5685 } else { 5686 tg3_setup_flow_control(tp, 0, 0); 5687 5688 /* Forcing 1000FD link up. 
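* With autoneg disabled there is nothing to negotiate on the fiber
* side, so report the link up and let the caller latch 1000 Mb/s
* full duplex.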
*/ 5689 current_link_up = true; 5690 5691 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS)); 5692 udelay(40); 5693 5694 tw32_f(MAC_MODE, tp->mac_mode); 5695 udelay(40); 5696 } 5697 5698 out: 5699 return current_link_up; 5700 } 5701 5702 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset) 5703 { 5704 u32 orig_pause_cfg; 5705 u16 orig_active_speed; 5706 u8 orig_active_duplex; 5707 u32 mac_status; 5708 bool current_link_up; 5709 int i; 5710 5711 orig_pause_cfg = tp->link_config.active_flowctrl; 5712 orig_active_speed = tp->link_config.active_speed; 5713 orig_active_duplex = tp->link_config.active_duplex; 5714 5715 if (!tg3_flag(tp, HW_AUTONEG) && 5716 tp->link_up && 5717 tg3_flag(tp, INIT_COMPLETE)) { 5718 mac_status = tr32(MAC_STATUS); 5719 mac_status &= (MAC_STATUS_PCS_SYNCED | 5720 MAC_STATUS_SIGNAL_DET | 5721 MAC_STATUS_CFG_CHANGED | 5722 MAC_STATUS_RCVD_CFG); 5723 if (mac_status == (MAC_STATUS_PCS_SYNCED | 5724 MAC_STATUS_SIGNAL_DET)) { 5725 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | 5726 MAC_STATUS_CFG_CHANGED)); 5727 return 0; 5728 } 5729 } 5730 5731 tw32_f(MAC_TX_AUTO_NEG, 0); 5732 5733 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); 5734 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI; 5735 tw32_f(MAC_MODE, tp->mac_mode); 5736 udelay(40); 5737 5738 if (tp->phy_id == TG3_PHY_ID_BCM8002) 5739 tg3_init_bcm8002(tp); 5740 5741 /* Enable link change event even when serdes polling. */ 5742 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); 5743 udelay(40); 5744 5745 current_link_up = false; 5746 tp->link_config.rmt_adv = 0; 5747 mac_status = tr32(MAC_STATUS); 5748 5749 if (tg3_flag(tp, HW_AUTONEG)) 5750 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status); 5751 else 5752 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status); 5753 5754 tp->napi[0].hw_status->status = 5755 (SD_STATUS_UPDATED | 5756 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG)); 5757 5758 for (i = 0; i < 100; i++) { 5759 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | 5760 MAC_STATUS_CFG_CHANGED)); 5761 udelay(5); 5762 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED | 5763 MAC_STATUS_CFG_CHANGED | 5764 MAC_STATUS_LNKSTATE_CHANGED)) == 0) 5765 break; 5766 } 5767 5768 mac_status = tr32(MAC_STATUS); 5769 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) { 5770 current_link_up = false; 5771 if (tp->link_config.autoneg == AUTONEG_ENABLE && 5772 tp->serdes_counter == 0) { 5773 tw32_f(MAC_MODE, (tp->mac_mode | 5774 MAC_MODE_SEND_CONFIGS)); 5775 udelay(1); 5776 tw32_f(MAC_MODE, tp->mac_mode); 5777 } 5778 } 5779 5780 if (current_link_up) { 5781 tp->link_config.active_speed = SPEED_1000; 5782 tp->link_config.active_duplex = DUPLEX_FULL; 5783 tw32(MAC_LED_CTRL, (tp->led_ctrl | 5784 LED_CTRL_LNKLED_OVERRIDE | 5785 LED_CTRL_1000MBPS_ON)); 5786 } else { 5787 tp->link_config.active_speed = SPEED_UNKNOWN; 5788 tp->link_config.active_duplex = DUPLEX_UNKNOWN; 5789 tw32(MAC_LED_CTRL, (tp->led_ctrl | 5790 LED_CTRL_LNKLED_OVERRIDE | 5791 LED_CTRL_TRAFFIC_OVERRIDE)); 5792 } 5793 5794 if (!tg3_test_and_report_link_chg(tp, current_link_up)) { 5795 u32 now_pause_cfg = tp->link_config.active_flowctrl; 5796 if (orig_pause_cfg != now_pause_cfg || 5797 orig_active_speed != tp->link_config.active_speed || 5798 orig_active_duplex != tp->link_config.active_duplex) 5799 tg3_link_report(tp); 5800 } 5801 5802 return 0; 5803 } 5804 5805 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset) 5806 { 5807 int err = 0; 5808 u32 bmsr, bmcr; 5809 u16 current_speed = SPEED_UNKNOWN; 5810 u8 current_duplex = 
DUPLEX_UNKNOWN; 5811 bool current_link_up = false; 5812 u32 local_adv, remote_adv, sgsr; 5813 5814 if ((tg3_asic_rev(tp) == ASIC_REV_5719 || 5815 tg3_asic_rev(tp) == ASIC_REV_5720) && 5816 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) && 5817 (sgsr & SERDES_TG3_SGMII_MODE)) { 5818 5819 if (force_reset) 5820 tg3_phy_reset(tp); 5821 5822 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK; 5823 5824 if (!(sgsr & SERDES_TG3_LINK_UP)) { 5825 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 5826 } else { 5827 current_link_up = true; 5828 if (sgsr & SERDES_TG3_SPEED_1000) { 5829 current_speed = SPEED_1000; 5830 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 5831 } else if (sgsr & SERDES_TG3_SPEED_100) { 5832 current_speed = SPEED_100; 5833 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 5834 } else { 5835 current_speed = SPEED_10; 5836 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 5837 } 5838 5839 if (sgsr & SERDES_TG3_FULL_DUPLEX) 5840 current_duplex = DUPLEX_FULL; 5841 else 5842 current_duplex = DUPLEX_HALF; 5843 } 5844 5845 tw32_f(MAC_MODE, tp->mac_mode); 5846 udelay(40); 5847 5848 tg3_clear_mac_status(tp); 5849 5850 goto fiber_setup_done; 5851 } 5852 5853 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 5854 tw32_f(MAC_MODE, tp->mac_mode); 5855 udelay(40); 5856 5857 tg3_clear_mac_status(tp); 5858 5859 if (force_reset) 5860 tg3_phy_reset(tp); 5861 5862 tp->link_config.rmt_adv = 0; 5863 5864 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 5865 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 5866 if (tg3_asic_rev(tp) == ASIC_REV_5714) { 5867 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) 5868 bmsr |= BMSR_LSTATUS; 5869 else 5870 bmsr &= ~BMSR_LSTATUS; 5871 } 5872 5873 err |= tg3_readphy(tp, MII_BMCR, &bmcr); 5874 5875 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset && 5876 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { 5877 /* do nothing, just check for link up at the end */ 5878 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) { 5879 u32 adv, newadv; 5880 5881 err |= tg3_readphy(tp, MII_ADVERTISE, &adv); 5882 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF | 5883 ADVERTISE_1000XPAUSE | 5884 ADVERTISE_1000XPSE_ASYM | 5885 ADVERTISE_SLCT); 5886 5887 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); 5888 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising); 5889 5890 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) { 5891 tg3_writephy(tp, MII_ADVERTISE, newadv); 5892 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART; 5893 tg3_writephy(tp, MII_BMCR, bmcr); 5894 5895 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); 5896 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S; 5897 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5898 5899 return err; 5900 } 5901 } else { 5902 u32 new_bmcr; 5903 5904 bmcr &= ~BMCR_SPEED1000; 5905 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX); 5906 5907 if (tp->link_config.duplex == DUPLEX_FULL) 5908 new_bmcr |= BMCR_FULLDPLX; 5909 5910 if (new_bmcr != bmcr) { 5911 /* BMCR_SPEED1000 is a reserved bit that needs 5912 * to be set on write. 
5913 */ 5914 new_bmcr |= BMCR_SPEED1000; 5915 5916 /* Force a linkdown */ 5917 if (tp->link_up) { 5918 u32 adv; 5919 5920 err |= tg3_readphy(tp, MII_ADVERTISE, &adv); 5921 adv &= ~(ADVERTISE_1000XFULL | 5922 ADVERTISE_1000XHALF | 5923 ADVERTISE_SLCT); 5924 tg3_writephy(tp, MII_ADVERTISE, adv); 5925 tg3_writephy(tp, MII_BMCR, bmcr | 5926 BMCR_ANRESTART | 5927 BMCR_ANENABLE); 5928 udelay(10); 5929 tg3_carrier_off(tp); 5930 } 5931 tg3_writephy(tp, MII_BMCR, new_bmcr); 5932 bmcr = new_bmcr; 5933 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 5934 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 5935 if (tg3_asic_rev(tp) == ASIC_REV_5714) { 5936 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) 5937 bmsr |= BMSR_LSTATUS; 5938 else 5939 bmsr &= ~BMSR_LSTATUS; 5940 } 5941 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5942 } 5943 } 5944 5945 if (bmsr & BMSR_LSTATUS) { 5946 current_speed = SPEED_1000; 5947 current_link_up = true; 5948 if (bmcr & BMCR_FULLDPLX) 5949 current_duplex = DUPLEX_FULL; 5950 else 5951 current_duplex = DUPLEX_HALF; 5952 5953 local_adv = 0; 5954 remote_adv = 0; 5955 5956 if (bmcr & BMCR_ANENABLE) { 5957 u32 common; 5958 5959 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv); 5960 err |= tg3_readphy(tp, MII_LPA, &remote_adv); 5961 common = local_adv & remote_adv; 5962 if (common & (ADVERTISE_1000XHALF | 5963 ADVERTISE_1000XFULL)) { 5964 if (common & ADVERTISE_1000XFULL) 5965 current_duplex = DUPLEX_FULL; 5966 else 5967 current_duplex = DUPLEX_HALF; 5968 5969 tp->link_config.rmt_adv = 5970 mii_adv_to_ethtool_adv_x(remote_adv); 5971 } else if (!tg3_flag(tp, 5780_CLASS)) { 5972 /* Link is up via parallel detect */ 5973 } else { 5974 current_link_up = false; 5975 } 5976 } 5977 } 5978 5979 fiber_setup_done: 5980 if (current_link_up && current_duplex == DUPLEX_FULL) 5981 tg3_setup_flow_control(tp, local_adv, remote_adv); 5982 5983 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; 5984 if (tp->link_config.active_duplex == DUPLEX_HALF) 5985 tp->mac_mode |= MAC_MODE_HALF_DUPLEX; 5986 5987 tw32_f(MAC_MODE, tp->mac_mode); 5988 udelay(40); 5989 5990 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); 5991 5992 tp->link_config.active_speed = current_speed; 5993 tp->link_config.active_duplex = current_duplex; 5994 5995 tg3_test_and_report_link_chg(tp, current_link_up); 5996 return err; 5997 } 5998 5999 static void tg3_serdes_parallel_detect(struct tg3 *tp) 6000 { 6001 if (tp->serdes_counter) { 6002 /* Give autoneg time to complete. */ 6003 tp->serdes_counter--; 6004 return; 6005 } 6006 6007 if (!tp->link_up && 6008 (tp->link_config.autoneg == AUTONEG_ENABLE)) { 6009 u32 bmcr; 6010 6011 tg3_readphy(tp, MII_BMCR, &bmcr); 6012 if (bmcr & BMCR_ANENABLE) { 6013 u32 phy1, phy2; 6014 6015 /* Select shadow register 0x1f */ 6016 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00); 6017 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1); 6018 6019 /* Select expansion interrupt status register */ 6020 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 6021 MII_TG3_DSP_EXP1_INT_STAT); 6022 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); 6023 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); 6024 6025 if ((phy1 & 0x10) && !(phy2 & 0x20)) { 6026 /* We have signal detect and not receiving 6027 * config code words, link is up by parallel 6028 * detection. 
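* Disable autoneg in the BMCR and force 1000 Mb/s full duplex to
* match.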
6029 */ 6030 6031 bmcr &= ~BMCR_ANENABLE; 6032 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX; 6033 tg3_writephy(tp, MII_BMCR, bmcr); 6034 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT; 6035 } 6036 } 6037 } else if (tp->link_up && 6038 (tp->link_config.autoneg == AUTONEG_ENABLE) && 6039 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { 6040 u32 phy2; 6041 6042 /* Select expansion interrupt status register */ 6043 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 6044 MII_TG3_DSP_EXP1_INT_STAT); 6045 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); 6046 if (phy2 & 0x20) { 6047 u32 bmcr; 6048 6049 /* Config code words received, turn on autoneg. */ 6050 tg3_readphy(tp, MII_BMCR, &bmcr); 6051 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE); 6052 6053 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 6054 6055 } 6056 } 6057 } 6058 6059 static int tg3_setup_phy(struct tg3 *tp, bool force_reset) 6060 { 6061 u32 val; 6062 int err; 6063 6064 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 6065 err = tg3_setup_fiber_phy(tp, force_reset); 6066 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 6067 err = tg3_setup_fiber_mii_phy(tp, force_reset); 6068 else 6069 err = tg3_setup_copper_phy(tp, force_reset); 6070 6071 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) { 6072 u32 scale; 6073 6074 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK; 6075 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5) 6076 scale = 65; 6077 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25) 6078 scale = 6; 6079 else 6080 scale = 12; 6081 6082 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK; 6083 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT); 6084 tw32(GRC_MISC_CFG, val); 6085 } 6086 6087 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) | 6088 (6 << TX_LENGTHS_IPG_SHIFT); 6089 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 6090 tg3_asic_rev(tp) == ASIC_REV_5762) 6091 val |= tr32(MAC_TX_LENGTHS) & 6092 (TX_LENGTHS_JMB_FRM_LEN_MSK | 6093 TX_LENGTHS_CNT_DWN_VAL_MSK); 6094 6095 if (tp->link_config.active_speed == SPEED_1000 && 6096 tp->link_config.active_duplex == DUPLEX_HALF) 6097 tw32(MAC_TX_LENGTHS, val | 6098 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)); 6099 else 6100 tw32(MAC_TX_LENGTHS, val | 6101 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)); 6102 6103 if (!tg3_flag(tp, 5705_PLUS)) { 6104 if (tp->link_up) { 6105 tw32(HOSTCC_STAT_COAL_TICKS, 6106 tp->coal.stats_block_coalesce_usecs); 6107 } else { 6108 tw32(HOSTCC_STAT_COAL_TICKS, 0); 6109 } 6110 } 6111 6112 if (tg3_flag(tp, ASPM_WORKAROUND)) { 6113 val = tr32(PCIE_PWR_MGMT_THRESH); 6114 if (!tp->link_up) 6115 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) | 6116 tp->pwrmgmt_thresh; 6117 else 6118 val |= PCIE_PWR_MGMT_L1_THRESH_MSK; 6119 tw32(PCIE_PWR_MGMT_THRESH, val); 6120 } 6121 6122 return err; 6123 } 6124 6125 /* tp->lock must be held */ 6126 static u64 tg3_refclk_read(struct tg3 *tp) 6127 { 6128 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB); 6129 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32; 6130 } 6131 6132 /* tp->lock must be held */ 6133 static void tg3_refclk_write(struct tg3 *tp, u64 newval) 6134 { 6135 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL); 6136 6137 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP); 6138 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff); 6139 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32); 6140 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME); 6141 } 6142 6143 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync); 6144 static inline void tg3_full_unlock(struct tg3 *tp); 6145 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) 6146 { 6147 struct tg3 
*tp = netdev_priv(dev); 6148
6149 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6150 SOF_TIMESTAMPING_RX_SOFTWARE |
6151 SOF_TIMESTAMPING_SOFTWARE;
6152
6153 if (tg3_flag(tp, PTP_CAPABLE)) {
6154 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6155 SOF_TIMESTAMPING_RX_HARDWARE |
6156 SOF_TIMESTAMPING_RAW_HARDWARE;
6157 }
6158
6159 if (tp->ptp_clock)
6160 info->phc_index = ptp_clock_index(tp->ptp_clock);
6161 else
6162 info->phc_index = -1;
6163
6164 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6165
6166 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6167 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6168 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6169 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6170 return 0;
6171 }
6172
6173 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6174 {
6175 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6176 bool neg_adj = false;
6177 u32 correction = 0;
6178
6179 if (ppb < 0) {
6180 neg_adj = true;
6181 ppb = -ppb;
6182 }
6183
6184 /* Frequency adjustment is performed using hardware with a 24 bit
6185 * accumulator and a programmable correction value. On each clock tick, the
6186 * correction value gets added to the accumulator and when it
6187 * overflows, the time counter is incremented/decremented.
6188 *
6189 * So the conversion from ppb to the correction value is
6190 * ppb * (1 << 24) / 1000000000
 * e.g. ppb = 1000 gives 1000 * 16777216 / 1000000000 = 16 (truncated).
6191 */
6192 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6193 TG3_EAV_REF_CLK_CORRECT_MASK;
6194
6195 tg3_full_lock(tp, 0);
6196
6197 if (correction)
6198 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6199 TG3_EAV_REF_CLK_CORRECT_EN |
6200 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6201 else
6202 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6203
6204 tg3_full_unlock(tp);
6205
6206 return 0;
6207 }
6208
6209 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6210 {
6211 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6212
6213 tg3_full_lock(tp, 0);
6214 tp->ptp_adjust += delta;
6215 tg3_full_unlock(tp);
6216
6217 return 0;
6218 }
6219
6220 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
6221 {
6222 u64 ns;
6223 u32 remainder;
6224 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6225
6226 tg3_full_lock(tp, 0);
6227 ns = tg3_refclk_read(tp);
6228 ns += tp->ptp_adjust;
6229 tg3_full_unlock(tp);
6230
6231 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
6232 ts->tv_nsec = remainder;
6233
6234 return 0;
6235 }
6236
6237 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6238 const struct timespec *ts)
6239 {
6240 u64 ns;
6241 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6242
6243 ns = timespec_to_ns(ts);
6244
6245 tg3_full_lock(tp, 0);
6246 tg3_refclk_write(tp, ns);
6247 tp->ptp_adjust = 0;
6248 tg3_full_unlock(tp);
6249
6250 return 0;
6251 }
6252
6253 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6254 struct ptp_clock_request *rq, int on)
6255 {
6256 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6257 u32 clock_ctl;
6258 int rval = 0;
6259
6260 switch (rq->type) {
6261 case PTP_CLK_REQ_PEROUT:
6262 if (rq->perout.index != 0)
6263 return -EINVAL;
6264
6265 tg3_full_lock(tp, 0);
6266 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6267 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6268
6269 if (on) {
6270 u64 nsec;
6271
6272 nsec = rq->perout.start.sec * 1000000000ULL +
6273 rq->perout.start.nsec;
6274
6275 if (rq->perout.period.sec || rq->perout.period.nsec) {
6276 netdev_warn(tp->dev,
6277 "Device supports only a one-shot
timesync output, period must be 0\n"); 6278 rval = -EINVAL; 6279 goto err_out; 6280 } 6281 6282 if (nsec & (1ULL << 63)) { 6283 netdev_warn(tp->dev, 6284 "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n"); 6285 rval = -EINVAL; 6286 goto err_out; 6287 } 6288 6289 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff)); 6290 tw32(TG3_EAV_WATCHDOG0_MSB, 6291 TG3_EAV_WATCHDOG0_EN | 6292 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK)); 6293 6294 tw32(TG3_EAV_REF_CLCK_CTL, 6295 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0); 6296 } else { 6297 tw32(TG3_EAV_WATCHDOG0_MSB, 0); 6298 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl); 6299 } 6300 6301 err_out: 6302 tg3_full_unlock(tp); 6303 return rval; 6304 6305 default: 6306 break; 6307 } 6308 6309 return -EOPNOTSUPP; 6310 } 6311 6312 static const struct ptp_clock_info tg3_ptp_caps = { 6313 .owner = THIS_MODULE, 6314 .name = "tg3 clock", 6315 .max_adj = 250000000, 6316 .n_alarm = 0, 6317 .n_ext_ts = 0, 6318 .n_per_out = 1, 6319 .n_pins = 0, 6320 .pps = 0, 6321 .adjfreq = tg3_ptp_adjfreq, 6322 .adjtime = tg3_ptp_adjtime, 6323 .gettime = tg3_ptp_gettime, 6324 .settime = tg3_ptp_settime, 6325 .enable = tg3_ptp_enable, 6326 }; 6327 6328 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock, 6329 struct skb_shared_hwtstamps *timestamp) 6330 { 6331 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps)); 6332 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) + 6333 tp->ptp_adjust); 6334 } 6335 6336 /* tp->lock must be held */ 6337 static void tg3_ptp_init(struct tg3 *tp) 6338 { 6339 if (!tg3_flag(tp, PTP_CAPABLE)) 6340 return; 6341 6342 /* Initialize the hardware clock to the system time. */ 6343 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real())); 6344 tp->ptp_adjust = 0; 6345 tp->ptp_info = tg3_ptp_caps; 6346 } 6347 6348 /* tp->lock must be held */ 6349 static void tg3_ptp_resume(struct tg3 *tp) 6350 { 6351 if (!tg3_flag(tp, PTP_CAPABLE)) 6352 return; 6353 6354 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust); 6355 tp->ptp_adjust = 0; 6356 } 6357 6358 static void tg3_ptp_fini(struct tg3 *tp) 6359 { 6360 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock) 6361 return; 6362 6363 ptp_clock_unregister(tp->ptp_clock); 6364 tp->ptp_clock = NULL; 6365 tp->ptp_adjust = 0; 6366 } 6367 6368 static inline int tg3_irq_sync(struct tg3 *tp) 6369 { 6370 return tp->irq_sync; 6371 } 6372 6373 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len) 6374 { 6375 int i; 6376 6377 dst = (u32 *)((u8 *)dst + off); 6378 for (i = 0; i < len; i += sizeof(u32)) 6379 *dst++ = tr32(off + i); 6380 } 6381 6382 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs) 6383 { 6384 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0); 6385 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200); 6386 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0); 6387 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0); 6388 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04); 6389 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80); 6390 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48); 6391 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04); 6392 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20); 6393 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c); 6394 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c); 6395 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c); 6396 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44); 6397 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04); 6398 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20); 6399 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14); 6400 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08); 
6401 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08); 6402 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100); 6403 6404 if (tg3_flag(tp, SUPPORT_MSIX)) 6405 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180); 6406 6407 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10); 6408 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58); 6409 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08); 6410 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08); 6411 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04); 6412 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04); 6413 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04); 6414 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04); 6415 6416 if (!tg3_flag(tp, 5705_PLUS)) { 6417 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04); 6418 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04); 6419 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04); 6420 } 6421 6422 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110); 6423 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120); 6424 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c); 6425 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04); 6426 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c); 6427 6428 if (tg3_flag(tp, NVRAM)) 6429 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24); 6430 } 6431 6432 static void tg3_dump_state(struct tg3 *tp) 6433 { 6434 int i; 6435 u32 *regs; 6436 6437 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC); 6438 if (!regs) 6439 return; 6440 6441 if (tg3_flag(tp, PCI_EXPRESS)) { 6442 /* Read up to but not including private PCI registers */ 6443 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32)) 6444 regs[i / sizeof(u32)] = tr32(i); 6445 } else 6446 tg3_dump_legacy_regs(tp, regs); 6447 6448 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) { 6449 if (!regs[i + 0] && !regs[i + 1] && 6450 !regs[i + 2] && !regs[i + 3]) 6451 continue; 6452 6453 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n", 6454 i * 4, 6455 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]); 6456 } 6457 6458 kfree(regs); 6459 6460 for (i = 0; i < tp->irq_cnt; i++) { 6461 struct tg3_napi *tnapi = &tp->napi[i]; 6462 6463 /* SW status block */ 6464 netdev_err(tp->dev, 6465 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n", 6466 i, 6467 tnapi->hw_status->status, 6468 tnapi->hw_status->status_tag, 6469 tnapi->hw_status->rx_jumbo_consumer, 6470 tnapi->hw_status->rx_consumer, 6471 tnapi->hw_status->rx_mini_consumer, 6472 tnapi->hw_status->idx[0].rx_producer, 6473 tnapi->hw_status->idx[0].tx_consumer); 6474 6475 netdev_err(tp->dev, 6476 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n", 6477 i, 6478 tnapi->last_tag, tnapi->last_irq_tag, 6479 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending, 6480 tnapi->rx_rcb_ptr, 6481 tnapi->prodring.rx_std_prod_idx, 6482 tnapi->prodring.rx_std_cons_idx, 6483 tnapi->prodring.rx_jmb_prod_idx, 6484 tnapi->prodring.rx_jmb_cons_idx); 6485 } 6486 } 6487 6488 /* This is called whenever we suspect that the system chipset is re- 6489 * ordering the sequence of MMIO to the tx send mailbox. The symptom 6490 * is bogus tx completions. We try to recover by setting the 6491 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later 6492 * in the workqueue. 6493 */ 6494 static void tg3_tx_recover(struct tg3 *tp) 6495 { 6496 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) || 6497 tp->write32_tx_mbox == tg3_write_indirect_mbox); 6498 6499 netdev_warn(tp->dev, 6500 "The system may be re-ordering memory-mapped I/O " 6501 "cycles to the network device, attempting to recover. 
" 6502 "Please report the problem to the driver maintainer " 6503 "and include system chipset information.\n"); 6504 6505 tg3_flag_set(tp, TX_RECOVERY_PENDING); 6506 } 6507 6508 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi) 6509 { 6510 /* Tell compiler to fetch tx indices from memory. */ 6511 barrier(); 6512 return tnapi->tx_pending - 6513 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1)); 6514 } 6515 6516 /* Tigon3 never reports partial packet sends. So we do not 6517 * need special logic to handle SKBs that have not had all 6518 * of their frags sent yet, like SunGEM does. 6519 */ 6520 static void tg3_tx(struct tg3_napi *tnapi) 6521 { 6522 struct tg3 *tp = tnapi->tp; 6523 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer; 6524 u32 sw_idx = tnapi->tx_cons; 6525 struct netdev_queue *txq; 6526 int index = tnapi - tp->napi; 6527 unsigned int pkts_compl = 0, bytes_compl = 0; 6528 6529 if (tg3_flag(tp, ENABLE_TSS)) 6530 index--; 6531 6532 txq = netdev_get_tx_queue(tp->dev, index); 6533 6534 while (sw_idx != hw_idx) { 6535 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx]; 6536 struct sk_buff *skb = ri->skb; 6537 int i, tx_bug = 0; 6538 6539 if (unlikely(skb == NULL)) { 6540 tg3_tx_recover(tp); 6541 return; 6542 } 6543 6544 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) { 6545 struct skb_shared_hwtstamps timestamp; 6546 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB); 6547 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32; 6548 6549 tg3_hwclock_to_timestamp(tp, hwclock, ×tamp); 6550 6551 skb_tstamp_tx(skb, ×tamp); 6552 } 6553 6554 pci_unmap_single(tp->pdev, 6555 dma_unmap_addr(ri, mapping), 6556 skb_headlen(skb), 6557 PCI_DMA_TODEVICE); 6558 6559 ri->skb = NULL; 6560 6561 while (ri->fragmented) { 6562 ri->fragmented = false; 6563 sw_idx = NEXT_TX(sw_idx); 6564 ri = &tnapi->tx_buffers[sw_idx]; 6565 } 6566 6567 sw_idx = NEXT_TX(sw_idx); 6568 6569 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 6570 ri = &tnapi->tx_buffers[sw_idx]; 6571 if (unlikely(ri->skb != NULL || sw_idx == hw_idx)) 6572 tx_bug = 1; 6573 6574 pci_unmap_page(tp->pdev, 6575 dma_unmap_addr(ri, mapping), 6576 skb_frag_size(&skb_shinfo(skb)->frags[i]), 6577 PCI_DMA_TODEVICE); 6578 6579 while (ri->fragmented) { 6580 ri->fragmented = false; 6581 sw_idx = NEXT_TX(sw_idx); 6582 ri = &tnapi->tx_buffers[sw_idx]; 6583 } 6584 6585 sw_idx = NEXT_TX(sw_idx); 6586 } 6587 6588 pkts_compl++; 6589 bytes_compl += skb->len; 6590 6591 dev_kfree_skb_any(skb); 6592 6593 if (unlikely(tx_bug)) { 6594 tg3_tx_recover(tp); 6595 return; 6596 } 6597 } 6598 6599 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl); 6600 6601 tnapi->tx_cons = sw_idx; 6602 6603 /* Need to make the tx_cons update visible to tg3_start_xmit() 6604 * before checking for netif_queue_stopped(). Without the 6605 * memory barrier, there is a small possibility that tg3_start_xmit() 6606 * will miss it and cause the queue to be stopped forever. 
6607 */
6608 smp_mb();
6609
6610 if (unlikely(netif_tx_queue_stopped(txq) &&
6611 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6612 __netif_tx_lock(txq, smp_processor_id());
6613 if (netif_tx_queue_stopped(txq) &&
6614 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6615 netif_tx_wake_queue(txq);
6616 __netif_tx_unlock(txq);
6617 }
6618 }
6619
6620 static void tg3_frag_free(bool is_frag, void *data)
6621 {
6622 if (is_frag)
6623 put_page(virt_to_head_page(data));
6624 else
6625 kfree(data);
6626 }
6627
6628 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6629 {
6630 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6631 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6632
6633 if (!ri->data)
6634 return;
6635
6636 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6637 map_sz, PCI_DMA_FROMDEVICE);
6638 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6639 ri->data = NULL;
6640 }
6641
6642
6643 /* Returns size of skb allocated or < 0 on error.
6644 *
6645 * We only need to fill in the address because the other members
6646 * of the RX descriptor are invariant, see tg3_init_rings.
6647 *
6648 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6649 * posting buffers we only dirty the first cache line of the RX
6650 * descriptor (containing the address). Whereas for the RX status
6651 * buffers the cpu only reads the last cacheline of the RX descriptor
6652 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6653 */
6654 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6655 u32 opaque_key, u32 dest_idx_unmasked,
6656 unsigned int *frag_size)
6657 {
6658 struct tg3_rx_buffer_desc *desc;
6659 struct ring_info *map;
6660 u8 *data;
6661 dma_addr_t mapping;
6662 int skb_size, data_size, dest_idx;
6663
6664 switch (opaque_key) {
6665 case RXD_OPAQUE_RING_STD:
6666 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6667 desc = &tpr->rx_std[dest_idx];
6668 map = &tpr->rx_std_buffers[dest_idx];
6669 data_size = tp->rx_pkt_map_sz;
6670 break;
6671
6672 case RXD_OPAQUE_RING_JUMBO:
6673 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6674 desc = &tpr->rx_jmb[dest_idx].std;
6675 map = &tpr->rx_jmb_buffers[dest_idx];
6676 data_size = TG3_RX_JMB_MAP_SZ;
6677 break;
6678
6679 default:
6680 return -EINVAL;
6681 }
6682
6683 /* Do not overwrite any of the map or rp information
6684 * until we are sure we can commit to a new buffer.
6685 *
6686 * Callers depend upon this behavior and assume that
6687 * we leave everything unchanged if we fail.
6688 */
6689 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6690 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6691 if (skb_size <= PAGE_SIZE) {
6692 data = netdev_alloc_frag(skb_size);
6693 *frag_size = skb_size;
6694 } else {
6695 data = kmalloc(skb_size, GFP_ATOMIC);
6696 *frag_size = 0;
6697 }
6698 if (!data)
6699 return -ENOMEM;
6700
6701 mapping = pci_map_single(tp->pdev,
6702 data + TG3_RX_OFFSET(tp),
6703 data_size,
6704 PCI_DMA_FROMDEVICE);
6705 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6706 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6707 return -EIO;
6708 }
6709
6710 map->data = data;
6711 dma_unmap_addr_set(map, mapping, mapping);
6712
6713 desc->addr_hi = ((u64)mapping >> 32);
6714 desc->addr_lo = ((u64)mapping & 0xffffffff);
6715
6716 return data_size;
6717 }
6718
6719 /* We only need to copy over the address because the other
6720 * members of the RX descriptor are invariant.
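 * Recycling moves the still-mapped data buffer from the source ring
 * entry to the destination ring entry instead of allocating a fresh
 * buffer.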
See notes above
6721 * tg3_alloc_rx_data for full details.
6722 */
6723 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6724 struct tg3_rx_prodring_set *dpr,
6725 u32 opaque_key, int src_idx,
6726 u32 dest_idx_unmasked)
6727 {
6728 struct tg3 *tp = tnapi->tp;
6729 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6730 struct ring_info *src_map, *dest_map;
6731 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6732 int dest_idx;
6733
6734 switch (opaque_key) {
6735 case RXD_OPAQUE_RING_STD:
6736 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6737 dest_desc = &dpr->rx_std[dest_idx];
6738 dest_map = &dpr->rx_std_buffers[dest_idx];
6739 src_desc = &spr->rx_std[src_idx];
6740 src_map = &spr->rx_std_buffers[src_idx];
6741 break;
6742
6743 case RXD_OPAQUE_RING_JUMBO:
6744 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6745 dest_desc = &dpr->rx_jmb[dest_idx].std;
6746 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6747 src_desc = &spr->rx_jmb[src_idx].std;
6748 src_map = &spr->rx_jmb_buffers[src_idx];
6749 break;
6750
6751 default:
6752 return;
6753 }
6754
6755 dest_map->data = src_map->data;
6756 dma_unmap_addr_set(dest_map, mapping,
6757 dma_unmap_addr(src_map, mapping));
6758 dest_desc->addr_hi = src_desc->addr_hi;
6759 dest_desc->addr_lo = src_desc->addr_lo;
6760
6761 /* Ensure that the update to the skb happens after the physical
6762 * addresses have been transferred to the new BD location.
6763 */
6764 smp_wmb();
6765
6766 src_map->data = NULL;
6767 }
6768
6769 /* The RX ring scheme is composed of multiple rings which post fresh
6770 * buffers to the chip, and one special ring the chip uses to report
6771 * status back to the host.
6772 *
6773 * The special ring reports the status of received packets to the
6774 * host. The chip does not write into the original descriptor the
6775 * RX buffer was obtained from. The chip simply takes the original
6776 * descriptor as provided by the host, updates the status and length
6777 * field, then writes this into the next status ring entry.
6778 *
6779 * Each ring the host uses to post buffers to the chip is described
6780 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
6781 * it is first placed into the on-chip RAM. When the packet's length
6782 * is known, it walks down the TG3_BDINFO entries to select the ring.
6783 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6784 * whose MAXLEN covers the new packet's length is chosen.
6785 *
6786 * The "separate ring for rx status" scheme may sound queer, but it makes
6787 * sense from a cache coherency perspective. If only the host writes
6788 * to the buffer post rings, and only the chip writes to the rx status
6789 * rings, then cache lines never move beyond shared-modified state.
6790 * If both the host and chip were to write into the same ring, cache line
6791 * eviction could occur since both entities want it in an exclusive state.
6792 */
6793 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6794 {
6795 struct tg3 *tp = tnapi->tp;
6796 u32 work_mask, rx_std_posted = 0;
6797 u32 std_prod_idx, jmb_prod_idx;
6798 u32 sw_idx = tnapi->rx_rcb_ptr;
6799 u16 hw_idx;
6800 int received;
6801 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6802
6803 hw_idx = *(tnapi->rx_rcb_prod_idx);
6804 /*
6805 * We need to order the read of hw_idx and the read of
6806 * the opaque cookie.
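 * Without the rmb() below, the CPU could read a stale descriptor
 * (and hence a stale opaque cookie) that the chip had not yet
 * finished writing when hw_idx was sampled.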
6807 */ 6808 rmb(); 6809 work_mask = 0; 6810 received = 0; 6811 std_prod_idx = tpr->rx_std_prod_idx; 6812 jmb_prod_idx = tpr->rx_jmb_prod_idx; 6813 while (sw_idx != hw_idx && budget > 0) { 6814 struct ring_info *ri; 6815 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx]; 6816 unsigned int len; 6817 struct sk_buff *skb; 6818 dma_addr_t dma_addr; 6819 u32 opaque_key, desc_idx, *post_ptr; 6820 u8 *data; 6821 u64 tstamp = 0; 6822 6823 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; 6824 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; 6825 if (opaque_key == RXD_OPAQUE_RING_STD) { 6826 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx]; 6827 dma_addr = dma_unmap_addr(ri, mapping); 6828 data = ri->data; 6829 post_ptr = &std_prod_idx; 6830 rx_std_posted++; 6831 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { 6832 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx]; 6833 dma_addr = dma_unmap_addr(ri, mapping); 6834 data = ri->data; 6835 post_ptr = &jmb_prod_idx; 6836 } else 6837 goto next_pkt_nopost; 6838 6839 work_mask |= opaque_key; 6840 6841 if (desc->err_vlan & RXD_ERR_MASK) { 6842 drop_it: 6843 tg3_recycle_rx(tnapi, tpr, opaque_key, 6844 desc_idx, *post_ptr); 6845 drop_it_no_recycle: 6846 /* Other statistics kept track of by card. */ 6847 tp->rx_dropped++; 6848 goto next_pkt; 6849 } 6850 6851 prefetch(data + TG3_RX_OFFSET(tp)); 6852 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 6853 ETH_FCS_LEN; 6854 6855 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) == 6856 RXD_FLAG_PTPSTAT_PTPV1 || 6857 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) == 6858 RXD_FLAG_PTPSTAT_PTPV2) { 6859 tstamp = tr32(TG3_RX_TSTAMP_LSB); 6860 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32; 6861 } 6862 6863 if (len > TG3_RX_COPY_THRESH(tp)) { 6864 int skb_size; 6865 unsigned int frag_size; 6866 6867 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key, 6868 *post_ptr, &frag_size); 6869 if (skb_size < 0) 6870 goto drop_it; 6871 6872 pci_unmap_single(tp->pdev, dma_addr, skb_size, 6873 PCI_DMA_FROMDEVICE); 6874 6875 /* Ensure that the update to the data happens 6876 * after the usage of the old DMA mapping. 
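 * (ri->data is only cleared once the unmap above is done, so a
 * concurrent reader that sees a non-NULL pointer also sees a valid
 * mapping.)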
6877 */ 6878 smp_wmb(); 6879 6880 ri->data = NULL; 6881 6882 skb = build_skb(data, frag_size); 6883 if (!skb) { 6884 tg3_frag_free(frag_size != 0, data); 6885 goto drop_it_no_recycle; 6886 } 6887 skb_reserve(skb, TG3_RX_OFFSET(tp)); 6888 } else { 6889 tg3_recycle_rx(tnapi, tpr, opaque_key, 6890 desc_idx, *post_ptr); 6891 6892 skb = netdev_alloc_skb(tp->dev, 6893 len + TG3_RAW_IP_ALIGN); 6894 if (skb == NULL) 6895 goto drop_it_no_recycle; 6896 6897 skb_reserve(skb, TG3_RAW_IP_ALIGN); 6898 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); 6899 memcpy(skb->data, 6900 data + TG3_RX_OFFSET(tp), 6901 len); 6902 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); 6903 } 6904 6905 skb_put(skb, len); 6906 if (tstamp) 6907 tg3_hwclock_to_timestamp(tp, tstamp, 6908 skb_hwtstamps(skb)); 6909 6910 if ((tp->dev->features & NETIF_F_RXCSUM) && 6911 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && 6912 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK) 6913 >> RXD_TCPCSUM_SHIFT) == 0xffff)) 6914 skb->ip_summed = CHECKSUM_UNNECESSARY; 6915 else 6916 skb_checksum_none_assert(skb); 6917 6918 skb->protocol = eth_type_trans(skb, tp->dev); 6919 6920 if (len > (tp->dev->mtu + ETH_HLEN) && 6921 skb->protocol != htons(ETH_P_8021Q)) { 6922 dev_kfree_skb_any(skb); 6923 goto drop_it_no_recycle; 6924 } 6925 6926 if (desc->type_flags & RXD_FLAG_VLAN && 6927 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) 6928 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 6929 desc->err_vlan & RXD_VLAN_MASK); 6930 6931 napi_gro_receive(&tnapi->napi, skb); 6932 6933 received++; 6934 budget--; 6935 6936 next_pkt: 6937 (*post_ptr)++; 6938 6939 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) { 6940 tpr->rx_std_prod_idx = std_prod_idx & 6941 tp->rx_std_ring_mask; 6942 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, 6943 tpr->rx_std_prod_idx); 6944 work_mask &= ~RXD_OPAQUE_RING_STD; 6945 rx_std_posted = 0; 6946 } 6947 next_pkt_nopost: 6948 sw_idx++; 6949 sw_idx &= tp->rx_ret_ring_mask; 6950 6951 /* Refresh hw_idx to see if there is new work */ 6952 if (sw_idx == hw_idx) { 6953 hw_idx = *(tnapi->rx_rcb_prod_idx); 6954 rmb(); 6955 } 6956 } 6957 6958 /* ACK the status ring. */ 6959 tnapi->rx_rcb_ptr = sw_idx; 6960 tw32_rx_mbox(tnapi->consmbox, sw_idx); 6961 6962 /* Refill RX ring(s). */ 6963 if (!tg3_flag(tp, ENABLE_RSS)) { 6964 /* Sync BD data before updating mailbox */ 6965 wmb(); 6966 6967 if (work_mask & RXD_OPAQUE_RING_STD) { 6968 tpr->rx_std_prod_idx = std_prod_idx & 6969 tp->rx_std_ring_mask; 6970 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, 6971 tpr->rx_std_prod_idx); 6972 } 6973 if (work_mask & RXD_OPAQUE_RING_JUMBO) { 6974 tpr->rx_jmb_prod_idx = jmb_prod_idx & 6975 tp->rx_jmb_ring_mask; 6976 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, 6977 tpr->rx_jmb_prod_idx); 6978 } 6979 mmiowb(); 6980 } else if (work_mask) { 6981 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be 6982 * updated before the producer indices can be updated. 
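 * The paired smp_rmb() is in tg3_rx_prodring_xfer(), which reads
 * the producer index before copying the buffer entries.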
6983 */ 6984 smp_wmb(); 6985 6986 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask; 6987 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask; 6988 6989 if (tnapi != &tp->napi[1]) { 6990 tp->rx_refill = true; 6991 napi_schedule(&tp->napi[1].napi); 6992 } 6993 } 6994 6995 return received; 6996 } 6997 6998 static void tg3_poll_link(struct tg3 *tp) 6999 { 7000 /* handle link change and other phy events */ 7001 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) { 7002 struct tg3_hw_status *sblk = tp->napi[0].hw_status; 7003 7004 if (sblk->status & SD_STATUS_LINK_CHG) { 7005 sblk->status = SD_STATUS_UPDATED | 7006 (sblk->status & ~SD_STATUS_LINK_CHG); 7007 spin_lock(&tp->lock); 7008 if (tg3_flag(tp, USE_PHYLIB)) { 7009 tw32_f(MAC_STATUS, 7010 (MAC_STATUS_SYNC_CHANGED | 7011 MAC_STATUS_CFG_CHANGED | 7012 MAC_STATUS_MI_COMPLETION | 7013 MAC_STATUS_LNKSTATE_CHANGED)); 7014 udelay(40); 7015 } else 7016 tg3_setup_phy(tp, false); 7017 spin_unlock(&tp->lock); 7018 } 7019 } 7020 } 7021 7022 static int tg3_rx_prodring_xfer(struct tg3 *tp, 7023 struct tg3_rx_prodring_set *dpr, 7024 struct tg3_rx_prodring_set *spr) 7025 { 7026 u32 si, di, cpycnt, src_prod_idx; 7027 int i, err = 0; 7028 7029 while (1) { 7030 src_prod_idx = spr->rx_std_prod_idx; 7031 7032 /* Make sure updates to the rx_std_buffers[] entries and the 7033 * standard producer index are seen in the correct order. 7034 */ 7035 smp_rmb(); 7036 7037 if (spr->rx_std_cons_idx == src_prod_idx) 7038 break; 7039 7040 if (spr->rx_std_cons_idx < src_prod_idx) 7041 cpycnt = src_prod_idx - spr->rx_std_cons_idx; 7042 else 7043 cpycnt = tp->rx_std_ring_mask + 1 - 7044 spr->rx_std_cons_idx; 7045 7046 cpycnt = min(cpycnt, 7047 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx); 7048 7049 si = spr->rx_std_cons_idx; 7050 di = dpr->rx_std_prod_idx; 7051 7052 for (i = di; i < di + cpycnt; i++) { 7053 if (dpr->rx_std_buffers[i].data) { 7054 cpycnt = i - di; 7055 err = -ENOSPC; 7056 break; 7057 } 7058 } 7059 7060 if (!cpycnt) 7061 break; 7062 7063 /* Ensure that updates to the rx_std_buffers ring and the 7064 * shadowed hardware producer ring from tg3_recycle_skb() are 7065 * ordered correctly WRT the skb check above. 7066 */ 7067 smp_rmb(); 7068 7069 memcpy(&dpr->rx_std_buffers[di], 7070 &spr->rx_std_buffers[si], 7071 cpycnt * sizeof(struct ring_info)); 7072 7073 for (i = 0; i < cpycnt; i++, di++, si++) { 7074 struct tg3_rx_buffer_desc *sbd, *dbd; 7075 sbd = &spr->rx_std[si]; 7076 dbd = &dpr->rx_std[di]; 7077 dbd->addr_hi = sbd->addr_hi; 7078 dbd->addr_lo = sbd->addr_lo; 7079 } 7080 7081 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) & 7082 tp->rx_std_ring_mask; 7083 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) & 7084 tp->rx_std_ring_mask; 7085 } 7086 7087 while (1) { 7088 src_prod_idx = spr->rx_jmb_prod_idx; 7089 7090 /* Make sure updates to the rx_jmb_buffers[] entries and 7091 * the jumbo producer index are seen in the correct order. 
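 * (This mirrors the ordering requirement of the standard ring
 * transfer above.)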
7092 */ 7093 smp_rmb(); 7094 7095 if (spr->rx_jmb_cons_idx == src_prod_idx) 7096 break; 7097 7098 if (spr->rx_jmb_cons_idx < src_prod_idx) 7099 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx; 7100 else 7101 cpycnt = tp->rx_jmb_ring_mask + 1 - 7102 spr->rx_jmb_cons_idx; 7103 7104 cpycnt = min(cpycnt, 7105 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx); 7106 7107 si = spr->rx_jmb_cons_idx; 7108 di = dpr->rx_jmb_prod_idx; 7109 7110 for (i = di; i < di + cpycnt; i++) { 7111 if (dpr->rx_jmb_buffers[i].data) { 7112 cpycnt = i - di; 7113 err = -ENOSPC; 7114 break; 7115 } 7116 } 7117 7118 if (!cpycnt) 7119 break; 7120 7121 /* Ensure that updates to the rx_jmb_buffers ring and the 7122 * shadowed hardware producer ring from tg3_recycle_skb() are 7123 * ordered correctly WRT the skb check above. 7124 */ 7125 smp_rmb(); 7126 7127 memcpy(&dpr->rx_jmb_buffers[di], 7128 &spr->rx_jmb_buffers[si], 7129 cpycnt * sizeof(struct ring_info)); 7130 7131 for (i = 0; i < cpycnt; i++, di++, si++) { 7132 struct tg3_rx_buffer_desc *sbd, *dbd; 7133 sbd = &spr->rx_jmb[si].std; 7134 dbd = &dpr->rx_jmb[di].std; 7135 dbd->addr_hi = sbd->addr_hi; 7136 dbd->addr_lo = sbd->addr_lo; 7137 } 7138 7139 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) & 7140 tp->rx_jmb_ring_mask; 7141 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) & 7142 tp->rx_jmb_ring_mask; 7143 } 7144 7145 return err; 7146 } 7147 7148 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) 7149 { 7150 struct tg3 *tp = tnapi->tp; 7151 7152 /* run TX completion thread */ 7153 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) { 7154 tg3_tx(tnapi); 7155 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) 7156 return work_done; 7157 } 7158 7159 if (!tnapi->rx_rcb_prod_idx) 7160 return work_done; 7161 7162 /* run RX thread, within the bounds set by NAPI. 
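 * (budget - work_done caps how many packets tg3_rx() may consume
 * on this pass.)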
7163 * All RX "locking" is done by ensuring outside 7164 * code synchronizes with tg3->napi.poll() 7165 */ 7166 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) 7167 work_done += tg3_rx(tnapi, budget - work_done); 7168 7169 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) { 7170 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring; 7171 int i, err = 0; 7172 u32 std_prod_idx = dpr->rx_std_prod_idx; 7173 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx; 7174 7175 tp->rx_refill = false; 7176 for (i = 1; i <= tp->rxq_cnt; i++) 7177 err |= tg3_rx_prodring_xfer(tp, dpr, 7178 &tp->napi[i].prodring); 7179 7180 wmb(); 7181 7182 if (std_prod_idx != dpr->rx_std_prod_idx) 7183 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, 7184 dpr->rx_std_prod_idx); 7185 7186 if (jmb_prod_idx != dpr->rx_jmb_prod_idx) 7187 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, 7188 dpr->rx_jmb_prod_idx); 7189 7190 mmiowb(); 7191 7192 if (err) 7193 tw32_f(HOSTCC_MODE, tp->coal_now); 7194 } 7195 7196 return work_done; 7197 } 7198 7199 static inline void tg3_reset_task_schedule(struct tg3 *tp) 7200 { 7201 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags)) 7202 schedule_work(&tp->reset_task); 7203 } 7204 7205 static inline void tg3_reset_task_cancel(struct tg3 *tp) 7206 { 7207 cancel_work_sync(&tp->reset_task); 7208 tg3_flag_clear(tp, RESET_TASK_PENDING); 7209 tg3_flag_clear(tp, TX_RECOVERY_PENDING); 7210 } 7211 7212 static int tg3_poll_msix(struct napi_struct *napi, int budget) 7213 { 7214 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); 7215 struct tg3 *tp = tnapi->tp; 7216 int work_done = 0; 7217 struct tg3_hw_status *sblk = tnapi->hw_status; 7218 7219 while (1) { 7220 work_done = tg3_poll_work(tnapi, work_done, budget); 7221 7222 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) 7223 goto tx_recovery; 7224 7225 if (unlikely(work_done >= budget)) 7226 break; 7227 7228 /* tp->last_tag is used in tg3_int_reenable() below 7229 * to tell the hw how much work has been processed, 7230 * so we must read it before checking for more work. 7231 */ 7232 tnapi->last_tag = sblk->status_tag; 7233 tnapi->last_irq_tag = tnapi->last_tag; 7234 rmb(); 7235 7236 /* check for RX/TX work to do */ 7237 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons && 7238 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) { 7239 7240 /* This test here is not race free, but will reduce 7241 * the number of interrupts by looping again. 7242 */ 7243 if (tnapi == &tp->napi[1] && tp->rx_refill) 7244 continue; 7245 7246 napi_complete(napi); 7247 /* Reenable interrupts. */ 7248 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); 7249 7250 /* This test here is synchronized by napi_schedule() 7251 * and napi_complete() to close the race condition. 7252 */ 7253 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) { 7254 tw32(HOSTCC_MODE, tp->coalesce_mode | 7255 HOSTCC_MODE_ENABLE | 7256 tnapi->coal_now); 7257 } 7258 mmiowb(); 7259 break; 7260 } 7261 } 7262 7263 return work_done; 7264 7265 tx_recovery: 7266 /* work_done is guaranteed to be less than budget. */ 7267 napi_complete(napi); 7268 tg3_reset_task_schedule(tp); 7269 return work_done; 7270 } 7271 7272 static void tg3_process_error(struct tg3 *tp) 7273 { 7274 u32 val; 7275 bool real_error = false; 7276 7277 if (tg3_flag(tp, ERROR_PROCESSED)) 7278 return; 7279 7280 /* Check Flow Attention register */ 7281 val = tr32(HOSTCC_FLOW_ATTN); 7282 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) { 7283 netdev_err(tp->dev, "FLOW Attention error. 
Resetting chip.\n"); 7284 real_error = true; 7285 } 7286 7287 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) { 7288 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n"); 7289 real_error = true; 7290 } 7291 7292 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) { 7293 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n"); 7294 real_error = true; 7295 } 7296 7297 if (!real_error) 7298 return; 7299 7300 tg3_dump_state(tp); 7301 7302 tg3_flag_set(tp, ERROR_PROCESSED); 7303 tg3_reset_task_schedule(tp); 7304 } 7305 7306 static int tg3_poll(struct napi_struct *napi, int budget) 7307 { 7308 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); 7309 struct tg3 *tp = tnapi->tp; 7310 int work_done = 0; 7311 struct tg3_hw_status *sblk = tnapi->hw_status; 7312 7313 while (1) { 7314 if (sblk->status & SD_STATUS_ERROR) 7315 tg3_process_error(tp); 7316 7317 tg3_poll_link(tp); 7318 7319 work_done = tg3_poll_work(tnapi, work_done, budget); 7320 7321 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) 7322 goto tx_recovery; 7323 7324 if (unlikely(work_done >= budget)) 7325 break; 7326 7327 if (tg3_flag(tp, TAGGED_STATUS)) { 7328 /* tp->last_tag is used in tg3_int_reenable() below 7329 * to tell the hw how much work has been processed, 7330 * so we must read it before checking for more work. 7331 */ 7332 tnapi->last_tag = sblk->status_tag; 7333 tnapi->last_irq_tag = tnapi->last_tag; 7334 rmb(); 7335 } else 7336 sblk->status &= ~SD_STATUS_UPDATED; 7337 7338 if (likely(!tg3_has_work(tnapi))) { 7339 napi_complete(napi); 7340 tg3_int_reenable(tnapi); 7341 break; 7342 } 7343 } 7344 7345 return work_done; 7346 7347 tx_recovery: 7348 /* work_done is guaranteed to be less than budget. */ 7349 napi_complete(napi); 7350 tg3_reset_task_schedule(tp); 7351 return work_done; 7352 } 7353 7354 static void tg3_napi_disable(struct tg3 *tp) 7355 { 7356 int i; 7357 7358 for (i = tp->irq_cnt - 1; i >= 0; i--) 7359 napi_disable(&tp->napi[i].napi); 7360 } 7361 7362 static void tg3_napi_enable(struct tg3 *tp) 7363 { 7364 int i; 7365 7366 for (i = 0; i < tp->irq_cnt; i++) 7367 napi_enable(&tp->napi[i].napi); 7368 } 7369 7370 static void tg3_napi_init(struct tg3 *tp) 7371 { 7372 int i; 7373 7374 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64); 7375 for (i = 1; i < tp->irq_cnt; i++) 7376 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64); 7377 } 7378 7379 static void tg3_napi_fini(struct tg3 *tp) 7380 { 7381 int i; 7382 7383 for (i = 0; i < tp->irq_cnt; i++) 7384 netif_napi_del(&tp->napi[i].napi); 7385 } 7386 7387 static inline void tg3_netif_stop(struct tg3 *tp) 7388 { 7389 tp->dev->trans_start = jiffies; /* prevent tx timeout */ 7390 tg3_napi_disable(tp); 7391 netif_carrier_off(tp->dev); 7392 netif_tx_disable(tp->dev); 7393 } 7394 7395 /* tp->lock must be held */ 7396 static inline void tg3_netif_start(struct tg3 *tp) 7397 { 7398 tg3_ptp_resume(tp); 7399 7400 /* NOTE: unconditional netif_tx_wake_all_queues is only 7401 * appropriate so long as all callers are assured to 7402 * have free tx slots (such as after tg3_init_hw) 7403 */ 7404 netif_tx_wake_all_queues(tp->dev); 7405 7406 if (tp->link_up) 7407 netif_carrier_on(tp->dev); 7408 7409 tg3_napi_enable(tp); 7410 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED; 7411 tg3_enable_ints(tp); 7412 } 7413 7414 static void tg3_irq_quiesce(struct tg3 *tp) 7415 { 7416 int i; 7417 7418 BUG_ON(tp->irq_sync); 7419 7420 tp->irq_sync = 1; 7421 smp_mb(); 7422 7423 for (i = 0; i < tp->irq_cnt; i++) 7424 synchronize_irq(tp->napi[i].irq_vec); 7425 } 7426 7427 
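/* Illustrative usage sketch: paths that reconfigure the chip and must
 * not race the interrupt handlers take the full lock with irq_sync
 * set, e.g.
 *
 *	tg3_full_lock(tp, 1);	- spin_lock_bh() plus tg3_irq_quiesce()
 *	... reset or reprogram the hardware ...
 *	tg3_full_unlock(tp);	- spin_unlock_bh()
 */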
/* Fully shut down all tg3 driver activity elsewhere in the system.
7428 * If irq_sync is non-zero, then the IRQ handler must be synchronized
7429 * with it as well. Most of the time, this is not necessary except when
7430 * shutting down the device.
7431 */
7432 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7433 {
7434 spin_lock_bh(&tp->lock);
7435 if (irq_sync)
7436 tg3_irq_quiesce(tp);
7437 }
7438
7439 static inline void tg3_full_unlock(struct tg3 *tp)
7440 {
7441 spin_unlock_bh(&tp->lock);
7442 }
7443
7444 /* One-shot MSI handler - Chip automatically disables interrupt
7445 * after sending MSI so driver doesn't have to do it.
7446 */
7447 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7448 {
7449 struct tg3_napi *tnapi = dev_id;
7450 struct tg3 *tp = tnapi->tp;
7451
7452 prefetch(tnapi->hw_status);
7453 if (tnapi->rx_rcb)
7454 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7455
7456 if (likely(!tg3_irq_sync(tp)))
7457 napi_schedule(&tnapi->napi);
7458
7459 return IRQ_HANDLED;
7460 }
7461
7462 /* MSI ISR - No need to check for interrupt sharing and no need to
7463 * flush status block and interrupt mailbox. PCI ordering rules
7464 * guarantee that MSI will arrive after the status block.
7465 */
7466 static irqreturn_t tg3_msi(int irq, void *dev_id)
7467 {
7468 struct tg3_napi *tnapi = dev_id;
7469 struct tg3 *tp = tnapi->tp;
7470
7471 prefetch(tnapi->hw_status);
7472 if (tnapi->rx_rcb)
7473 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7474 /*
7475 * Writing any value to intr-mbox-0 clears PCI INTA# and
7476 * chip-internal interrupt pending events.
7477 * Writing non-zero to intr-mbox-0 additionally tells the
7478 * NIC to stop sending us irqs, engaging "in-intr-handler"
7479 * event coalescing.
7480 */
7481 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7482 if (likely(!tg3_irq_sync(tp)))
7483 napi_schedule(&tnapi->napi);
7484
7485 return IRQ_RETVAL(1);
7486 }
7487
7488 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7489 {
7490 struct tg3_napi *tnapi = dev_id;
7491 struct tg3 *tp = tnapi->tp;
7492 struct tg3_hw_status *sblk = tnapi->hw_status;
7493 unsigned int handled = 1;
7494
7495 /* In INTx mode, it is possible for the interrupt to arrive at
7496 * the CPU before the status block posted prior to the interrupt.
7497 * Reading the PCI State register will confirm whether the
7498 * interrupt is ours and will flush the status block.
7499 */
7500 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7501 if (tg3_flag(tp, CHIP_RESETTING) ||
7502 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7503 handled = 0;
7504 goto out;
7505 }
7506 }
7507
7508 /*
7509 * Writing any value to intr-mbox-0 clears PCI INTA# and
7510 * chip-internal interrupt pending events.
7511 * Writing non-zero to intr-mbox-0 additionally tells the
7512 * NIC to stop sending us irqs, engaging "in-intr-handler"
7513 * event coalescing.
7514 *
7515 * Flush the mailbox to de-assert the IRQ immediately to prevent
7516 * spurious interrupts. The flush impacts performance but
7517 * excessive spurious interrupts can be worse in some cases.
7518 */
7519 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7520 if (tg3_irq_sync(tp))
7521 goto out;
7522 sblk->status &= ~SD_STATUS_UPDATED;
7523 if (likely(tg3_has_work(tnapi))) {
7524 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7525 napi_schedule(&tnapi->napi);
7526 } else {
7527 /* No work, shared interrupt perhaps?
re-enable
7528 * interrupts, and flush that PCI write
7529 */
7530 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7531 0x00000000);
7532 }
7533 out:
7534 return IRQ_RETVAL(handled);
7535 }
7536
7537 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7538 {
7539 struct tg3_napi *tnapi = dev_id;
7540 struct tg3 *tp = tnapi->tp;
7541 struct tg3_hw_status *sblk = tnapi->hw_status;
7542 unsigned int handled = 1;
7543
7544 /* In INTx mode, it is possible for the interrupt to arrive at
7545 * the CPU before the status block posted prior to the interrupt.
7546 * Reading the PCI State register will confirm whether the
7547 * interrupt is ours and will flush the status block.
7548 */
7549 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7550 if (tg3_flag(tp, CHIP_RESETTING) ||
7551 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7552 handled = 0;
7553 goto out;
7554 }
7555 }
7556
7557 /*
7558 * Writing any value to intr-mbox-0 clears PCI INTA# and
7559 * chip-internal interrupt pending events.
7560 * Writing non-zero to intr-mbox-0 additionally tells the
7561 * NIC to stop sending us irqs, engaging "in-intr-handler"
7562 * event coalescing.
7563 *
7564 * Flush the mailbox to de-assert the IRQ immediately to prevent
7565 * spurious interrupts. The flush impacts performance but
7566 * excessive spurious interrupts can be worse in some cases.
7567 */
7568 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7569
7570 /*
7571 * In a shared interrupt configuration, sometimes other devices'
7572 * interrupts will scream. We record the current status tag here
7573 * so that the above check can report that the screaming interrupts
7574 * are unhandled. Eventually they will be silenced.
7575 */
7576 tnapi->last_irq_tag = sblk->status_tag;
7577
7578 if (tg3_irq_sync(tp))
7579 goto out;
7580
7581 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7582
7583 napi_schedule(&tnapi->napi);
7584
7585 out:
7586 return IRQ_RETVAL(handled);
7587 }
7588
7589 /* ISR for interrupt test */
7590 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7591 {
7592 struct tg3_napi *tnapi = dev_id;
7593 struct tg3 *tp = tnapi->tp;
7594 struct tg3_hw_status *sblk = tnapi->hw_status;
7595
7596 if ((sblk->status & SD_STATUS_UPDATED) ||
7597 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7598 tg3_disable_ints(tp);
7599 return IRQ_RETVAL(1);
7600 }
7601 return IRQ_RETVAL(0);
7602 }
7603
7604 #ifdef CONFIG_NET_POLL_CONTROLLER
7605 static void tg3_poll_controller(struct net_device *dev)
7606 {
7607 int i;
7608 struct tg3 *tp = netdev_priv(dev);
7609
7610 if (tg3_irq_sync(tp))
7611 return;
7612
7613 for (i = 0; i < tp->irq_cnt; i++)
7614 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7615 }
7616 #endif
7617
7618 static void tg3_tx_timeout(struct net_device *dev)
7619 {
7620 struct tg3 *tp = netdev_priv(dev);
7621
7622 if (netif_msg_tx_err(tp)) {
7623 netdev_err(dev, "transmit timed out, resetting\n");
7624 tg3_dump_state(tp);
7625 }
7626
7627 tg3_reset_task_schedule(tp);
7628 }
7629
7630 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc.
 * The u32 sum wraps exactly when [base, base + len + 8) would cross a
 * 4GB boundary; e.g. base = 0xffffff00, len = 0x200 wraps to 0x108,
 * which is < base. */
7631 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7632 {
7633 u32 base = (u32) mapping & 0xffffffff;
7634
7635 return base + len + 8 < base;
7636 }
7637
7638 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7639 * of any 4GB boundaries: 4G, 8G, etc
7640 */
7641 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7642 u32 len, u32 mss)
7643 {
7644 if
(tg3_asic_rev(tp) == ASIC_REV_5762 && mss) { 7645 u32 base = (u32) mapping & 0xffffffff; 7646 7647 return ((base + len + (mss & 0x3fff)) < base); 7648 } 7649 return 0; 7650 } 7651 7652 /* Test for DMA addresses > 40-bit */ 7653 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping, 7654 int len) 7655 { 7656 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64) 7657 if (tg3_flag(tp, 40BIT_DMA_BUG)) 7658 return ((u64) mapping + len) > DMA_BIT_MASK(40); 7659 return 0; 7660 #else 7661 return 0; 7662 #endif 7663 } 7664 7665 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd, 7666 dma_addr_t mapping, u32 len, u32 flags, 7667 u32 mss, u32 vlan) 7668 { 7669 txbd->addr_hi = ((u64) mapping >> 32); 7670 txbd->addr_lo = ((u64) mapping & 0xffffffff); 7671 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff); 7672 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT); 7673 } 7674 7675 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget, 7676 dma_addr_t map, u32 len, u32 flags, 7677 u32 mss, u32 vlan) 7678 { 7679 struct tg3 *tp = tnapi->tp; 7680 bool hwbug = false; 7681 7682 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8) 7683 hwbug = true; 7684 7685 if (tg3_4g_overflow_test(map, len)) 7686 hwbug = true; 7687 7688 if (tg3_4g_tso_overflow_test(tp, map, len, mss)) 7689 hwbug = true; 7690 7691 if (tg3_40bit_overflow_test(tp, map, len)) 7692 hwbug = true; 7693 7694 if (tp->dma_limit) { 7695 u32 prvidx = *entry; 7696 u32 tmp_flag = flags & ~TXD_FLAG_END; 7697 while (len > tp->dma_limit && *budget) { 7698 u32 frag_len = tp->dma_limit; 7699 len -= tp->dma_limit; 7700 7701 /* Avoid the 8byte DMA problem */ 7702 if (len <= 8) { 7703 len += tp->dma_limit / 2; 7704 frag_len = tp->dma_limit / 2; 7705 } 7706 7707 tnapi->tx_buffers[*entry].fragmented = true; 7708 7709 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, 7710 frag_len, tmp_flag, mss, vlan); 7711 *budget -= 1; 7712 prvidx = *entry; 7713 *entry = NEXT_TX(*entry); 7714 7715 map += frag_len; 7716 } 7717 7718 if (len) { 7719 if (*budget) { 7720 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, 7721 len, flags, mss, vlan); 7722 *budget -= 1; 7723 *entry = NEXT_TX(*entry); 7724 } else { 7725 hwbug = true; 7726 tnapi->tx_buffers[prvidx].fragmented = false; 7727 } 7728 } 7729 } else { 7730 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, 7731 len, flags, mss, vlan); 7732 *entry = NEXT_TX(*entry); 7733 } 7734 7735 return hwbug; 7736 } 7737 7738 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last) 7739 { 7740 int i; 7741 struct sk_buff *skb; 7742 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry]; 7743 7744 skb = txb->skb; 7745 txb->skb = NULL; 7746 7747 pci_unmap_single(tnapi->tp->pdev, 7748 dma_unmap_addr(txb, mapping), 7749 skb_headlen(skb), 7750 PCI_DMA_TODEVICE); 7751 7752 while (txb->fragmented) { 7753 txb->fragmented = false; 7754 entry = NEXT_TX(entry); 7755 txb = &tnapi->tx_buffers[entry]; 7756 } 7757 7758 for (i = 0; i <= last; i++) { 7759 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 7760 7761 entry = NEXT_TX(entry); 7762 txb = &tnapi->tx_buffers[entry]; 7763 7764 pci_unmap_page(tnapi->tp->pdev, 7765 dma_unmap_addr(txb, mapping), 7766 skb_frag_size(frag), PCI_DMA_TODEVICE); 7767 7768 while (txb->fragmented) { 7769 txb->fragmented = false; 7770 entry = NEXT_TX(entry); 7771 txb = &tnapi->tx_buffers[entry]; 7772 } 7773 } 7774 } 7775 7776 /* Workaround 4GB and 40-bit hardware DMA bugs. 
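 * The workaround copies the whole SKB into a freshly allocated linear
 * buffer (skb_copy()/skb_copy_expand() below) so that the replacement
 * gets a single new DMA mapping which is re-checked against the
 * boundary tests via tg3_tx_frag_set().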
*/
7777 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7778 struct sk_buff **pskb,
7779 u32 *entry, u32 *budget,
7780 u32 base_flags, u32 mss, u32 vlan)
7781 {
7782 struct tg3 *tp = tnapi->tp;
7783 struct sk_buff *new_skb, *skb = *pskb;
7784 dma_addr_t new_addr = 0;
7785 int ret = 0;
7786
7787 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7788 new_skb = skb_copy(skb, GFP_ATOMIC);
7789 else {
7790 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7791
7792 new_skb = skb_copy_expand(skb,
7793 skb_headroom(skb) + more_headroom,
7794 skb_tailroom(skb), GFP_ATOMIC);
7795 }
7796
7797 if (!new_skb) {
7798 ret = -1;
7799 } else {
7800 /* New SKB is guaranteed to be linear. */
7801 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7802 PCI_DMA_TODEVICE);
7803 /* Make sure the mapping succeeded */
7804 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7805 dev_kfree_skb_any(new_skb);
7806 ret = -1;
7807 } else {
7808 u32 save_entry = *entry;
7809
7810 base_flags |= TXD_FLAG_END;
7811
7812 tnapi->tx_buffers[*entry].skb = new_skb;
7813 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7814 mapping, new_addr);
7815
7816 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7817 new_skb->len, base_flags,
7818 mss, vlan)) {
7819 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7820 dev_kfree_skb_any(new_skb);
7821 ret = -1;
7822 }
7823 }
7824 }
7825
7826 dev_kfree_skb_any(skb);
7827 *pskb = new_skb;
7828 return ret;
7829 }
7830
7831 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7832
7833 /* Use GSO to work around all TSO packets that meet HW bug conditions
7834 * indicated in tg3_tx_frag_set()
7835 */
7836 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7837 struct netdev_queue *txq, struct sk_buff *skb)
7838 {
7839 struct sk_buff *segs, *nskb;
7840 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7841
7842 /* Estimate the number of fragments in the worst case */
7843 if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7844 netif_tx_stop_queue(txq);
7845
7846 /* netif_tx_stop_queue() must be done before checking
7847 * the tx index in tg3_tx_avail() below, because in
7848 * tg3_tx(), we update the tx index before checking for
7849 * netif_tx_queue_stopped().
7850 */ 7851 smp_mb(); 7852 if (tg3_tx_avail(tnapi) <= frag_cnt_est) 7853 return NETDEV_TX_BUSY; 7854 7855 netif_tx_wake_queue(txq); 7856 } 7857 7858 segs = skb_gso_segment(skb, tp->dev->features & 7859 ~(NETIF_F_TSO | NETIF_F_TSO6)); 7860 if (IS_ERR(segs) || !segs) 7861 goto tg3_tso_bug_end; 7862 7863 do { 7864 nskb = segs; 7865 segs = segs->next; 7866 nskb->next = NULL; 7867 tg3_start_xmit(nskb, tp->dev); 7868 } while (segs); 7869 7870 tg3_tso_bug_end: 7871 dev_kfree_skb_any(skb); 7872 7873 return NETDEV_TX_OK; 7874 } 7875 7876 /* hard_start_xmit for all devices */ 7877 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) 7878 { 7879 struct tg3 *tp = netdev_priv(dev); 7880 u32 len, entry, base_flags, mss, vlan = 0; 7881 u32 budget; 7882 int i = -1, would_hit_hwbug; 7883 dma_addr_t mapping; 7884 struct tg3_napi *tnapi; 7885 struct netdev_queue *txq; 7886 unsigned int last; 7887 struct iphdr *iph = NULL; 7888 struct tcphdr *tcph = NULL; 7889 __sum16 tcp_csum = 0, ip_csum = 0; 7890 __be16 ip_tot_len = 0; 7891 7892 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); 7893 tnapi = &tp->napi[skb_get_queue_mapping(skb)]; 7894 if (tg3_flag(tp, ENABLE_TSS)) 7895 tnapi++; 7896 7897 budget = tg3_tx_avail(tnapi); 7898 7899 /* We are running in BH disabled context with netif_tx_lock 7900 * and TX reclaim runs via tp->napi.poll inside of a software 7901 * interrupt. Furthermore, IRQ processing runs lockless so we have 7902 * no IRQ context deadlocks to worry about either. Rejoice! 7903 */ 7904 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) { 7905 if (!netif_tx_queue_stopped(txq)) { 7906 netif_tx_stop_queue(txq); 7907 7908 /* This is a hard error, log it. */ 7909 netdev_err(dev, 7910 "BUG! Tx Ring full when queue awake!\n"); 7911 } 7912 return NETDEV_TX_BUSY; 7913 } 7914 7915 entry = tnapi->tx_prod; 7916 base_flags = 0; 7917 if (skb->ip_summed == CHECKSUM_PARTIAL) 7918 base_flags |= TXD_FLAG_TCPUDP_CSUM; 7919 7920 mss = skb_shinfo(skb)->gso_size; 7921 if (mss) { 7922 u32 tcp_opt_len, hdr_len; 7923 7924 if (skb_cow_head(skb, 0)) 7925 goto drop; 7926 7927 iph = ip_hdr(skb); 7928 tcp_opt_len = tcp_optlen(skb); 7929 7930 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN; 7931 7932 if (!skb_is_gso_v6(skb)) { 7933 if (unlikely((ETH_HLEN + hdr_len) > 80) && 7934 tg3_flag(tp, TSO_BUG)) 7935 return tg3_tso_bug(tp, tnapi, txq, skb); 7936 7937 ip_csum = iph->check; 7938 ip_tot_len = iph->tot_len; 7939 iph->check = 0; 7940 iph->tot_len = htons(mss + hdr_len); 7941 } 7942 7943 base_flags |= (TXD_FLAG_CPU_PRE_DMA | 7944 TXD_FLAG_CPU_POST_DMA); 7945 7946 tcph = tcp_hdr(skb); 7947 tcp_csum = tcph->check; 7948 7949 if (tg3_flag(tp, HW_TSO_1) || 7950 tg3_flag(tp, HW_TSO_2) || 7951 tg3_flag(tp, HW_TSO_3)) { 7952 tcph->check = 0; 7953 base_flags &= ~TXD_FLAG_TCPUDP_CSUM; 7954 } else { 7955 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 7956 0, IPPROTO_TCP, 0); 7957 } 7958 7959 if (tg3_flag(tp, HW_TSO_3)) { 7960 mss |= (hdr_len & 0xc) << 12; 7961 if (hdr_len & 0x10) 7962 base_flags |= 0x00000010; 7963 base_flags |= (hdr_len & 0x3e0) << 5; 7964 } else if (tg3_flag(tp, HW_TSO_2)) 7965 mss |= hdr_len << 9; 7966 else if (tg3_flag(tp, HW_TSO_1) || 7967 tg3_asic_rev(tp) == ASIC_REV_5705) { 7968 if (tcp_opt_len || iph->ihl > 5) { 7969 int tsflags; 7970 7971 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); 7972 mss |= (tsflags << 11); 7973 } 7974 } else { 7975 if (tcp_opt_len || iph->ihl > 5) { 7976 int tsflags; 7977 7978 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 
2);
7979 base_flags |= tsflags << 12;
7980 }
7981 }
7982 }
7983
7984 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7985 !mss && skb->len > VLAN_ETH_FRAME_LEN)
7986 base_flags |= TXD_FLAG_JMB_PKT;
7987
7988 if (vlan_tx_tag_present(skb)) {
7989 base_flags |= TXD_FLAG_VLAN;
7990 vlan = vlan_tx_tag_get(skb);
7991 }
7992
7993 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7994 tg3_flag(tp, TX_TSTAMP_EN)) {
7995 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7996 base_flags |= TXD_FLAG_HWTSTAMP;
7997 }
7998
7999 len = skb_headlen(skb);
8000
8001 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8002 if (pci_dma_mapping_error(tp->pdev, mapping))
8003 goto drop;
8004
8005
8006 tnapi->tx_buffers[entry].skb = skb;
8007 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8008
8009 would_hit_hwbug = 0;
8010
8011 if (tg3_flag(tp, 5701_DMA_BUG))
8012 would_hit_hwbug = 1;
8013
8014 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8015 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8016 mss, vlan)) {
8017 would_hit_hwbug = 1;
8018 } else if (skb_shinfo(skb)->nr_frags > 0) {
8019 u32 tmp_mss = mss;
8020
8021 if (!tg3_flag(tp, HW_TSO_1) &&
8022 !tg3_flag(tp, HW_TSO_2) &&
8023 !tg3_flag(tp, HW_TSO_3))
8024 tmp_mss = 0;
8025
8026 /* Now loop through additional data
8027 * fragments, and queue them.
8028 */
8029 last = skb_shinfo(skb)->nr_frags - 1;
8030 for (i = 0; i <= last; i++) {
8031 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8032
8033 len = skb_frag_size(frag);
8034 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8035 len, DMA_TO_DEVICE);
8036
8037 tnapi->tx_buffers[entry].skb = NULL;
8038 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8039 mapping);
8040 if (dma_mapping_error(&tp->pdev->dev, mapping))
8041 goto dma_error;
8042
8043 if (!budget ||
8044 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8045 len, base_flags |
8046 ((i == last) ? TXD_FLAG_END : 0),
8047 tmp_mss, vlan)) {
8048 would_hit_hwbug = 1;
8049 break;
8050 }
8051 }
8052 }
8053
8054 if (would_hit_hwbug) {
8055 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8056
8057 if (mss) {
8058 /* If it's a TSO packet, do GSO instead of
8059 * allocating and copying to a large linear SKB
8060 */
8061 if (ip_tot_len) {
8062 iph->check = ip_csum;
8063 iph->tot_len = ip_tot_len;
8064 }
8065 tcph->check = tcp_csum;
8066 return tg3_tso_bug(tp, tnapi, txq, skb);
8067 }
8068
8069 /* If the workaround fails due to memory/mapping
8070 * failure, silently drop this packet.
8071 */
8072 entry = tnapi->tx_prod;
8073 budget = tg3_tx_avail(tnapi);
8074 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8075 base_flags, mss, vlan))
8076 goto drop_nofree;
8077 }
8078
8079 skb_tx_timestamp(skb);
8080 netdev_tx_sent_queue(txq, skb->len);
8081
8082 /* Sync BD data before updating mailbox */
8083 wmb();
8084
8085 /* Packets are ready, update Tx producer idx local and on card. */
8086 tw32_tx_mbox(tnapi->prodmbox, entry);
8087
8088 tnapi->tx_prod = entry;
8089 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8090 netif_tx_stop_queue(txq);
8091
8092 /* netif_tx_stop_queue() must be done before checking
8093 * the tx index in tg3_tx_avail() below, because in
8094 * tg3_tx(), we update the tx index before checking for
8095 * netif_tx_queue_stopped().
8096 */ 8097 smp_mb(); 8098 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)) 8099 netif_tx_wake_queue(txq); 8100 } 8101 8102 mmiowb(); 8103 return NETDEV_TX_OK; 8104 8105 dma_error: 8106 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i); 8107 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL; 8108 drop: 8109 dev_kfree_skb_any(skb); 8110 drop_nofree: 8111 tp->tx_dropped++; 8112 return NETDEV_TX_OK; 8113 } 8114 8115 static void tg3_mac_loopback(struct tg3 *tp, bool enable) 8116 { 8117 if (enable) { 8118 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX | 8119 MAC_MODE_PORT_MODE_MASK); 8120 8121 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK; 8122 8123 if (!tg3_flag(tp, 5705_PLUS)) 8124 tp->mac_mode |= MAC_MODE_LINK_POLARITY; 8125 8126 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) 8127 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 8128 else 8129 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 8130 } else { 8131 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK; 8132 8133 if (tg3_flag(tp, 5705_PLUS) || 8134 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) || 8135 tg3_asic_rev(tp) == ASIC_REV_5700) 8136 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; 8137 } 8138 8139 tw32(MAC_MODE, tp->mac_mode); 8140 udelay(40); 8141 } 8142 8143 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk) 8144 { 8145 u32 val, bmcr, mac_mode, ptest = 0; 8146 8147 tg3_phy_toggle_apd(tp, false); 8148 tg3_phy_toggle_automdix(tp, false); 8149 8150 if (extlpbk && tg3_phy_set_extloopbk(tp)) 8151 return -EIO; 8152 8153 bmcr = BMCR_FULLDPLX; 8154 switch (speed) { 8155 case SPEED_10: 8156 break; 8157 case SPEED_100: 8158 bmcr |= BMCR_SPEED100; 8159 break; 8160 case SPEED_1000: 8161 default: 8162 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 8163 speed = SPEED_100; 8164 bmcr |= BMCR_SPEED100; 8165 } else { 8166 speed = SPEED_1000; 8167 bmcr |= BMCR_SPEED1000; 8168 } 8169 } 8170 8171 if (extlpbk) { 8172 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) { 8173 tg3_readphy(tp, MII_CTRL1000, &val); 8174 val |= CTL1000_AS_MASTER | 8175 CTL1000_ENABLE_MASTER; 8176 tg3_writephy(tp, MII_CTRL1000, val); 8177 } else { 8178 ptest = MII_TG3_FET_PTEST_TRIM_SEL | 8179 MII_TG3_FET_PTEST_TRIM_2; 8180 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest); 8181 } 8182 } else 8183 bmcr |= BMCR_LOOPBACK; 8184 8185 tg3_writephy(tp, MII_BMCR, bmcr); 8186 8187 /* The write needs to be flushed for the FETs */ 8188 if (tp->phy_flags & TG3_PHYFLG_IS_FET) 8189 tg3_readphy(tp, MII_BMCR, &bmcr); 8190 8191 udelay(40); 8192 8193 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) && 8194 tg3_asic_rev(tp) == ASIC_REV_5785) { 8195 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest | 8196 MII_TG3_FET_PTEST_FRC_TX_LINK | 8197 MII_TG3_FET_PTEST_FRC_TX_LOCK); 8198 8199 /* The write needs to be flushed for the AC131 */ 8200 tg3_readphy(tp, MII_TG3_FET_PTEST, &val); 8201 } 8202 8203 /* Reset to prevent losing 1st rx packet intermittently */ 8204 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && 8205 tg3_flag(tp, 5780_CLASS)) { 8206 tw32_f(MAC_RX_MODE, RX_MODE_RESET); 8207 udelay(10); 8208 tw32_f(MAC_RX_MODE, tp->rx_mode); 8209 } 8210 8211 mac_mode = tp->mac_mode & 8212 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); 8213 if (speed == SPEED_1000) 8214 mac_mode |= MAC_MODE_PORT_MODE_GMII; 8215 else 8216 mac_mode |= MAC_MODE_PORT_MODE_MII; 8217 8218 if (tg3_asic_rev(tp) == ASIC_REV_5700) { 8219 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK; 8220 8221 if (masked_phy_id == TG3_PHY_ID_BCM5401) 8222 mac_mode &= ~MAC_MODE_LINK_POLARITY; 8223 else if (masked_phy_id == TG3_PHY_ID_BCM5411) 8224 mac_mode |= MAC_MODE_LINK_POLARITY; 8225 8226 tg3_writephy(tp, 
MII_TG3_EXT_CTRL, 8227 MII_TG3_EXT_CTRL_LNK3_LED_MODE); 8228 } 8229 8230 tw32(MAC_MODE, mac_mode); 8231 udelay(40); 8232 8233 return 0; 8234 } 8235 8236 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features) 8237 { 8238 struct tg3 *tp = netdev_priv(dev); 8239 8240 if (features & NETIF_F_LOOPBACK) { 8241 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK) 8242 return; 8243 8244 spin_lock_bh(&tp->lock); 8245 tg3_mac_loopback(tp, true); 8246 netif_carrier_on(tp->dev); 8247 spin_unlock_bh(&tp->lock); 8248 netdev_info(dev, "Internal MAC loopback mode enabled.\n"); 8249 } else { 8250 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)) 8251 return; 8252 8253 spin_lock_bh(&tp->lock); 8254 tg3_mac_loopback(tp, false); 8255 /* Force link status check */ 8256 tg3_setup_phy(tp, true); 8257 spin_unlock_bh(&tp->lock); 8258 netdev_info(dev, "Internal MAC loopback mode disabled.\n"); 8259 } 8260 } 8261 8262 static netdev_features_t tg3_fix_features(struct net_device *dev, 8263 netdev_features_t features) 8264 { 8265 struct tg3 *tp = netdev_priv(dev); 8266 8267 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS)) 8268 features &= ~NETIF_F_ALL_TSO; 8269 8270 return features; 8271 } 8272 8273 static int tg3_set_features(struct net_device *dev, netdev_features_t features) 8274 { 8275 netdev_features_t changed = dev->features ^ features; 8276 8277 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev)) 8278 tg3_set_loopback(dev, features); 8279 8280 return 0; 8281 } 8282 8283 static void tg3_rx_prodring_free(struct tg3 *tp, 8284 struct tg3_rx_prodring_set *tpr) 8285 { 8286 int i; 8287 8288 if (tpr != &tp->napi[0].prodring) { 8289 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx; 8290 i = (i + 1) & tp->rx_std_ring_mask) 8291 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i], 8292 tp->rx_pkt_map_sz); 8293 8294 if (tg3_flag(tp, JUMBO_CAPABLE)) { 8295 for (i = tpr->rx_jmb_cons_idx; 8296 i != tpr->rx_jmb_prod_idx; 8297 i = (i + 1) & tp->rx_jmb_ring_mask) { 8298 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i], 8299 TG3_RX_JMB_MAP_SZ); 8300 } 8301 } 8302 8303 return; 8304 } 8305 8306 for (i = 0; i <= tp->rx_std_ring_mask; i++) 8307 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i], 8308 tp->rx_pkt_map_sz); 8309 8310 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) { 8311 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) 8312 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i], 8313 TG3_RX_JMB_MAP_SZ); 8314 } 8315 } 8316 8317 /* Initialize rx rings for packet processing. 8318 * 8319 * The chip has been shut down and the driver detached from 8320 * the networking, so no interrupts or new tx packets will 8321 * end up in the driver. tp->{tx,}lock are held and thus 8322 * we may not sleep. 8323 */ 8324 static int tg3_rx_prodring_alloc(struct tg3 *tp, 8325 struct tg3_rx_prodring_set *tpr) 8326 { 8327 u32 i, rx_pkt_dma_sz; 8328 8329 tpr->rx_std_cons_idx = 0; 8330 tpr->rx_std_prod_idx = 0; 8331 tpr->rx_jmb_cons_idx = 0; 8332 tpr->rx_jmb_prod_idx = 0; 8333 8334 if (tpr != &tp->napi[0].prodring) { 8335 memset(&tpr->rx_std_buffers[0], 0, 8336 TG3_RX_STD_BUFF_RING_SIZE(tp)); 8337 if (tpr->rx_jmb_buffers) 8338 memset(&tpr->rx_jmb_buffers[0], 0, 8339 TG3_RX_JMB_BUFF_RING_SIZE(tp)); 8340 goto done; 8341 } 8342 8343 /* Zero out all descriptors. 
*/ 8344 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp)); 8345 8346 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ; 8347 if (tg3_flag(tp, 5780_CLASS) && 8348 tp->dev->mtu > ETH_DATA_LEN) 8349 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ; 8350 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz); 8351 8352 /* Initialize invariants of the rings, we only set this 8353 * stuff once. This works because the card does not 8354 * write into the rx buffer posting rings. 8355 */ 8356 for (i = 0; i <= tp->rx_std_ring_mask; i++) { 8357 struct tg3_rx_buffer_desc *rxd; 8358 8359 rxd = &tpr->rx_std[i]; 8360 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT; 8361 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT); 8362 rxd->opaque = (RXD_OPAQUE_RING_STD | 8363 (i << RXD_OPAQUE_INDEX_SHIFT)); 8364 } 8365 8366 /* Now allocate fresh SKBs for each rx ring. */ 8367 for (i = 0; i < tp->rx_pending; i++) { 8368 unsigned int frag_size; 8369 8370 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i, 8371 &frag_size) < 0) { 8372 netdev_warn(tp->dev, 8373 "Using a smaller RX standard ring. Only " 8374 "%d out of %d buffers were allocated " 8375 "successfully\n", i, tp->rx_pending); 8376 if (i == 0) 8377 goto initfail; 8378 tp->rx_pending = i; 8379 break; 8380 } 8381 } 8382 8383 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS)) 8384 goto done; 8385 8386 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp)); 8387 8388 if (!tg3_flag(tp, JUMBO_RING_ENABLE)) 8389 goto done; 8390 8391 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) { 8392 struct tg3_rx_buffer_desc *rxd; 8393 8394 rxd = &tpr->rx_jmb[i].std; 8395 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT; 8396 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) | 8397 RXD_FLAG_JUMBO; 8398 rxd->opaque = (RXD_OPAQUE_RING_JUMBO | 8399 (i << RXD_OPAQUE_INDEX_SHIFT)); 8400 } 8401 8402 for (i = 0; i < tp->rx_jumbo_pending; i++) { 8403 unsigned int frag_size; 8404 8405 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i, 8406 &frag_size) < 0) { 8407 netdev_warn(tp->dev, 8408 "Using a smaller RX jumbo ring. 
Only %d " 8409 "out of %d buffers were allocated " 8410 "successfully\n", i, tp->rx_jumbo_pending); 8411 if (i == 0) 8412 goto initfail; 8413 tp->rx_jumbo_pending = i; 8414 break; 8415 } 8416 } 8417 8418 done: 8419 return 0; 8420 8421 initfail: 8422 tg3_rx_prodring_free(tp, tpr); 8423 return -ENOMEM; 8424 } 8425 8426 static void tg3_rx_prodring_fini(struct tg3 *tp, 8427 struct tg3_rx_prodring_set *tpr) 8428 { 8429 kfree(tpr->rx_std_buffers); 8430 tpr->rx_std_buffers = NULL; 8431 kfree(tpr->rx_jmb_buffers); 8432 tpr->rx_jmb_buffers = NULL; 8433 if (tpr->rx_std) { 8434 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp), 8435 tpr->rx_std, tpr->rx_std_mapping); 8436 tpr->rx_std = NULL; 8437 } 8438 if (tpr->rx_jmb) { 8439 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp), 8440 tpr->rx_jmb, tpr->rx_jmb_mapping); 8441 tpr->rx_jmb = NULL; 8442 } 8443 } 8444 8445 static int tg3_rx_prodring_init(struct tg3 *tp, 8446 struct tg3_rx_prodring_set *tpr) 8447 { 8448 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp), 8449 GFP_KERNEL); 8450 if (!tpr->rx_std_buffers) 8451 return -ENOMEM; 8452 8453 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev, 8454 TG3_RX_STD_RING_BYTES(tp), 8455 &tpr->rx_std_mapping, 8456 GFP_KERNEL); 8457 if (!tpr->rx_std) 8458 goto err_out; 8459 8460 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) { 8461 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp), 8462 GFP_KERNEL); 8463 if (!tpr->rx_jmb_buffers) 8464 goto err_out; 8465 8466 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev, 8467 TG3_RX_JMB_RING_BYTES(tp), 8468 &tpr->rx_jmb_mapping, 8469 GFP_KERNEL); 8470 if (!tpr->rx_jmb) 8471 goto err_out; 8472 } 8473 8474 return 0; 8475 8476 err_out: 8477 tg3_rx_prodring_fini(tp, tpr); 8478 return -ENOMEM; 8479 } 8480 8481 /* Free up pending packets in all rx/tx rings. 8482 * 8483 * The chip has been shut down and the driver detached from 8484 * the networking, so no interrupts or new tx packets will 8485 * end up in the driver. tp->{tx,}lock is not held and we are not 8486 * in an interrupt context and thus may sleep. 8487 */ 8488 static void tg3_free_rings(struct tg3 *tp) 8489 { 8490 int i, j; 8491 8492 for (j = 0; j < tp->irq_cnt; j++) { 8493 struct tg3_napi *tnapi = &tp->napi[j]; 8494 8495 tg3_rx_prodring_free(tp, &tnapi->prodring); 8496 8497 if (!tnapi->tx_buffers) 8498 continue; 8499 8500 for (i = 0; i < TG3_TX_RING_SIZE; i++) { 8501 struct sk_buff *skb = tnapi->tx_buffers[i].skb; 8502 8503 if (!skb) 8504 continue; 8505 8506 tg3_tx_skb_unmap(tnapi, i, 8507 skb_shinfo(skb)->nr_frags - 1); 8508 8509 dev_kfree_skb_any(skb); 8510 } 8511 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j)); 8512 } 8513 } 8514 8515 /* Initialize tx/rx rings for packet processing. 8516 * 8517 * The chip has been shut down and the driver detached from 8518 * the networking, so no interrupts or new tx packets will 8519 * end up in the driver. tp->{tx,}lock are held and thus 8520 * we may not sleep. 8521 */ 8522 static int tg3_init_rings(struct tg3 *tp) 8523 { 8524 int i; 8525 8526 /* Free up all the SKBs. 
*/ 8527 tg3_free_rings(tp); 8528 8529 for (i = 0; i < tp->irq_cnt; i++) { 8530 struct tg3_napi *tnapi = &tp->napi[i]; 8531 8532 tnapi->last_tag = 0; 8533 tnapi->last_irq_tag = 0; 8534 tnapi->hw_status->status = 0; 8535 tnapi->hw_status->status_tag = 0; 8536 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); 8537 8538 tnapi->tx_prod = 0; 8539 tnapi->tx_cons = 0; 8540 if (tnapi->tx_ring) 8541 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES); 8542 8543 tnapi->rx_rcb_ptr = 0; 8544 if (tnapi->rx_rcb) 8545 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); 8546 8547 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) { 8548 tg3_free_rings(tp); 8549 return -ENOMEM; 8550 } 8551 } 8552 8553 return 0; 8554 } 8555 8556 static void tg3_mem_tx_release(struct tg3 *tp) 8557 { 8558 int i; 8559 8560 for (i = 0; i < tp->irq_max; i++) { 8561 struct tg3_napi *tnapi = &tp->napi[i]; 8562 8563 if (tnapi->tx_ring) { 8564 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES, 8565 tnapi->tx_ring, tnapi->tx_desc_mapping); 8566 tnapi->tx_ring = NULL; 8567 } 8568 8569 kfree(tnapi->tx_buffers); 8570 tnapi->tx_buffers = NULL; 8571 } 8572 } 8573 8574 static int tg3_mem_tx_acquire(struct tg3 *tp) 8575 { 8576 int i; 8577 struct tg3_napi *tnapi = &tp->napi[0]; 8578 8579 /* If multivector TSS is enabled, vector 0 does not handle 8580 * tx interrupts. Don't allocate any resources for it. 8581 */ 8582 if (tg3_flag(tp, ENABLE_TSS)) 8583 tnapi++; 8584 8585 for (i = 0; i < tp->txq_cnt; i++, tnapi++) { 8586 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) * 8587 TG3_TX_RING_SIZE, GFP_KERNEL); 8588 if (!tnapi->tx_buffers) 8589 goto err_out; 8590 8591 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev, 8592 TG3_TX_RING_BYTES, 8593 &tnapi->tx_desc_mapping, 8594 GFP_KERNEL); 8595 if (!tnapi->tx_ring) 8596 goto err_out; 8597 } 8598 8599 return 0; 8600 8601 err_out: 8602 tg3_mem_tx_release(tp); 8603 return -ENOMEM; 8604 } 8605 8606 static void tg3_mem_rx_release(struct tg3 *tp) 8607 { 8608 int i; 8609 8610 for (i = 0; i < tp->irq_max; i++) { 8611 struct tg3_napi *tnapi = &tp->napi[i]; 8612 8613 tg3_rx_prodring_fini(tp, &tnapi->prodring); 8614 8615 if (!tnapi->rx_rcb) 8616 continue; 8617 8618 dma_free_coherent(&tp->pdev->dev, 8619 TG3_RX_RCB_RING_BYTES(tp), 8620 tnapi->rx_rcb, 8621 tnapi->rx_rcb_mapping); 8622 tnapi->rx_rcb = NULL; 8623 } 8624 } 8625 8626 static int tg3_mem_rx_acquire(struct tg3 *tp) 8627 { 8628 unsigned int i, limit; 8629 8630 limit = tp->rxq_cnt; 8631 8632 /* If RSS is enabled, we need a (dummy) producer ring 8633 * set on vector zero. This is the true hw prodring. 8634 */ 8635 if (tg3_flag(tp, ENABLE_RSS)) 8636 limit++; 8637 8638 for (i = 0; i < limit; i++) { 8639 struct tg3_napi *tnapi = &tp->napi[i]; 8640 8641 if (tg3_rx_prodring_init(tp, &tnapi->prodring)) 8642 goto err_out; 8643 8644 /* If multivector RSS is enabled, vector 0 8645 * does not handle rx or tx interrupts. 8646 * Don't allocate any resources for it. 8647 */ 8648 if (!i && tg3_flag(tp, ENABLE_RSS)) 8649 continue; 8650 8651 tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev, 8652 TG3_RX_RCB_RING_BYTES(tp), 8653 &tnapi->rx_rcb_mapping, 8654 GFP_KERNEL); 8655 if (!tnapi->rx_rcb) 8656 goto err_out; 8657 } 8658 8659 return 0; 8660 8661 err_out: 8662 tg3_mem_rx_release(tp); 8663 return -ENOMEM; 8664 } 8665 8666 /* 8667 * Must not be invoked with interrupt sources disabled and 8668 * the hardware shut down.
8669 */ 8670 static void tg3_free_consistent(struct tg3 *tp) 8671 { 8672 int i; 8673 8674 for (i = 0; i < tp->irq_cnt; i++) { 8675 struct tg3_napi *tnapi = &tp->napi[i]; 8676 8677 if (tnapi->hw_status) { 8678 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE, 8679 tnapi->hw_status, 8680 tnapi->status_mapping); 8681 tnapi->hw_status = NULL; 8682 } 8683 } 8684 8685 tg3_mem_rx_release(tp); 8686 tg3_mem_tx_release(tp); 8687 8688 if (tp->hw_stats) { 8689 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats), 8690 tp->hw_stats, tp->stats_mapping); 8691 tp->hw_stats = NULL; 8692 } 8693 } 8694 8695 /* 8696 * Must not be invoked with interrupt sources disabled and 8697 * the hardware shut down. Can sleep. 8698 */ 8699 static int tg3_alloc_consistent(struct tg3 *tp) 8700 { 8701 int i; 8702 8703 tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev, 8704 sizeof(struct tg3_hw_stats), 8705 &tp->stats_mapping, GFP_KERNEL); 8706 if (!tp->hw_stats) 8707 goto err_out; 8708 8709 for (i = 0; i < tp->irq_cnt; i++) { 8710 struct tg3_napi *tnapi = &tp->napi[i]; 8711 struct tg3_hw_status *sblk; 8712 8713 tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev, 8714 TG3_HW_STATUS_SIZE, 8715 &tnapi->status_mapping, 8716 GFP_KERNEL); 8717 if (!tnapi->hw_status) 8718 goto err_out; 8719 8720 sblk = tnapi->hw_status; 8721 8722 if (tg3_flag(tp, ENABLE_RSS)) { 8723 u16 *prodptr = NULL; 8724 8725 /* 8726 * When RSS is enabled, the status block format changes 8727 * slightly. The "rx_jumbo_consumer", "reserved", 8728 * and "rx_mini_consumer" members get mapped to the 8729 * other three rx return ring producer indexes. 8730 */ 8731 switch (i) { 8732 case 1: 8733 prodptr = &sblk->idx[0].rx_producer; 8734 break; 8735 case 2: 8736 prodptr = &sblk->rx_jumbo_consumer; 8737 break; 8738 case 3: 8739 prodptr = &sblk->reserved; 8740 break; 8741 case 4: 8742 prodptr = &sblk->rx_mini_consumer; 8743 break; 8744 } 8745 tnapi->rx_rcb_prod_idx = prodptr; 8746 } else { 8747 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer; 8748 } 8749 } 8750 8751 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp)) 8752 goto err_out; 8753 8754 return 0; 8755 8756 err_out: 8757 tg3_free_consistent(tp); 8758 return -ENOMEM; 8759 } 8760 8761 #define MAX_WAIT_CNT 1000 8762 8763 /* To stop a block, clear the enable bit and poll till it 8764 * clears. tp->lock is held. 8765 */ 8766 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent) 8767 { 8768 unsigned int i; 8769 u32 val; 8770 8771 if (tg3_flag(tp, 5705_PLUS)) { 8772 switch (ofs) { 8773 case RCVLSC_MODE: 8774 case DMAC_MODE: 8775 case MBFREE_MODE: 8776 case BUFMGR_MODE: 8777 case MEMARB_MODE: 8778 /* We can't enable/disable these bits of the 8779 * 5705/5750, just say success. 8780 */ 8781 return 0; 8782 8783 default: 8784 break; 8785 } 8786 } 8787 8788 val = tr32(ofs); 8789 val &= ~enable_bit; 8790 tw32_f(ofs, val); 8791 8792 for (i = 0; i < MAX_WAIT_CNT; i++) { 8793 if (pci_channel_offline(tp->pdev)) { 8794 dev_err(&tp->pdev->dev, 8795 "tg3_stop_block device offline, " 8796 "ofs=%lx enable_bit=%x\n", 8797 ofs, enable_bit); 8798 return -ENODEV; 8799 } 8800 8801 udelay(100); 8802 val = tr32(ofs); 8803 if ((val & enable_bit) == 0) 8804 break; 8805 } 8806 8807 if (i == MAX_WAIT_CNT && !silent) { 8808 dev_err(&tp->pdev->dev, 8809 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n", 8810 ofs, enable_bit); 8811 return -ENODEV; 8812 } 8813 8814 return 0; 8815 } 8816 8817 /* tp->lock is held.
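 *
 * tg3_abort_hw() quiesces the chip in data-flow order: the
 * receive path (MAC RX mode, then the RCV* blocks) is stopped
 * first, then the send path (SND* blocks and the read DMA
 * engine), then the MAC transmitter, host coalescing and the
 * write DMA engine, with the buffer manager and memory arbiter
 * shut down last.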
*/ 8818 static int tg3_abort_hw(struct tg3 *tp, bool silent) 8819 { 8820 int i, err; 8821 8822 tg3_disable_ints(tp); 8823 8824 if (pci_channel_offline(tp->pdev)) { 8825 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE); 8826 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE; 8827 err = -ENODEV; 8828 goto err_no_dev; 8829 } 8830 8831 tp->rx_mode &= ~RX_MODE_ENABLE; 8832 tw32_f(MAC_RX_MODE, tp->rx_mode); 8833 udelay(10); 8834 8835 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent); 8836 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent); 8837 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent); 8838 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent); 8839 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent); 8840 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent); 8841 8842 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent); 8843 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent); 8844 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent); 8845 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent); 8846 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent); 8847 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent); 8848 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent); 8849 8850 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE; 8851 tw32_f(MAC_MODE, tp->mac_mode); 8852 udelay(40); 8853 8854 tp->tx_mode &= ~TX_MODE_ENABLE; 8855 tw32_f(MAC_TX_MODE, tp->tx_mode); 8856 8857 for (i = 0; i < MAX_WAIT_CNT; i++) { 8858 udelay(100); 8859 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE)) 8860 break; 8861 } 8862 if (i >= MAX_WAIT_CNT) { 8863 dev_err(&tp->pdev->dev, 8864 "%s timed out, TX_MODE_ENABLE will not clear " 8865 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE)); 8866 err |= -ENODEV; 8867 } 8868 8869 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent); 8870 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent); 8871 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent); 8872 8873 tw32(FTQ_RESET, 0xffffffff); 8874 tw32(FTQ_RESET, 0x00000000); 8875 8876 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent); 8877 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent); 8878 8879 err_no_dev: 8880 for (i = 0; i < tp->irq_cnt; i++) { 8881 struct tg3_napi *tnapi = &tp->napi[i]; 8882 if (tnapi->hw_status) 8883 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); 8884 } 8885 8886 return err; 8887 } 8888 8889 /* Save PCI command register before chip reset */ 8890 static void tg3_save_pci_state(struct tg3 *tp) 8891 { 8892 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd); 8893 } 8894 8895 /* Restore PCI state after chip reset */ 8896 static void tg3_restore_pci_state(struct tg3 *tp) 8897 { 8898 u32 val; 8899 8900 /* Re-enable indirect register accesses. */ 8901 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, 8902 tp->misc_host_ctrl); 8903 8904 /* Set MAX PCI retry to zero. */ 8905 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE); 8906 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 && 8907 tg3_flag(tp, PCIX_MODE)) 8908 val |= PCISTATE_RETRY_SAME_DMA; 8909 /* Allow reads and writes to the APE register and memory space. 
*/ 8910 if (tg3_flag(tp, ENABLE_APE)) 8911 val |= PCISTATE_ALLOW_APE_CTLSPC_WR | 8912 PCISTATE_ALLOW_APE_SHMEM_WR | 8913 PCISTATE_ALLOW_APE_PSPACE_WR; 8914 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val); 8915 8916 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd); 8917 8918 if (!tg3_flag(tp, PCI_EXPRESS)) { 8919 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, 8920 tp->pci_cacheline_sz); 8921 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, 8922 tp->pci_lat_timer); 8923 } 8924 8925 /* Make sure PCI-X relaxed ordering bit is clear. */ 8926 if (tg3_flag(tp, PCIX_MODE)) { 8927 u16 pcix_cmd; 8928 8929 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 8930 &pcix_cmd); 8931 pcix_cmd &= ~PCI_X_CMD_ERO; 8932 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 8933 pcix_cmd); 8934 } 8935 8936 if (tg3_flag(tp, 5780_CLASS)) { 8937 8938 /* Chip reset on 5780 will reset MSI enable bit, 8939 * so need to restore it. 8940 */ 8941 if (tg3_flag(tp, USING_MSI)) { 8942 u16 ctrl; 8943 8944 pci_read_config_word(tp->pdev, 8945 tp->msi_cap + PCI_MSI_FLAGS, 8946 &ctrl); 8947 pci_write_config_word(tp->pdev, 8948 tp->msi_cap + PCI_MSI_FLAGS, 8949 ctrl | PCI_MSI_FLAGS_ENABLE); 8950 val = tr32(MSGINT_MODE); 8951 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE); 8952 } 8953 } 8954 } 8955 8956 static void tg3_override_clk(struct tg3 *tp) 8957 { 8958 u32 val; 8959 8960 switch (tg3_asic_rev(tp)) { 8961 case ASIC_REV_5717: 8962 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE); 8963 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val | 8964 TG3_CPMU_MAC_ORIDE_ENABLE); 8965 break; 8966 8967 case ASIC_REV_5719: 8968 case ASIC_REV_5720: 8969 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN); 8970 break; 8971 8972 default: 8973 return; 8974 } 8975 } 8976 8977 static void tg3_restore_clk(struct tg3 *tp) 8978 { 8979 u32 val; 8980 8981 switch (tg3_asic_rev(tp)) { 8982 case ASIC_REV_5717: 8983 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE); 8984 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, 8985 val & ~TG3_CPMU_MAC_ORIDE_ENABLE); 8986 break; 8987 8988 case ASIC_REV_5719: 8989 case ASIC_REV_5720: 8990 val = tr32(TG3_CPMU_CLCK_ORIDE); 8991 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN); 8992 break; 8993 8994 default: 8995 return; 8996 } 8997 } 8998 8999 /* tp->lock is held. */ 9000 static int tg3_chip_reset(struct tg3 *tp) 9001 { 9002 u32 val; 9003 void (*write_op)(struct tg3 *, u32, u32); 9004 int i, err; 9005 9006 if (!pci_device_is_present(tp->pdev)) 9007 return -ENODEV; 9008 9009 tg3_nvram_lock(tp); 9010 9011 tg3_ape_lock(tp, TG3_APE_LOCK_GRC); 9012 9013 /* No matching tg3_nvram_unlock() after this because 9014 * chip reset below will undo the nvram lock. 9015 */ 9016 tp->nvram_lock_cnt = 0; 9017 9018 /* GRC_MISC_CFG core clock reset will clear the memory 9019 * enable bit in PCI register 4 and the MSI enable bit 9020 * on some chips, so we save relevant registers here. 9021 */ 9022 tg3_save_pci_state(tp); 9023 9024 if (tg3_asic_rev(tp) == ASIC_REV_5752 || 9025 tg3_flag(tp, 5755_PLUS)) 9026 tw32(GRC_FASTBOOT_PC, 0); 9027 9028 /* 9029 * We must avoid the readl() that normally takes place. 9030 * It locks machines, causes machine checks, and other 9031 * fun things. So, temporarily disable the 5701 9032 * hardware workaround, while we do the reset. 
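 * (Concretely: tp->write32 normally points at
 * tg3_write_flush_reg32(), which performs a read back after
 * every register write precisely to flush posted writes; we
 * temporarily swap in the plain tg3_write32() so no readl()
 * happens while the core is in reset.)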
9033 */ 9034 write_op = tp->write32; 9035 if (write_op == tg3_write_flush_reg32) 9036 tp->write32 = tg3_write32; 9037 9038 /* Prevent the irq handler from reading or writing PCI registers 9039 * during chip reset when the memory enable bit in the PCI command 9040 * register may be cleared. The chip does not generate interrupts 9041 * at this time, but the irq handler may still be called due to irq 9042 * sharing or irqpoll. 9043 */ 9044 tg3_flag_set(tp, CHIP_RESETTING); 9045 for (i = 0; i < tp->irq_cnt; i++) { 9046 struct tg3_napi *tnapi = &tp->napi[i]; 9047 if (tnapi->hw_status) { 9048 tnapi->hw_status->status = 0; 9049 tnapi->hw_status->status_tag = 0; 9050 } 9051 tnapi->last_tag = 0; 9052 tnapi->last_irq_tag = 0; 9053 } 9054 smp_mb(); 9055 9056 for (i = 0; i < tp->irq_cnt; i++) 9057 synchronize_irq(tp->napi[i].irq_vec); 9058 9059 if (tg3_asic_rev(tp) == ASIC_REV_57780) { 9060 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; 9061 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); 9062 } 9063 9064 /* do the reset */ 9065 val = GRC_MISC_CFG_CORECLK_RESET; 9066 9067 if (tg3_flag(tp, PCI_EXPRESS)) { 9068 /* Force PCIe 1.0a mode */ 9069 if (tg3_asic_rev(tp) != ASIC_REV_5785 && 9070 !tg3_flag(tp, 57765_PLUS) && 9071 tr32(TG3_PCIE_PHY_TSTCTL) == 9072 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM)) 9073 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM); 9074 9075 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) { 9076 tw32(GRC_MISC_CFG, (1 << 29)); 9077 val |= (1 << 29); 9078 } 9079 } 9080 9081 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 9082 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET); 9083 tw32(GRC_VCPU_EXT_CTRL, 9084 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU); 9085 } 9086 9087 /* Set the clock to the highest frequency to avoid timeouts. With link 9088 * aware mode, the clock speed could be slow and bootcode does not 9089 * complete within the expected time. Override the clock to allow the 9090 * bootcode to finish sooner and then restore it. 9091 */ 9092 tg3_override_clk(tp); 9093 9094 /* Manage gphy power for all CPMU absent PCIe devices. */ 9095 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT)) 9096 val |= GRC_MISC_CFG_KEEP_GPHY_POWER; 9097 9098 tw32(GRC_MISC_CFG, val); 9099 9100 /* restore 5701 hardware bug workaround write method */ 9101 tp->write32 = write_op; 9102 9103 /* Unfortunately, we have to delay before the PCI read back. 9104 * Some 575X chips will not even respond to a PCI cfg access 9105 * when the reset command is given to the chip. 9106 * 9107 * How do these hardware designers expect things to work 9108 * properly if the PCI write is posted for a long period 9109 * of time? It is always necessary to have some method by 9110 * which a register read back can occur to push the write 9111 * out which does the reset. 9112 * 9113 * For most tg3 variants the trick below has worked. 9114 * Ho hum... 9115 */ 9116 udelay(120); 9117 9118 /* Flush PCI posted writes. The normal MMIO registers 9119 * are inaccessible at this time so this is the only 9120 * way to make this work reliably (actually, this is no longer 9121 * the case, see above). I tried to use indirect 9122 * register read/write but this upset some 5701 variants.
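 * The PCI_COMMAND config space read below is that read back:
 * configuration reads are non-posted, so they cannot complete
 * until the posted GRC_MISC_CFG write that triggers the reset
 * has reached the device.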
9123 */ 9124 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val); 9125 9126 udelay(120); 9127 9128 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) { 9129 u16 val16; 9130 9131 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) { 9132 int j; 9133 u32 cfg_val; 9134 9135 /* Wait for link training to complete. */ 9136 for (j = 0; j < 5000; j++) 9137 udelay(100); 9138 9139 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val); 9140 pci_write_config_dword(tp->pdev, 0xc4, 9141 cfg_val | (1 << 15)); 9142 } 9143 9144 /* Clear the "no snoop" and "relaxed ordering" bits. */ 9145 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN; 9146 /* 9147 * Older PCIe devices only support the 128 byte 9148 * MPS setting. Enforce the restriction. 9149 */ 9150 if (!tg3_flag(tp, CPMU_PRESENT)) 9151 val16 |= PCI_EXP_DEVCTL_PAYLOAD; 9152 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16); 9153 9154 /* Clear error status */ 9155 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA, 9156 PCI_EXP_DEVSTA_CED | 9157 PCI_EXP_DEVSTA_NFED | 9158 PCI_EXP_DEVSTA_FED | 9159 PCI_EXP_DEVSTA_URD); 9160 } 9161 9162 tg3_restore_pci_state(tp); 9163 9164 tg3_flag_clear(tp, CHIP_RESETTING); 9165 tg3_flag_clear(tp, ERROR_PROCESSED); 9166 9167 val = 0; 9168 if (tg3_flag(tp, 5780_CLASS)) 9169 val = tr32(MEMARB_MODE); 9170 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); 9171 9172 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) { 9173 tg3_stop_fw(tp); 9174 tw32(0x5000, 0x400); 9175 } 9176 9177 if (tg3_flag(tp, IS_SSB_CORE)) { 9178 /* 9179 * BCM4785: In order to avoid repercussions from using 9180 * potentially defective internal ROM, stop the Rx RISC CPU, 9181 * which is not required. 9182 */ 9183 tg3_stop_fw(tp); 9184 tg3_halt_cpu(tp, RX_CPU_BASE); 9185 } 9186 9187 err = tg3_poll_fw(tp); 9188 if (err) 9189 return err; 9190 9191 tw32(GRC_MODE, tp->grc_mode); 9192 9193 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) { 9194 val = tr32(0xc4); 9195 9196 tw32(0xc4, val | (1 << 15)); 9197 } 9198 9199 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 && 9200 tg3_asic_rev(tp) == ASIC_REV_5705) { 9201 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE; 9202 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) 9203 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN; 9204 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); 9205 } 9206 9207 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 9208 tp->mac_mode = MAC_MODE_PORT_MODE_TBI; 9209 val = tp->mac_mode; 9210 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { 9211 tp->mac_mode = MAC_MODE_PORT_MODE_GMII; 9212 val = tp->mac_mode; 9213 } else 9214 val = 0; 9215 9216 tw32_f(MAC_MODE, val); 9217 udelay(40); 9218 9219 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC); 9220 9221 tg3_mdio_start(tp); 9222 9223 if (tg3_flag(tp, PCI_EXPRESS) && 9224 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 && 9225 tg3_asic_rev(tp) != ASIC_REV_5785 && 9226 !tg3_flag(tp, 57765_PLUS)) { 9227 val = tr32(0x7c00); 9228 9229 tw32(0x7c00, val | (1 << 25)); 9230 } 9231 9232 tg3_restore_clk(tp); 9233 9234 /* Reprobe ASF enable state. 
*/ 9235 tg3_flag_clear(tp, ENABLE_ASF); 9236 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK | 9237 TG3_PHYFLG_KEEP_LINK_ON_PWRDN); 9238 9239 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE); 9240 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); 9241 if (val == NIC_SRAM_DATA_SIG_MAGIC) { 9242 u32 nic_cfg; 9243 9244 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); 9245 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { 9246 tg3_flag_set(tp, ENABLE_ASF); 9247 tp->last_event_jiffies = jiffies; 9248 if (tg3_flag(tp, 5750_PLUS)) 9249 tg3_flag_set(tp, ASF_NEW_HANDSHAKE); 9250 9251 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg); 9252 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK) 9253 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK; 9254 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID) 9255 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN; 9256 } 9257 } 9258 9259 return 0; 9260 } 9261 9262 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *); 9263 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *); 9264 static void __tg3_set_rx_mode(struct net_device *); 9265 9266 /* tp->lock is held. */ 9267 static int tg3_halt(struct tg3 *tp, int kind, bool silent) 9268 { 9269 int err; 9270 9271 tg3_stop_fw(tp); 9272 9273 tg3_write_sig_pre_reset(tp, kind); 9274 9275 tg3_abort_hw(tp, silent); 9276 err = tg3_chip_reset(tp); 9277 9278 __tg3_set_mac_addr(tp, false); 9279 9280 tg3_write_sig_legacy(tp, kind); 9281 tg3_write_sig_post_reset(tp, kind); 9282 9283 if (tp->hw_stats) { 9284 /* Save the stats across chip resets... */ 9285 tg3_get_nstats(tp, &tp->net_stats_prev); 9286 tg3_get_estats(tp, &tp->estats_prev); 9287 9288 /* And make sure the next sample is new data */ 9289 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); 9290 } 9291 9292 return err; 9293 } 9294 9295 static int tg3_set_mac_addr(struct net_device *dev, void *p) 9296 { 9297 struct tg3 *tp = netdev_priv(dev); 9298 struct sockaddr *addr = p; 9299 int err = 0; 9300 bool skip_mac_1 = false; 9301 9302 if (!is_valid_ether_addr(addr->sa_data)) 9303 return -EADDRNOTAVAIL; 9304 9305 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 9306 9307 if (!netif_running(dev)) 9308 return 0; 9309 9310 if (tg3_flag(tp, ENABLE_ASF)) { 9311 u32 addr0_high, addr0_low, addr1_high, addr1_low; 9312 9313 addr0_high = tr32(MAC_ADDR_0_HIGH); 9314 addr0_low = tr32(MAC_ADDR_0_LOW); 9315 addr1_high = tr32(MAC_ADDR_1_HIGH); 9316 addr1_low = tr32(MAC_ADDR_1_LOW); 9317 9318 /* Skip MAC addr 1 if ASF is using it. */ 9319 if ((addr0_high != addr1_high || addr0_low != addr1_low) && 9320 !(addr1_high == 0 && addr1_low == 0)) 9321 skip_mac_1 = true; 9322 } 9323 spin_lock_bh(&tp->lock); 9324 __tg3_set_mac_addr(tp, skip_mac_1); 9325 __tg3_set_rx_mode(dev); 9326 spin_unlock_bh(&tp->lock); 9327 9328 return err; 9329 } 9330 9331 /* tp->lock is held. 
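 *
 * Each TG3_BDINFO control block in NIC SRAM has this layout
 * (offsets relative to bdinfo_addr):
 *
 *   TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH: DMA address bits 63:32
 *   TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW:  DMA address bits 31:0
 *   TG3_BDINFO_MAXLEN_FLAGS:                   (maxlen << 16) | flags
 *   TG3_BDINFO_NIC_ADDR:                       descriptor location in
 *                                              NIC SRAM (not written on
 *                                              5705_PLUS devices)
 *
 * For example, tg3_tx_rcbs_init() below programs a send ring with
 * tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
 * TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT,
 * NIC_SRAM_TX_BUFFER_DESC).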
*/ 9332 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr, 9333 dma_addr_t mapping, u32 maxlen_flags, 9334 u32 nic_addr) 9335 { 9336 tg3_write_mem(tp, 9337 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH), 9338 ((u64) mapping >> 32)); 9339 tg3_write_mem(tp, 9340 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW), 9341 ((u64) mapping & 0xffffffff)); 9342 tg3_write_mem(tp, 9343 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS), 9344 maxlen_flags); 9345 9346 if (!tg3_flag(tp, 5705_PLUS)) 9347 tg3_write_mem(tp, 9348 (bdinfo_addr + TG3_BDINFO_NIC_ADDR), 9349 nic_addr); 9350 } 9351 9352 9353 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec) 9354 { 9355 int i = 0; 9356 9357 if (!tg3_flag(tp, ENABLE_TSS)) { 9358 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs); 9359 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames); 9360 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq); 9361 } else { 9362 tw32(HOSTCC_TXCOL_TICKS, 0); 9363 tw32(HOSTCC_TXMAX_FRAMES, 0); 9364 tw32(HOSTCC_TXCOAL_MAXF_INT, 0); 9365 9366 for (; i < tp->txq_cnt; i++) { 9367 u32 reg; 9368 9369 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18; 9370 tw32(reg, ec->tx_coalesce_usecs); 9371 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18; 9372 tw32(reg, ec->tx_max_coalesced_frames); 9373 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18; 9374 tw32(reg, ec->tx_max_coalesced_frames_irq); 9375 } 9376 } 9377 9378 for (; i < tp->irq_max - 1; i++) { 9379 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0); 9380 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0); 9381 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); 9382 } 9383 } 9384 9385 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec) 9386 { 9387 int i = 0; 9388 u32 limit = tp->rxq_cnt; 9389 9390 if (!tg3_flag(tp, ENABLE_RSS)) { 9391 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs); 9392 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames); 9393 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq); 9394 limit--; 9395 } else { 9396 tw32(HOSTCC_RXCOL_TICKS, 0); 9397 tw32(HOSTCC_RXMAX_FRAMES, 0); 9398 tw32(HOSTCC_RXCOAL_MAXF_INT, 0); 9399 } 9400 9401 for (; i < limit; i++) { 9402 u32 reg; 9403 9404 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18; 9405 tw32(reg, ec->rx_coalesce_usecs); 9406 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18; 9407 tw32(reg, ec->rx_max_coalesced_frames); 9408 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18; 9409 tw32(reg, ec->rx_max_coalesced_frames_irq); 9410 } 9411 9412 for (; i < tp->irq_max - 1; i++) { 9413 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0); 9414 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0); 9415 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); 9416 } 9417 } 9418 9419 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) 9420 { 9421 tg3_coal_tx_init(tp, ec); 9422 tg3_coal_rx_init(tp, ec); 9423 9424 if (!tg3_flag(tp, 5705_PLUS)) { 9425 u32 val = ec->stats_block_coalesce_usecs; 9426 9427 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq); 9428 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq); 9429 9430 if (!tp->link_up) 9431 val = 0; 9432 9433 tw32(HOSTCC_STAT_COAL_TICKS, val); 9434 } 9435 } 9436 9437 /* tp->lock is held. */ 9438 static void tg3_tx_rcbs_disable(struct tg3 *tp) 9439 { 9440 u32 txrcb, limit; 9441 9442 /* Disable all transmit rings but the first. 
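 * Send ring control blocks sit back to back in NIC SRAM starting
 * at NIC_SRAM_SEND_RCB, TG3_BDINFO_SIZE bytes apart; writing
 * BDINFO_FLAGS_DISABLED into a block's MAXLEN_FLAGS word shuts
 * that ring off. The loop below starts one block past the first
 * ring and runs up to the chip-specific ring count.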
*/ 9443 if (!tg3_flag(tp, 5705_PLUS)) 9444 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16; 9445 else if (tg3_flag(tp, 5717_PLUS)) 9446 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4; 9447 else if (tg3_flag(tp, 57765_CLASS) || 9448 tg3_asic_rev(tp) == ASIC_REV_5762) 9449 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2; 9450 else 9451 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; 9452 9453 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; 9454 txrcb < limit; txrcb += TG3_BDINFO_SIZE) 9455 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS, 9456 BDINFO_FLAGS_DISABLED); 9457 } 9458 9459 /* tp->lock is held. */ 9460 static void tg3_tx_rcbs_init(struct tg3 *tp) 9461 { 9462 int i = 0; 9463 u32 txrcb = NIC_SRAM_SEND_RCB; 9464 9465 if (tg3_flag(tp, ENABLE_TSS)) 9466 i++; 9467 9468 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) { 9469 struct tg3_napi *tnapi = &tp->napi[i]; 9470 9471 if (!tnapi->tx_ring) 9472 continue; 9473 9474 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, 9475 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT), 9476 NIC_SRAM_TX_BUFFER_DESC); 9477 } 9478 } 9479 9480 /* tp->lock is held. */ 9481 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp) 9482 { 9483 u32 rxrcb, limit; 9484 9485 /* Disable all receive return rings but the first. */ 9486 if (tg3_flag(tp, 5717_PLUS)) 9487 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17; 9488 else if (!tg3_flag(tp, 5705_PLUS)) 9489 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16; 9490 else if (tg3_asic_rev(tp) == ASIC_REV_5755 || 9491 tg3_asic_rev(tp) == ASIC_REV_5762 || 9492 tg3_flag(tp, 57765_CLASS)) 9493 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4; 9494 else 9495 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; 9496 9497 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; 9498 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE) 9499 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS, 9500 BDINFO_FLAGS_DISABLED); 9501 } 9502 9503 /* tp->lock is held. */ 9504 static void tg3_rx_ret_rcbs_init(struct tg3 *tp) 9505 { 9506 int i = 0; 9507 u32 rxrcb = NIC_SRAM_RCV_RET_RCB; 9508 9509 if (tg3_flag(tp, ENABLE_RSS)) 9510 i++; 9511 9512 for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) { 9513 struct tg3_napi *tnapi = &tp->napi[i]; 9514 9515 if (!tnapi->rx_rcb) 9516 continue; 9517 9518 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, 9519 (tp->rx_ret_ring_mask + 1) << 9520 BDINFO_FLAGS_MAXLEN_SHIFT, 0); 9521 } 9522 } 9523 9524 /* tp->lock is held. */ 9525 static void tg3_rings_reset(struct tg3 *tp) 9526 { 9527 int i; 9528 u32 stblk; 9529 struct tg3_napi *tnapi = &tp->napi[0]; 9530 9531 tg3_tx_rcbs_disable(tp); 9532 9533 tg3_rx_ret_rcbs_disable(tp); 9534 9535 /* Disable interrupts */ 9536 tw32_mailbox_f(tp->napi[0].int_mbox, 1); 9537 tp->napi[0].chk_msi_cnt = 0; 9538 tp->napi[0].last_rx_cons = 0; 9539 tp->napi[0].last_tx_cons = 0; 9540 9541 /* Zero mailbox registers. 
*/ 9542 if (tg3_flag(tp, SUPPORT_MSIX)) { 9543 for (i = 1; i < tp->irq_max; i++) { 9544 tp->napi[i].tx_prod = 0; 9545 tp->napi[i].tx_cons = 0; 9546 if (tg3_flag(tp, ENABLE_TSS)) 9547 tw32_mailbox(tp->napi[i].prodmbox, 0); 9548 tw32_rx_mbox(tp->napi[i].consmbox, 0); 9549 tw32_mailbox_f(tp->napi[i].int_mbox, 1); 9550 tp->napi[i].chk_msi_cnt = 0; 9551 tp->napi[i].last_rx_cons = 0; 9552 tp->napi[i].last_tx_cons = 0; 9553 } 9554 if (!tg3_flag(tp, ENABLE_TSS)) 9555 tw32_mailbox(tp->napi[0].prodmbox, 0); 9556 } else { 9557 tp->napi[0].tx_prod = 0; 9558 tp->napi[0].tx_cons = 0; 9559 tw32_mailbox(tp->napi[0].prodmbox, 0); 9560 tw32_rx_mbox(tp->napi[0].consmbox, 0); 9561 } 9562 9563 /* Make sure the NIC-based send BD rings are disabled. */ 9564 if (!tg3_flag(tp, 5705_PLUS)) { 9565 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW; 9566 for (i = 0; i < 16; i++) 9567 tw32_tx_mbox(mbox + i * 8, 0); 9568 } 9569 9570 /* Clear status block in ram. */ 9571 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); 9572 9573 /* Set status block DMA address */ 9574 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, 9575 ((u64) tnapi->status_mapping >> 32)); 9576 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, 9577 ((u64) tnapi->status_mapping & 0xffffffff)); 9578 9579 stblk = HOSTCC_STATBLCK_RING1; 9580 9581 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) { 9582 u64 mapping = (u64)tnapi->status_mapping; 9583 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32); 9584 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff); 9585 stblk += 8; 9586 9587 /* Clear status block in ram. */ 9588 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); 9589 } 9590 9591 tg3_tx_rcbs_init(tp); 9592 tg3_rx_ret_rcbs_init(tp); 9593 } 9594 9595 static void tg3_setup_rxbd_thresholds(struct tg3 *tp) 9596 { 9597 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh; 9598 9599 if (!tg3_flag(tp, 5750_PLUS) || 9600 tg3_flag(tp, 5780_CLASS) || 9601 tg3_asic_rev(tp) == ASIC_REV_5750 || 9602 tg3_asic_rev(tp) == ASIC_REV_5752 || 9603 tg3_flag(tp, 57765_PLUS)) 9604 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700; 9605 else if (tg3_asic_rev(tp) == ASIC_REV_5755 || 9606 tg3_asic_rev(tp) == ASIC_REV_5787) 9607 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755; 9608 else 9609 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906; 9610 9611 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post); 9612 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1); 9613 9614 val = min(nic_rep_thresh, host_rep_thresh); 9615 tw32(RCVBDI_STD_THRESH, val); 9616 9617 if (tg3_flag(tp, 57765_PLUS)) 9618 tw32(STD_REPLENISH_LWM, bdcache_maxcnt); 9619 9620 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS)) 9621 return; 9622 9623 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700; 9624 9625 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1); 9626 9627 val = min(bdcache_maxcnt / 2, host_rep_thresh); 9628 tw32(RCVBDI_JUMBO_THRESH, val); 9629 9630 if (tg3_flag(tp, 57765_PLUS)) 9631 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt); 9632 } 9633 9634 static inline u32 calc_crc(unsigned char *buf, int len) 9635 { 9636 u32 reg; 9637 u32 tmp; 9638 int j, k; 9639 9640 reg = 0xffffffff; 9641 9642 for (j = 0; j < len; j++) { 9643 reg ^= buf[j]; 9644 9645 for (k = 0; k < 8; k++) { 9646 tmp = reg & 0x01; 9647 9648 reg >>= 1; 9649 9650 if (tmp) 9651 reg ^= 0xedb88320; 9652 } 9653 } 9654 9655 return ~reg; 9656 } 9657 9658 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all) 9659 { 9660 /* accept or reject all multicast frames */ 9661 
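	/* Each MAC_HASH_REG_n holds 32 bits of the 128-bit multicast
	 * hash filter: all-ones matches every group address, all-zeroes
	 * matches none. __tg3_set_rx_mode() below instead sets single
	 * filter bits derived from the CRC of each multicast address.
	 */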
tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0); 9662 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0); 9663 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0); 9664 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0); 9665 } 9666 9667 static void __tg3_set_rx_mode(struct net_device *dev) 9668 { 9669 struct tg3 *tp = netdev_priv(dev); 9670 u32 rx_mode; 9671 9672 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC | 9673 RX_MODE_KEEP_VLAN_TAG); 9674 9675 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE) 9676 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG 9677 * flag clear. 9678 */ 9679 if (!tg3_flag(tp, ENABLE_ASF)) 9680 rx_mode |= RX_MODE_KEEP_VLAN_TAG; 9681 #endif 9682 9683 if (dev->flags & IFF_PROMISC) { 9684 /* Promiscuous mode. */ 9685 rx_mode |= RX_MODE_PROMISC; 9686 } else if (dev->flags & IFF_ALLMULTI) { 9687 /* Accept all multicast. */ 9688 tg3_set_multi(tp, 1); 9689 } else if (netdev_mc_empty(dev)) { 9690 /* Reject all multicast. */ 9691 tg3_set_multi(tp, 0); 9692 } else { 9693 /* Accept one or more multicast(s). */ 9694 struct netdev_hw_addr *ha; 9695 u32 mc_filter[4] = { 0, }; 9696 u32 regidx; 9697 u32 bit; 9698 u32 crc; 9699 9700 netdev_for_each_mc_addr(ha, dev) { 9701 crc = calc_crc(ha->addr, ETH_ALEN); 9702 bit = ~crc & 0x7f; 9703 regidx = (bit & 0x60) >> 5; 9704 bit &= 0x1f; 9705 mc_filter[regidx] |= (1 << bit); 9706 } 9707 9708 tw32(MAC_HASH_REG_0, mc_filter[0]); 9709 tw32(MAC_HASH_REG_1, mc_filter[1]); 9710 tw32(MAC_HASH_REG_2, mc_filter[2]); 9711 tw32(MAC_HASH_REG_3, mc_filter[3]); 9712 } 9713 9714 if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) { 9715 rx_mode |= RX_MODE_PROMISC; 9716 } else if (!(dev->flags & IFF_PROMISC)) { 9717 /* Add all entries into the mac addr filter list */ 9718 int i = 0; 9719 struct netdev_hw_addr *ha; 9720 9721 netdev_for_each_uc_addr(ha, dev) { 9722 __tg3_set_one_mac_addr(tp, ha->addr, 9723 i + TG3_UCAST_ADDR_IDX(tp)); 9724 i++; 9725 } 9726 } 9727 9728 if (rx_mode != tp->rx_mode) { 9729 tp->rx_mode = rx_mode; 9730 tw32_f(MAC_RX_MODE, rx_mode); 9731 udelay(10); 9732 } 9733 } 9734 9735 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt) 9736 { 9737 int i; 9738 9739 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) 9740 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt); 9741 } 9742 9743 static void tg3_rss_check_indir_tbl(struct tg3 *tp) 9744 { 9745 int i; 9746 9747 if (!tg3_flag(tp, SUPPORT_MSIX)) 9748 return; 9749 9750 if (tp->rxq_cnt == 1) { 9751 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl)); 9752 return; 9753 } 9754 9755 /* Validate table against current IRQ count */ 9756 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) { 9757 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt) 9758 break; 9759 } 9760 9761 if (i != TG3_RSS_INDIR_TBL_SIZE) 9762 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt); 9763 } 9764 9765 static void tg3_rss_write_indir_tbl(struct tg3 *tp) 9766 { 9767 int i = 0; 9768 u32 reg = MAC_RSS_INDIR_TBL_0; 9769 9770 while (i < TG3_RSS_INDIR_TBL_SIZE) { 9771 u32 val = tp->rss_ind_tbl[i]; 9772 i++; 9773 for (; i % 8; i++) { 9774 val <<= 4; 9775 val |= tp->rss_ind_tbl[i]; 9776 } 9777 tw32(reg, val); 9778 reg += 4; 9779 } 9780 } 9781 9782 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp) 9783 { 9784 if (tg3_asic_rev(tp) == ASIC_REV_5719) 9785 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719; 9786 else 9787 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720; 9788 } 9789 9790 /* tp->lock is held.
*/ 9791 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy) 9792 { 9793 u32 val, rdmac_mode; 9794 int i, err, limit; 9795 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring; 9796 9797 tg3_disable_ints(tp); 9798 9799 tg3_stop_fw(tp); 9800 9801 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT); 9802 9803 if (tg3_flag(tp, INIT_COMPLETE)) 9804 tg3_abort_hw(tp, 1); 9805 9806 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && 9807 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) { 9808 tg3_phy_pull_config(tp); 9809 tg3_eee_pull_config(tp, NULL); 9810 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; 9811 } 9812 9813 /* Enable MAC control of LPI */ 9814 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) 9815 tg3_setup_eee(tp); 9816 9817 if (reset_phy) 9818 tg3_phy_reset(tp); 9819 9820 err = tg3_chip_reset(tp); 9821 if (err) 9822 return err; 9823 9824 tg3_write_sig_legacy(tp, RESET_KIND_INIT); 9825 9826 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) { 9827 val = tr32(TG3_CPMU_CTRL); 9828 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE); 9829 tw32(TG3_CPMU_CTRL, val); 9830 9831 val = tr32(TG3_CPMU_LSPD_10MB_CLK); 9832 val &= ~CPMU_LSPD_10MB_MACCLK_MASK; 9833 val |= CPMU_LSPD_10MB_MACCLK_6_25; 9834 tw32(TG3_CPMU_LSPD_10MB_CLK, val); 9835 9836 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD); 9837 val &= ~CPMU_LNK_AWARE_MACCLK_MASK; 9838 val |= CPMU_LNK_AWARE_MACCLK_6_25; 9839 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val); 9840 9841 val = tr32(TG3_CPMU_HST_ACC); 9842 val &= ~CPMU_HST_ACC_MACCLK_MASK; 9843 val |= CPMU_HST_ACC_MACCLK_6_25; 9844 tw32(TG3_CPMU_HST_ACC, val); 9845 } 9846 9847 if (tg3_asic_rev(tp) == ASIC_REV_57780) { 9848 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK; 9849 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN | 9850 PCIE_PWR_MGMT_L1_THRESH_4MS; 9851 tw32(PCIE_PWR_MGMT_THRESH, val); 9852 9853 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK; 9854 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS); 9855 9856 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR); 9857 9858 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; 9859 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); 9860 } 9861 9862 if (tg3_flag(tp, L1PLLPD_EN)) { 9863 u32 grc_mode = tr32(GRC_MODE); 9864 9865 /* Access the lower 1K of PL PCIE block registers. */ 9866 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; 9867 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL); 9868 9869 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1); 9870 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1, 9871 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN); 9872 9873 tw32(GRC_MODE, grc_mode); 9874 } 9875 9876 if (tg3_flag(tp, 57765_CLASS)) { 9877 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) { 9878 u32 grc_mode = tr32(GRC_MODE); 9879 9880 /* Access the lower 1K of PL PCIE block registers. */ 9881 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; 9882 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL); 9883 9884 val = tr32(TG3_PCIE_TLDLPL_PORT + 9885 TG3_PCIE_PL_LO_PHYCTL5); 9886 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5, 9887 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ); 9888 9889 tw32(GRC_MODE, grc_mode); 9890 } 9891 9892 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) { 9893 u32 grc_mode; 9894 9895 /* Fix transmit hangs */ 9896 val = tr32(TG3_CPMU_PADRNG_CTL); 9897 val |= TG3_CPMU_PADRNG_CTL_RDIV2; 9898 tw32(TG3_CPMU_PADRNG_CTL, val); 9899 9900 grc_mode = tr32(GRC_MODE); 9901 9902 /* Access the lower 1K of DL PCIE block registers. 
*/ 9903 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; 9904 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL); 9905 9906 val = tr32(TG3_PCIE_TLDLPL_PORT + 9907 TG3_PCIE_DL_LO_FTSMAX); 9908 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK; 9909 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX, 9910 val | TG3_PCIE_DL_LO_FTSMAX_VAL); 9911 9912 tw32(GRC_MODE, grc_mode); 9913 } 9914 9915 val = tr32(TG3_CPMU_LSPD_10MB_CLK); 9916 val &= ~CPMU_LSPD_10MB_MACCLK_MASK; 9917 val |= CPMU_LSPD_10MB_MACCLK_6_25; 9918 tw32(TG3_CPMU_LSPD_10MB_CLK, val); 9919 } 9920 9921 /* This works around an issue with Athlon chipsets on 9922 * B3 tigon3 silicon. This bit has no effect on any 9923 * other revision. But do not set this on PCI Express 9924 * chips and don't even touch the clocks if the CPMU is present. 9925 */ 9926 if (!tg3_flag(tp, CPMU_PRESENT)) { 9927 if (!tg3_flag(tp, PCI_EXPRESS)) 9928 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT; 9929 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); 9930 } 9931 9932 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 && 9933 tg3_flag(tp, PCIX_MODE)) { 9934 val = tr32(TG3PCI_PCISTATE); 9935 val |= PCISTATE_RETRY_SAME_DMA; 9936 tw32(TG3PCI_PCISTATE, val); 9937 } 9938 9939 if (tg3_flag(tp, ENABLE_APE)) { 9940 /* Allow reads and writes to the 9941 * APE register and memory space. 9942 */ 9943 val = tr32(TG3PCI_PCISTATE); 9944 val |= PCISTATE_ALLOW_APE_CTLSPC_WR | 9945 PCISTATE_ALLOW_APE_SHMEM_WR | 9946 PCISTATE_ALLOW_APE_PSPACE_WR; 9947 tw32(TG3PCI_PCISTATE, val); 9948 } 9949 9950 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) { 9951 /* Enable some hw fixes. */ 9952 val = tr32(TG3PCI_MSI_DATA); 9953 val |= (1 << 26) | (1 << 28) | (1 << 29); 9954 tw32(TG3PCI_MSI_DATA, val); 9955 } 9956 9957 /* Descriptor ring init may make accesses to the 9958 * NIC SRAM area to set up the TX descriptors, so we 9959 * can only do this after the hardware has been 9960 * successfully reset. 9961 */ 9962 err = tg3_init_rings(tp); 9963 if (err) 9964 return err; 9965 9966 if (tg3_flag(tp, 57765_PLUS)) { 9967 val = tr32(TG3PCI_DMA_RW_CTRL) & 9968 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT; 9969 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) 9970 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK; 9971 if (!tg3_flag(tp, 57765_CLASS) && 9972 tg3_asic_rev(tp) != ASIC_REV_5717 && 9973 tg3_asic_rev(tp) != ASIC_REV_5762) 9974 val |= DMA_RWCTRL_TAGGED_STAT_WA; 9975 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl); 9976 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 && 9977 tg3_asic_rev(tp) != ASIC_REV_5761) { 9978 /* This value is determined during the probe time DMA 9979 * engine test, tg3_test_dma. 9980 */ 9981 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 9982 } 9983 9984 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS | 9985 GRC_MODE_4X_NIC_SEND_RINGS | 9986 GRC_MODE_NO_TX_PHDR_CSUM | 9987 GRC_MODE_NO_RX_PHDR_CSUM); 9988 tp->grc_mode |= GRC_MODE_HOST_SENDBDS; 9989 9990 /* Pseudo-header checksum is done by hardware logic and not 9991 * the offload processors, so make the chip do the pseudo- 9992 * header checksums on receive. For transmit it is more 9993 * convenient to do the pseudo-header checksum in software 9994 * as Linux does that on transmit for us in all cases. 9995 */ 9996 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM; 9997 9998 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP; 9999 if (tp->rxptpctl) 10000 tw32(TG3_RX_PTP_CTL, 10001 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK); 10002 10003 if (tg3_flag(tp, PTP_CAPABLE)) 10004 val |= GRC_MODE_TIME_SYNC_ENABLE; 10005 10006 tw32(GRC_MODE, tp->grc_mode | val); 10007 10008 /* Set up the timer prescaler register.
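 * With the value 65 programmed below, the 66 MHz core clock is
 * divided down to a 1 MHz tick (66 MHz / (65 + 1) = 1 MHz, i.e.
 * 1 usec timer resolution), assuming the usual divide-by-(N + 1)
 * prescaler behaviour.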
Clock is always 66 MHz. */ 10009 val = tr32(GRC_MISC_CFG); 10010 val &= ~0xff; 10011 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT); 10012 tw32(GRC_MISC_CFG, val); 10013 10014 /* Initialize MBUF/DESC pool. */ 10015 if (tg3_flag(tp, 5750_PLUS)) { 10016 /* Do nothing. */ 10017 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) { 10018 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE); 10019 if (tg3_asic_rev(tp) == ASIC_REV_5704) 10020 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64); 10021 else 10022 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96); 10023 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE); 10024 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE); 10025 } else if (tg3_flag(tp, TSO_CAPABLE)) { 10026 int fw_len; 10027 10028 fw_len = tp->fw_len; 10029 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1); 10030 tw32(BUFMGR_MB_POOL_ADDR, 10031 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len); 10032 tw32(BUFMGR_MB_POOL_SIZE, 10033 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00); 10034 } 10035 10036 if (tp->dev->mtu <= ETH_DATA_LEN) { 10037 tw32(BUFMGR_MB_RDMA_LOW_WATER, 10038 tp->bufmgr_config.mbuf_read_dma_low_water); 10039 tw32(BUFMGR_MB_MACRX_LOW_WATER, 10040 tp->bufmgr_config.mbuf_mac_rx_low_water); 10041 tw32(BUFMGR_MB_HIGH_WATER, 10042 tp->bufmgr_config.mbuf_high_water); 10043 } else { 10044 tw32(BUFMGR_MB_RDMA_LOW_WATER, 10045 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo); 10046 tw32(BUFMGR_MB_MACRX_LOW_WATER, 10047 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo); 10048 tw32(BUFMGR_MB_HIGH_WATER, 10049 tp->bufmgr_config.mbuf_high_water_jumbo); 10050 } 10051 tw32(BUFMGR_DMA_LOW_WATER, 10052 tp->bufmgr_config.dma_low_water); 10053 tw32(BUFMGR_DMA_HIGH_WATER, 10054 tp->bufmgr_config.dma_high_water); 10055 10056 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE; 10057 if (tg3_asic_rev(tp) == ASIC_REV_5719) 10058 val |= BUFMGR_MODE_NO_TX_UNDERRUN; 10059 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 10060 tg3_asic_rev(tp) == ASIC_REV_5762 || 10061 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || 10062 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) 10063 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB; 10064 tw32(BUFMGR_MODE, val); 10065 for (i = 0; i < 2000; i++) { 10066 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE) 10067 break; 10068 udelay(10); 10069 } 10070 if (i >= 2000) { 10071 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__); 10072 return -ENODEV; 10073 } 10074 10075 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1) 10076 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2); 10077 10078 tg3_setup_rxbd_thresholds(tp); 10079 10080 /* Initialize TG3_BDINFO's at: 10081 * RCVDBDI_STD_BD: standard eth size rx ring 10082 * RCVDBDI_JUMBO_BD: jumbo frame rx ring 10083 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work) 10084 * 10085 * like so: 10086 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring 10087 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) | 10088 * ring attribute flags 10089 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM 10090 * 10091 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries. 10092 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries. 10093 * 10094 * The size of each ring is fixed in the firmware, but the location is 10095 * configurable.
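 *
 * For example, the writes just below load the standard ring's
 * 64-bit DMA address (tpr->rx_std_mapping) as two 32-bit halves
 * into TG3_BDINFO_HOST_ADDR, then encode either the buffer size
 * (TG3_RX_STD_DMA_SZ) or the ring size into
 * TG3_BDINFO_MAXLEN_FLAGS, depending on the chip family.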
10096 */ 10097 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, 10098 ((u64) tpr->rx_std_mapping >> 32)); 10099 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, 10100 ((u64) tpr->rx_std_mapping & 0xffffffff)); 10101 if (!tg3_flag(tp, 5717_PLUS)) 10102 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, 10103 NIC_SRAM_RX_BUFFER_DESC); 10104 10105 /* Disable the mini ring */ 10106 if (!tg3_flag(tp, 5705_PLUS)) 10107 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS, 10108 BDINFO_FLAGS_DISABLED); 10109 10110 /* Program the jumbo buffer descriptor ring control 10111 * blocks on those devices that have them. 10112 */ 10113 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || 10114 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) { 10115 10116 if (tg3_flag(tp, JUMBO_RING_ENABLE)) { 10117 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, 10118 ((u64) tpr->rx_jmb_mapping >> 32)); 10119 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, 10120 ((u64) tpr->rx_jmb_mapping & 0xffffffff)); 10121 val = TG3_RX_JMB_RING_SIZE(tp) << 10122 BDINFO_FLAGS_MAXLEN_SHIFT; 10123 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, 10124 val | BDINFO_FLAGS_USE_EXT_RECV); 10125 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) || 10126 tg3_flag(tp, 57765_CLASS) || 10127 tg3_asic_rev(tp) == ASIC_REV_5762) 10128 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, 10129 NIC_SRAM_RX_JUMBO_BUFFER_DESC); 10130 } else { 10131 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, 10132 BDINFO_FLAGS_DISABLED); 10133 } 10134 10135 if (tg3_flag(tp, 57765_PLUS)) { 10136 val = TG3_RX_STD_RING_SIZE(tp); 10137 val <<= BDINFO_FLAGS_MAXLEN_SHIFT; 10138 val |= (TG3_RX_STD_DMA_SZ << 2); 10139 } else 10140 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT; 10141 } else 10142 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT; 10143 10144 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val); 10145 10146 tpr->rx_std_prod_idx = tp->rx_pending; 10147 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx); 10148 10149 tpr->rx_jmb_prod_idx = 10150 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0; 10151 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx); 10152 10153 tg3_rings_reset(tp); 10154 10155 /* Initialize MAC address and backoff seed. */ 10156 __tg3_set_mac_addr(tp, false); 10157 10158 /* MTU + ethernet header + FCS + optional VLAN tag */ 10159 tw32(MAC_RX_MTU_SIZE, 10160 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); 10161 10162 /* The slot time is changed by tg3_setup_phy if we 10163 * run at gigabit with half duplex. 10164 */ 10165 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) | 10166 (6 << TX_LENGTHS_IPG_SHIFT) | 10167 (32 << TX_LENGTHS_SLOT_TIME_SHIFT); 10168 10169 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 10170 tg3_asic_rev(tp) == ASIC_REV_5762) 10171 val |= tr32(MAC_TX_LENGTHS) & 10172 (TX_LENGTHS_JMB_FRM_LEN_MSK | 10173 TX_LENGTHS_CNT_DWN_VAL_MSK); 10174 10175 tw32(MAC_TX_LENGTHS, val); 10176 10177 /* Receive rules. */ 10178 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS); 10179 tw32(RCVLPC_CONFIG, 0x0181); 10180 10181 /* Calculate RDMAC_MODE setting early, we need it to determine 10182 * the RCVLPC_STATE_ENABLE mask. 
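 *
 * (The register actually programmed below is RCVLPC_STATS_ENABLE:
 * when RDMAC_MODE_FIFO_SIZE_128 is chosen here on a TSO_CAPABLE
 * chip, the long-burst fix bit is cleared from that mask; see the
 * rdmac_mode test in the statistics setup further down.)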
10183 */ 10184 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB | 10185 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB | 10186 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB | 10187 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB | 10188 RDMAC_MODE_LNGREAD_ENAB); 10189 10190 if (tg3_asic_rev(tp) == ASIC_REV_5717) 10191 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS; 10192 10193 if (tg3_asic_rev(tp) == ASIC_REV_5784 || 10194 tg3_asic_rev(tp) == ASIC_REV_5785 || 10195 tg3_asic_rev(tp) == ASIC_REV_57780) 10196 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB | 10197 RDMAC_MODE_MBUF_RBD_CRPT_ENAB | 10198 RDMAC_MODE_MBUF_SBD_CRPT_ENAB; 10199 10200 if (tg3_asic_rev(tp) == ASIC_REV_5705 && 10201 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) { 10202 if (tg3_flag(tp, TSO_CAPABLE) && 10203 tg3_asic_rev(tp) == ASIC_REV_5705) { 10204 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128; 10205 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && 10206 !tg3_flag(tp, IS_5788)) { 10207 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; 10208 } 10209 } 10210 10211 if (tg3_flag(tp, PCI_EXPRESS)) 10212 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; 10213 10214 if (tg3_asic_rev(tp) == ASIC_REV_57766) { 10215 tp->dma_limit = 0; 10216 if (tp->dev->mtu <= ETH_DATA_LEN) { 10217 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR; 10218 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K; 10219 } 10220 } 10221 10222 if (tg3_flag(tp, HW_TSO_1) || 10223 tg3_flag(tp, HW_TSO_2) || 10224 tg3_flag(tp, HW_TSO_3)) 10225 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN; 10226 10227 if (tg3_flag(tp, 57765_PLUS) || 10228 tg3_asic_rev(tp) == ASIC_REV_5785 || 10229 tg3_asic_rev(tp) == ASIC_REV_57780) 10230 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN; 10231 10232 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 10233 tg3_asic_rev(tp) == ASIC_REV_5762) 10234 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET; 10235 10236 if (tg3_asic_rev(tp) == ASIC_REV_5761 || 10237 tg3_asic_rev(tp) == ASIC_REV_5784 || 10238 tg3_asic_rev(tp) == ASIC_REV_5785 || 10239 tg3_asic_rev(tp) == ASIC_REV_57780 || 10240 tg3_flag(tp, 57765_PLUS)) { 10241 u32 tgtreg; 10242 10243 if (tg3_asic_rev(tp) == ASIC_REV_5762) 10244 tgtreg = TG3_RDMA_RSRVCTRL_REG2; 10245 else 10246 tgtreg = TG3_RDMA_RSRVCTRL_REG; 10247 10248 val = tr32(tgtreg); 10249 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || 10250 tg3_asic_rev(tp) == ASIC_REV_5762) { 10251 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK | 10252 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK | 10253 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK); 10254 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B | 10255 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K | 10256 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K; 10257 } 10258 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX); 10259 } 10260 10261 if (tg3_asic_rev(tp) == ASIC_REV_5719 || 10262 tg3_asic_rev(tp) == ASIC_REV_5720 || 10263 tg3_asic_rev(tp) == ASIC_REV_5762) { 10264 u32 tgtreg; 10265 10266 if (tg3_asic_rev(tp) == ASIC_REV_5762) 10267 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2; 10268 else 10269 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL; 10270 10271 val = tr32(tgtreg); 10272 tw32(tgtreg, val | 10273 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K | 10274 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K); 10275 } 10276 10277 /* Receive/send statistics. 
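 *
 * In short: 5750_PLUS chips enable everything except the DACK fix,
 * pre-5750 TSO-capable chips running a 128-byte RDMAC FIFO drop the
 * long-burst fix, and every other chip enables the full 0xffffff mask.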
*/ 10278 if (tg3_flag(tp, 5750_PLUS)) { 10279 val = tr32(RCVLPC_STATS_ENABLE); 10280 val &= ~RCVLPC_STATSENAB_DACK_FIX; 10281 tw32(RCVLPC_STATS_ENABLE, val); 10282 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) && 10283 tg3_flag(tp, TSO_CAPABLE)) { 10284 val = tr32(RCVLPC_STATS_ENABLE); 10285 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX; 10286 tw32(RCVLPC_STATS_ENABLE, val); 10287 } else { 10288 tw32(RCVLPC_STATS_ENABLE, 0xffffff); 10289 } 10290 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE); 10291 tw32(SNDDATAI_STATSENAB, 0xffffff); 10292 tw32(SNDDATAI_STATSCTRL, 10293 (SNDDATAI_SCTRL_ENABLE | 10294 SNDDATAI_SCTRL_FASTUPD)); 10295 10296 /* Setup host coalescing engine. */ 10297 tw32(HOSTCC_MODE, 0); 10298 for (i = 0; i < 2000; i++) { 10299 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE)) 10300 break; 10301 udelay(10); 10302 } 10303 10304 __tg3_set_coalesce(tp, &tp->coal); 10305 10306 if (!tg3_flag(tp, 5705_PLUS)) { 10307 /* Status/statistics block address. See tg3_timer, 10308 * the tg3_periodic_fetch_stats call there, and 10309 * tg3_get_stats to see how this works for 5705/5750 chips. 10310 */ 10311 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, 10312 ((u64) tp->stats_mapping >> 32)); 10313 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, 10314 ((u64) tp->stats_mapping & 0xffffffff)); 10315 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK); 10316 10317 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK); 10318 10319 /* Clear statistics and status block memory areas */ 10320 for (i = NIC_SRAM_STATS_BLK; 10321 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE; 10322 i += sizeof(u32)) { 10323 tg3_write_mem(tp, i, 0); 10324 udelay(40); 10325 } 10326 } 10327 10328 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode); 10329 10330 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE); 10331 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE); 10332 if (!tg3_flag(tp, 5705_PLUS)) 10333 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE); 10334 10335 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { 10336 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 10337 /* reset to prevent losing 1st rx packet intermittently */ 10338 tw32_f(MAC_RX_MODE, RX_MODE_RESET); 10339 udelay(10); 10340 } 10341 10342 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | 10343 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | 10344 MAC_MODE_FHDE_ENABLE; 10345 if (tg3_flag(tp, ENABLE_APE)) 10346 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; 10347 if (!tg3_flag(tp, 5705_PLUS) && 10348 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 10349 tg3_asic_rev(tp) != ASIC_REV_5700) 10350 tp->mac_mode |= MAC_MODE_LINK_POLARITY; 10351 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR); 10352 udelay(40); 10353 10354 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants(). 10355 * If TG3_FLAG_IS_NIC is zero, we should read the 10356 * register to preserve the GPIO settings for LOMs. The GPIOs, 10357 * whether used as inputs or outputs, are set by boot code after 10358 * reset. 
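 *
 * In other words, the read-modify-write below,
 *
 *	tp->grc_local_ctrl &= ~gpio_mask;
 *	tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
 *
 * imports only the GPIO bits the boot code left in the register and
 * leaves every other grc_local_ctrl bit untouched.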
10359 */ 10360 if (!tg3_flag(tp, IS_NIC)) { 10361 u32 gpio_mask; 10362 10363 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 | 10364 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 | 10365 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2; 10366 10367 if (tg3_asic_rev(tp) == ASIC_REV_5752) 10368 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 | 10369 GRC_LCLCTRL_GPIO_OUTPUT3; 10370 10371 if (tg3_asic_rev(tp) == ASIC_REV_5755) 10372 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL; 10373 10374 tp->grc_local_ctrl &= ~gpio_mask; 10375 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask; 10376 10377 /* GPIO1 must be driven high for eeprom write protect */ 10378 if (tg3_flag(tp, EEPROM_WRITE_PROT)) 10379 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | 10380 GRC_LCLCTRL_GPIO_OUTPUT1); 10381 } 10382 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 10383 udelay(100); 10384 10385 if (tg3_flag(tp, USING_MSIX)) { 10386 val = tr32(MSGINT_MODE); 10387 val |= MSGINT_MODE_ENABLE; 10388 if (tp->irq_cnt > 1) 10389 val |= MSGINT_MODE_MULTIVEC_EN; 10390 if (!tg3_flag(tp, 1SHOT_MSI)) 10391 val |= MSGINT_MODE_ONE_SHOT_DISABLE; 10392 tw32(MSGINT_MODE, val); 10393 } 10394 10395 if (!tg3_flag(tp, 5705_PLUS)) { 10396 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE); 10397 udelay(40); 10398 } 10399 10400 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB | 10401 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB | 10402 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB | 10403 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB | 10404 WDMAC_MODE_LNGREAD_ENAB); 10405 10406 if (tg3_asic_rev(tp) == ASIC_REV_5705 && 10407 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) { 10408 if (tg3_flag(tp, TSO_CAPABLE) && 10409 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 || 10410 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) { 10411 /* nothing */ 10412 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && 10413 !tg3_flag(tp, IS_5788)) { 10414 val |= WDMAC_MODE_RX_ACCEL; 10415 } 10416 } 10417 10418 /* Enable host coalescing bug fix */ 10419 if (tg3_flag(tp, 5755_PLUS)) 10420 val |= WDMAC_MODE_STATUS_TAG_FIX; 10421 10422 if (tg3_asic_rev(tp) == ASIC_REV_5785) 10423 val |= WDMAC_MODE_BURST_ALL_DATA; 10424 10425 tw32_f(WDMAC_MODE, val); 10426 udelay(40); 10427 10428 if (tg3_flag(tp, PCIX_MODE)) { 10429 u16 pcix_cmd; 10430 10431 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 10432 &pcix_cmd); 10433 if (tg3_asic_rev(tp) == ASIC_REV_5703) { 10434 pcix_cmd &= ~PCI_X_CMD_MAX_READ; 10435 pcix_cmd |= PCI_X_CMD_READ_2K; 10436 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) { 10437 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ); 10438 pcix_cmd |= PCI_X_CMD_READ_2K; 10439 } 10440 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 10441 pcix_cmd); 10442 } 10443 10444 tw32_f(RDMAC_MODE, rdmac_mode); 10445 udelay(40); 10446 10447 if (tg3_asic_rev(tp) == ASIC_REV_5719 || 10448 tg3_asic_rev(tp) == ASIC_REV_5720) { 10449 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) { 10450 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp)) 10451 break; 10452 } 10453 if (i < TG3_NUM_RDMA_CHANNELS) { 10454 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); 10455 val |= tg3_lso_rd_dma_workaround_bit(tp); 10456 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val); 10457 tg3_flag_set(tp, 5719_5720_RDMA_BUG); 10458 } 10459 } 10460 10461 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE); 10462 if (!tg3_flag(tp, 5705_PLUS)) 10463 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE); 10464 10465 if (tg3_asic_rev(tp) == ASIC_REV_5761) 10466 tw32(SNDDATAC_MODE, 10467 SNDDATAC_MODE_ENABLE | 
SNDDATAC_MODE_CDELAY); 10468 else 10469 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE); 10470 10471 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE); 10472 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB); 10473 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ; 10474 if (tg3_flag(tp, LRG_PROD_RING_CAP)) 10475 val |= RCVDBDI_MODE_LRG_RING_SZ; 10476 tw32(RCVDBDI_MODE, val); 10477 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE); 10478 if (tg3_flag(tp, HW_TSO_1) || 10479 tg3_flag(tp, HW_TSO_2) || 10480 tg3_flag(tp, HW_TSO_3)) 10481 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8); 10482 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE; 10483 if (tg3_flag(tp, ENABLE_TSS)) 10484 val |= SNDBDI_MODE_MULTI_TXQ_EN; 10485 tw32(SNDBDI_MODE, val); 10486 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE); 10487 10488 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) { 10489 err = tg3_load_5701_a0_firmware_fix(tp); 10490 if (err) 10491 return err; 10492 } 10493 10494 if (tg3_asic_rev(tp) == ASIC_REV_57766) { 10495 /* Ignore any errors for the firmware download. If download 10496 * fails, the device will operate with EEE disabled 10497 */ 10498 tg3_load_57766_firmware(tp); 10499 } 10500 10501 if (tg3_flag(tp, TSO_CAPABLE)) { 10502 err = tg3_load_tso_firmware(tp); 10503 if (err) 10504 return err; 10505 } 10506 10507 tp->tx_mode = TX_MODE_ENABLE; 10508 10509 if (tg3_flag(tp, 5755_PLUS) || 10510 tg3_asic_rev(tp) == ASIC_REV_5906) 10511 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX; 10512 10513 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 10514 tg3_asic_rev(tp) == ASIC_REV_5762) { 10515 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE; 10516 tp->tx_mode &= ~val; 10517 tp->tx_mode |= tr32(MAC_TX_MODE) & val; 10518 } 10519 10520 tw32_f(MAC_TX_MODE, tp->tx_mode); 10521 udelay(100); 10522 10523 if (tg3_flag(tp, ENABLE_RSS)) { 10524 tg3_rss_write_indir_tbl(tp); 10525 10526 /* Setup the "secret" hash key. 
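 *
 * Ten 32-bit writes (MAC_RSS_HASH_KEY_0..9) populate a 40-byte key,
 * which matches the usual Toeplitz RSS key length; the exact hash
 * function is the hardware's concern and is only an assumption here.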
*/ 10527 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437); 10528 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc); 10529 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45); 10530 tw32(MAC_RSS_HASH_KEY_3, 0x36621985); 10531 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8); 10532 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e); 10533 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556); 10534 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe); 10535 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7); 10536 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481); 10537 } 10538 10539 tp->rx_mode = RX_MODE_ENABLE; 10540 if (tg3_flag(tp, 5755_PLUS)) 10541 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE; 10542 10543 if (tg3_asic_rev(tp) == ASIC_REV_5762) 10544 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX; 10545 10546 if (tg3_flag(tp, ENABLE_RSS)) 10547 tp->rx_mode |= RX_MODE_RSS_ENABLE | 10548 RX_MODE_RSS_ITBL_HASH_BITS_7 | 10549 RX_MODE_RSS_IPV6_HASH_EN | 10550 RX_MODE_RSS_TCP_IPV6_HASH_EN | 10551 RX_MODE_RSS_IPV4_HASH_EN | 10552 RX_MODE_RSS_TCP_IPV4_HASH_EN; 10553 10554 tw32_f(MAC_RX_MODE, tp->rx_mode); 10555 udelay(10); 10556 10557 tw32(MAC_LED_CTRL, tp->led_ctrl); 10558 10559 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); 10560 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 10561 tw32_f(MAC_RX_MODE, RX_MODE_RESET); 10562 udelay(10); 10563 } 10564 tw32_f(MAC_RX_MODE, tp->rx_mode); 10565 udelay(10); 10566 10567 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 10568 if ((tg3_asic_rev(tp) == ASIC_REV_5704) && 10569 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) { 10570 /* Set drive transmission level to 1.2V */ 10571 /* only if the signal pre-emphasis bit is not set */ 10572 val = tr32(MAC_SERDES_CFG); 10573 val &= 0xfffff000; 10574 val |= 0x880; 10575 tw32(MAC_SERDES_CFG, val); 10576 } 10577 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) 10578 tw32(MAC_SERDES_CFG, 0x616000); 10579 } 10580 10581 /* Prevent chip from dropping frames when flow control 10582 * is enabled. 10583 */ 10584 if (tg3_flag(tp, 57765_CLASS)) 10585 val = 1; 10586 else 10587 val = 2; 10588 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val); 10589 10590 if (tg3_asic_rev(tp) == ASIC_REV_5704 && 10591 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { 10592 /* Use hardware link auto-negotiation */ 10593 tg3_flag_set(tp, HW_AUTONEG); 10594 } 10595 10596 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && 10597 tg3_asic_rev(tp) == ASIC_REV_5714) { 10598 u32 tmp; 10599 10600 tmp = tr32(SERDES_RX_CTRL); 10601 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT); 10602 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT; 10603 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT; 10604 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 10605 } 10606 10607 if (!tg3_flag(tp, USE_PHYLIB)) { 10608 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 10609 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; 10610 10611 err = tg3_setup_phy(tp, false); 10612 if (err) 10613 return err; 10614 10615 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 10616 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) { 10617 u32 tmp; 10618 10619 /* Clear CRC stats. */ 10620 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) { 10621 tg3_writephy(tp, MII_TG3_TEST1, 10622 tmp | MII_TG3_TEST1_CRC_EN); 10623 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp); 10624 } 10625 } 10626 } 10627 10628 __tg3_set_rx_mode(tp->dev); 10629 10630 /* Initialize receive rules. 
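 *
 * The rule/value pairs below are cleared down from a per-chip limit.
 * Worked example of the arithmetic that follows: a 5705_PLUS,
 * non-5780_CLASS chip starts with limit = 8, so the switch clears
 * pairs 7 down to 4; with ASF enabled the limit drops to 8 - 4 = 4
 * and nothing beyond the two rules programmed above is touched.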
 */
	tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);

	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
		limit = 8;
	else
		limit = 16;
	if (tg3_flag(tp, ENABLE_ASF))
		limit -= 4;
	/* Each case below deliberately falls through to clear the
	 * remaining, lower-numbered rule/value pairs.
	 */
	switch (limit) {
	case 16:
		tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
	case 15:
		tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
	case 14:
		tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
	case 13:
		tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
	case 12:
		tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
	case 11:
		tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
	case 10:
		tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
	case 9:
		tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
	case 8:
		tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
	case 7:
		tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
	case 6:
		tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
	case 5:
		tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
	case 4:
		/* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
	case 3:
		/* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
	case 2:
	case 1:

	default:
		break;
	}

	if (tg3_flag(tp, ENABLE_APE))
		/* Write our heartbeat update interval to APE
		 * (set to "disabled" here).
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
				APE_HOST_HEARTBEAT_INT_DISABLE);

	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);

	return 0;
}

/* Called at device open time to get the chip ready for
 * packet processing. Invoked with tp->lock held.
 */
static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
{
	/* Chip may have been just powered on. If so, the boot code may still
	 * be running initialization. Wait for it to finish to avoid races in
	 * accessing the hardware.
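	 *
	 * The wait is the tg3_poll_fw() call below; everything after it
	 * can assume the boot code has released the hardware.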
10696 */ 10697 tg3_enable_register_access(tp); 10698 tg3_poll_fw(tp); 10699 10700 tg3_switch_clocks(tp); 10701 10702 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); 10703 10704 return tg3_reset_hw(tp, reset_phy); 10705 } 10706 10707 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir) 10708 { 10709 int i; 10710 10711 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) { 10712 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN; 10713 10714 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len); 10715 off += len; 10716 10717 if (ocir->signature != TG3_OCIR_SIG_MAGIC || 10718 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE)) 10719 memset(ocir, 0, TG3_OCIR_LEN); 10720 } 10721 } 10722 10723 /* sysfs attributes for hwmon */ 10724 static ssize_t tg3_show_temp(struct device *dev, 10725 struct device_attribute *devattr, char *buf) 10726 { 10727 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 10728 struct tg3 *tp = dev_get_drvdata(dev); 10729 u32 temperature; 10730 10731 spin_lock_bh(&tp->lock); 10732 tg3_ape_scratchpad_read(tp, &temperature, attr->index, 10733 sizeof(temperature)); 10734 spin_unlock_bh(&tp->lock); 10735 return sprintf(buf, "%u\n", temperature); 10736 } 10737 10738 10739 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL, 10740 TG3_TEMP_SENSOR_OFFSET); 10741 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL, 10742 TG3_TEMP_CAUTION_OFFSET); 10743 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL, 10744 TG3_TEMP_MAX_OFFSET); 10745 10746 static struct attribute *tg3_attrs[] = { 10747 &sensor_dev_attr_temp1_input.dev_attr.attr, 10748 &sensor_dev_attr_temp1_crit.dev_attr.attr, 10749 &sensor_dev_attr_temp1_max.dev_attr.attr, 10750 NULL 10751 }; 10752 ATTRIBUTE_GROUPS(tg3); 10753 10754 static void tg3_hwmon_close(struct tg3 *tp) 10755 { 10756 if (tp->hwmon_dev) { 10757 hwmon_device_unregister(tp->hwmon_dev); 10758 tp->hwmon_dev = NULL; 10759 } 10760 } 10761 10762 static void tg3_hwmon_open(struct tg3 *tp) 10763 { 10764 int i; 10765 u32 size = 0; 10766 struct pci_dev *pdev = tp->pdev; 10767 struct tg3_ocir ocirs[TG3_SD_NUM_RECS]; 10768 10769 tg3_sd_scan_scratchpad(tp, ocirs); 10770 10771 for (i = 0; i < TG3_SD_NUM_RECS; i++) { 10772 if (!ocirs[i].src_data_length) 10773 continue; 10774 10775 size += ocirs[i].src_hdr_length; 10776 size += ocirs[i].src_data_length; 10777 } 10778 10779 if (!size) 10780 return; 10781 10782 tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3", 10783 tp, tg3_groups); 10784 if (IS_ERR(tp->hwmon_dev)) { 10785 tp->hwmon_dev = NULL; 10786 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n"); 10787 } 10788 } 10789 10790 10791 #define TG3_STAT_ADD32(PSTAT, REG) \ 10792 do { u32 __val = tr32(REG); \ 10793 (PSTAT)->low += __val; \ 10794 if ((PSTAT)->low < __val) \ 10795 (PSTAT)->high += 1; \ 10796 } while (0) 10797 10798 static void tg3_periodic_fetch_stats(struct tg3 *tp) 10799 { 10800 struct tg3_hw_stats *sp = tp->hw_stats; 10801 10802 if (!tp->link_up) 10803 return; 10804 10805 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS); 10806 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS); 10807 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT); 10808 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT); 10809 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS); 10810 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS); 10811 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS); 10812 TG3_STAT_ADD32(&sp->tx_deferred, 
MAC_TX_STATS_DEFERRED); 10813 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL); 10814 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL); 10815 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST); 10816 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST); 10817 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST); 10818 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) && 10819 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low + 10820 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) { 10821 u32 val; 10822 10823 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); 10824 val &= ~tg3_lso_rd_dma_workaround_bit(tp); 10825 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val); 10826 tg3_flag_clear(tp, 5719_5720_RDMA_BUG); 10827 } 10828 10829 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS); 10830 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS); 10831 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST); 10832 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST); 10833 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST); 10834 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS); 10835 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS); 10836 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD); 10837 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD); 10838 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD); 10839 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED); 10840 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG); 10841 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS); 10842 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE); 10843 10844 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT); 10845 if (tg3_asic_rev(tp) != ASIC_REV_5717 && 10846 tg3_asic_rev(tp) != ASIC_REV_5762 && 10847 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 && 10848 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) { 10849 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT); 10850 } else { 10851 u32 val = tr32(HOSTCC_FLOW_ATTN); 10852 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0; 10853 if (val) { 10854 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM); 10855 sp->rx_discards.low += val; 10856 if (sp->rx_discards.low < val) 10857 sp->rx_discards.high += 1; 10858 } 10859 sp->mbuf_lwm_thresh_hit = sp->rx_discards; 10860 } 10861 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT); 10862 } 10863 10864 static void tg3_chk_missed_msi(struct tg3 *tp) 10865 { 10866 u32 i; 10867 10868 for (i = 0; i < tp->irq_cnt; i++) { 10869 struct tg3_napi *tnapi = &tp->napi[i]; 10870 10871 if (tg3_has_work(tnapi)) { 10872 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr && 10873 tnapi->last_tx_cons == tnapi->tx_cons) { 10874 if (tnapi->chk_msi_cnt < 1) { 10875 tnapi->chk_msi_cnt++; 10876 return; 10877 } 10878 tg3_msi(0, tnapi); 10879 } 10880 } 10881 tnapi->chk_msi_cnt = 0; 10882 tnapi->last_rx_cons = tnapi->rx_rcb_ptr; 10883 tnapi->last_tx_cons = tnapi->tx_cons; 10884 } 10885 } 10886 10887 static void tg3_timer(unsigned long __opaque) 10888 { 10889 struct tg3 *tp = (struct tg3 *) __opaque; 10890 10891 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) 10892 goto restart_timer; 10893 10894 spin_lock(&tp->lock); 10895 10896 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 10897 tg3_flag(tp, 57765_CLASS)) 10898 tg3_chk_missed_msi(tp); 10899 10900 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) { 10901 /* BCM4785: Flush posted writes from GbE to host memory. 
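 *
 * The flush is the dummy register read below: on PCI, a read
 * completion cannot pass previously posted writes, so the tr32()
 * forces them out to host memory before the status block is examined.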
*/ 10902 tr32(HOSTCC_MODE); 10903 } 10904 10905 if (!tg3_flag(tp, TAGGED_STATUS)) { 10906 /* All of this garbage is because when using non-tagged 10907 * IRQ status the mailbox/status_block protocol the chip 10908 * uses with the cpu is race prone. 10909 */ 10910 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) { 10911 tw32(GRC_LOCAL_CTRL, 10912 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); 10913 } else { 10914 tw32(HOSTCC_MODE, tp->coalesce_mode | 10915 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW); 10916 } 10917 10918 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { 10919 spin_unlock(&tp->lock); 10920 tg3_reset_task_schedule(tp); 10921 goto restart_timer; 10922 } 10923 } 10924 10925 /* This part only runs once per second. */ 10926 if (!--tp->timer_counter) { 10927 if (tg3_flag(tp, 5705_PLUS)) 10928 tg3_periodic_fetch_stats(tp); 10929 10930 if (tp->setlpicnt && !--tp->setlpicnt) 10931 tg3_phy_eee_enable(tp); 10932 10933 if (tg3_flag(tp, USE_LINKCHG_REG)) { 10934 u32 mac_stat; 10935 int phy_event; 10936 10937 mac_stat = tr32(MAC_STATUS); 10938 10939 phy_event = 0; 10940 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) { 10941 if (mac_stat & MAC_STATUS_MI_INTERRUPT) 10942 phy_event = 1; 10943 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED) 10944 phy_event = 1; 10945 10946 if (phy_event) 10947 tg3_setup_phy(tp, false); 10948 } else if (tg3_flag(tp, POLL_SERDES)) { 10949 u32 mac_stat = tr32(MAC_STATUS); 10950 int need_setup = 0; 10951 10952 if (tp->link_up && 10953 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) { 10954 need_setup = 1; 10955 } 10956 if (!tp->link_up && 10957 (mac_stat & (MAC_STATUS_PCS_SYNCED | 10958 MAC_STATUS_SIGNAL_DET))) { 10959 need_setup = 1; 10960 } 10961 if (need_setup) { 10962 if (!tp->serdes_counter) { 10963 tw32_f(MAC_MODE, 10964 (tp->mac_mode & 10965 ~MAC_MODE_PORT_MODE_MASK)); 10966 udelay(40); 10967 tw32_f(MAC_MODE, tp->mac_mode); 10968 udelay(40); 10969 } 10970 tg3_setup_phy(tp, false); 10971 } 10972 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && 10973 tg3_flag(tp, 5780_CLASS)) { 10974 tg3_serdes_parallel_detect(tp); 10975 } else if (tg3_flag(tp, POLL_CPMU_LINK)) { 10976 u32 cpmu = tr32(TG3_CPMU_STATUS); 10977 bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) == 10978 TG3_CPMU_STATUS_LINK_MASK); 10979 10980 if (link_up != tp->link_up) 10981 tg3_setup_phy(tp, false); 10982 } 10983 10984 tp->timer_counter = tp->timer_multiplier; 10985 } 10986 10987 /* Heartbeat is only sent once every 2 seconds. 10988 * 10989 * The heartbeat is to tell the ASF firmware that the host 10990 * driver is still alive. In the event that the OS crashes, 10991 * ASF needs to reset the hardware to free up the FIFO space 10992 * that may be filled with rx packets destined for the host. 10993 * If the FIFO is full, ASF will no longer function properly. 10994 * 10995 * Unintended resets have been reported on real time kernels 10996 * where the timer doesn't run on time. Netpoll will also have 10997 * same problem. 10998 * 10999 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware 11000 * to check the ring condition when the heartbeat is expiring 11001 * before doing the reset. This will prevent most unintended 11002 * resets. 
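 *
 * Rough arithmetic, assuming TG3_FW_UPDATE_FREQ_SEC is the 2-second
 * interval described above and the HZ / 10 timer period chosen in
 * tg3_timer_init():
 *
 *	asf_multiplier = (HZ / (HZ / 10)) * TG3_FW_UPDATE_FREQ_SEC
 *		       = 10 * 2 = 20 timer ticks
 *
 * i.e. one FWCMD_NICDRV_ALIVE3 event roughly every 2 seconds.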
11003 */ 11004 if (!--tp->asf_counter) { 11005 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) { 11006 tg3_wait_for_event_ack(tp); 11007 11008 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, 11009 FWCMD_NICDRV_ALIVE3); 11010 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4); 11011 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 11012 TG3_FW_UPDATE_TIMEOUT_SEC); 11013 11014 tg3_generate_fw_event(tp); 11015 } 11016 tp->asf_counter = tp->asf_multiplier; 11017 } 11018 11019 spin_unlock(&tp->lock); 11020 11021 restart_timer: 11022 tp->timer.expires = jiffies + tp->timer_offset; 11023 add_timer(&tp->timer); 11024 } 11025 11026 static void tg3_timer_init(struct tg3 *tp) 11027 { 11028 if (tg3_flag(tp, TAGGED_STATUS) && 11029 tg3_asic_rev(tp) != ASIC_REV_5717 && 11030 !tg3_flag(tp, 57765_CLASS)) 11031 tp->timer_offset = HZ; 11032 else 11033 tp->timer_offset = HZ / 10; 11034 11035 BUG_ON(tp->timer_offset > HZ); 11036 11037 tp->timer_multiplier = (HZ / tp->timer_offset); 11038 tp->asf_multiplier = (HZ / tp->timer_offset) * 11039 TG3_FW_UPDATE_FREQ_SEC; 11040 11041 init_timer(&tp->timer); 11042 tp->timer.data = (unsigned long) tp; 11043 tp->timer.function = tg3_timer; 11044 } 11045 11046 static void tg3_timer_start(struct tg3 *tp) 11047 { 11048 tp->asf_counter = tp->asf_multiplier; 11049 tp->timer_counter = tp->timer_multiplier; 11050 11051 tp->timer.expires = jiffies + tp->timer_offset; 11052 add_timer(&tp->timer); 11053 } 11054 11055 static void tg3_timer_stop(struct tg3 *tp) 11056 { 11057 del_timer_sync(&tp->timer); 11058 } 11059 11060 /* Restart hardware after configuration changes, self-test, etc. 11061 * Invoked with tp->lock held. 11062 */ 11063 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy) 11064 __releases(tp->lock) 11065 __acquires(tp->lock) 11066 { 11067 int err; 11068 11069 err = tg3_init_hw(tp, reset_phy); 11070 if (err) { 11071 netdev_err(tp->dev, 11072 "Failed to re-initialize device, aborting\n"); 11073 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11074 tg3_full_unlock(tp); 11075 tg3_timer_stop(tp); 11076 tp->irq_sync = 0; 11077 tg3_napi_enable(tp); 11078 dev_close(tp->dev); 11079 tg3_full_lock(tp, 0); 11080 } 11081 return err; 11082 } 11083 11084 static void tg3_reset_task(struct work_struct *work) 11085 { 11086 struct tg3 *tp = container_of(work, struct tg3, reset_task); 11087 int err; 11088 11089 tg3_full_lock(tp, 0); 11090 11091 if (!netif_running(tp->dev)) { 11092 tg3_flag_clear(tp, RESET_TASK_PENDING); 11093 tg3_full_unlock(tp); 11094 return; 11095 } 11096 11097 tg3_full_unlock(tp); 11098 11099 tg3_phy_stop(tp); 11100 11101 tg3_netif_stop(tp); 11102 11103 tg3_full_lock(tp, 1); 11104 11105 if (tg3_flag(tp, TX_RECOVERY_PENDING)) { 11106 tp->write32_tx_mbox = tg3_write32_tx_mbox; 11107 tp->write32_rx_mbox = tg3_write_flush_reg32; 11108 tg3_flag_set(tp, MBOX_WRITE_REORDER); 11109 tg3_flag_clear(tp, TX_RECOVERY_PENDING); 11110 } 11111 11112 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); 11113 err = tg3_init_hw(tp, true); 11114 if (err) 11115 goto out; 11116 11117 tg3_netif_start(tp); 11118 11119 out: 11120 tg3_full_unlock(tp); 11121 11122 if (!err) 11123 tg3_phy_start(tp); 11124 11125 tg3_flag_clear(tp, RESET_TASK_PENDING); 11126 } 11127 11128 static int tg3_request_irq(struct tg3 *tp, int irq_num) 11129 { 11130 irq_handler_t fn; 11131 unsigned long flags; 11132 char *name; 11133 struct tg3_napi *tnapi = &tp->napi[irq_num]; 11134 11135 if (tp->irq_cnt == 1) 11136 name = tp->dev->name; 11137 else { 11138 name = &tnapi->irq_lbl[0]; 11139 if (tnapi->tx_buffers && tnapi->rx_rcb) 11140 
snprintf(name, IFNAMSIZ, 11141 "%s-txrx-%d", tp->dev->name, irq_num); 11142 else if (tnapi->tx_buffers) 11143 snprintf(name, IFNAMSIZ, 11144 "%s-tx-%d", tp->dev->name, irq_num); 11145 else if (tnapi->rx_rcb) 11146 snprintf(name, IFNAMSIZ, 11147 "%s-rx-%d", tp->dev->name, irq_num); 11148 else 11149 snprintf(name, IFNAMSIZ, 11150 "%s-%d", tp->dev->name, irq_num); 11151 name[IFNAMSIZ-1] = 0; 11152 } 11153 11154 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) { 11155 fn = tg3_msi; 11156 if (tg3_flag(tp, 1SHOT_MSI)) 11157 fn = tg3_msi_1shot; 11158 flags = 0; 11159 } else { 11160 fn = tg3_interrupt; 11161 if (tg3_flag(tp, TAGGED_STATUS)) 11162 fn = tg3_interrupt_tagged; 11163 flags = IRQF_SHARED; 11164 } 11165 11166 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi); 11167 } 11168 11169 static int tg3_test_interrupt(struct tg3 *tp) 11170 { 11171 struct tg3_napi *tnapi = &tp->napi[0]; 11172 struct net_device *dev = tp->dev; 11173 int err, i, intr_ok = 0; 11174 u32 val; 11175 11176 if (!netif_running(dev)) 11177 return -ENODEV; 11178 11179 tg3_disable_ints(tp); 11180 11181 free_irq(tnapi->irq_vec, tnapi); 11182 11183 /* 11184 * Turn off MSI one shot mode. Otherwise this test has no 11185 * observable way to know whether the interrupt was delivered. 11186 */ 11187 if (tg3_flag(tp, 57765_PLUS)) { 11188 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE; 11189 tw32(MSGINT_MODE, val); 11190 } 11191 11192 err = request_irq(tnapi->irq_vec, tg3_test_isr, 11193 IRQF_SHARED, dev->name, tnapi); 11194 if (err) 11195 return err; 11196 11197 tnapi->hw_status->status &= ~SD_STATUS_UPDATED; 11198 tg3_enable_ints(tp); 11199 11200 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 11201 tnapi->coal_now); 11202 11203 for (i = 0; i < 5; i++) { 11204 u32 int_mbox, misc_host_ctrl; 11205 11206 int_mbox = tr32_mailbox(tnapi->int_mbox); 11207 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); 11208 11209 if ((int_mbox != 0) || 11210 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) { 11211 intr_ok = 1; 11212 break; 11213 } 11214 11215 if (tg3_flag(tp, 57765_PLUS) && 11216 tnapi->hw_status->status_tag != tnapi->last_tag) 11217 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); 11218 11219 msleep(10); 11220 } 11221 11222 tg3_disable_ints(tp); 11223 11224 free_irq(tnapi->irq_vec, tnapi); 11225 11226 err = tg3_request_irq(tp, 0); 11227 11228 if (err) 11229 return err; 11230 11231 if (intr_ok) { 11232 /* Reenable MSI one shot mode. */ 11233 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) { 11234 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE; 11235 tw32(MSGINT_MODE, val); 11236 } 11237 return 0; 11238 } 11239 11240 return -EIO; 11241 } 11242 11243 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is 11244 * successfully restored 11245 */ 11246 static int tg3_test_msi(struct tg3 *tp) 11247 { 11248 int err; 11249 u16 pci_cmd; 11250 11251 if (!tg3_flag(tp, USING_MSI)) 11252 return 0; 11253 11254 /* Turn off SERR reporting in case MSI terminates with Master 11255 * Abort. 11256 */ 11257 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); 11258 pci_write_config_word(tp->pdev, PCI_COMMAND, 11259 pci_cmd & ~PCI_COMMAND_SERR); 11260 11261 err = tg3_test_interrupt(tp); 11262 11263 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); 11264 11265 if (!err) 11266 return 0; 11267 11268 /* other failures */ 11269 if (err != -EIO) 11270 return err; 11271 11272 /* MSI test failed, go back to INTx mode */ 11273 netdev_warn(tp->dev, "No interrupt was generated using MSI. 
Switching " 11274 "to INTx mode. Please report this failure to the PCI " 11275 "maintainer and include system chipset information\n"); 11276 11277 free_irq(tp->napi[0].irq_vec, &tp->napi[0]); 11278 11279 pci_disable_msi(tp->pdev); 11280 11281 tg3_flag_clear(tp, USING_MSI); 11282 tp->napi[0].irq_vec = tp->pdev->irq; 11283 11284 err = tg3_request_irq(tp, 0); 11285 if (err) 11286 return err; 11287 11288 /* Need to reset the chip because the MSI cycle may have terminated 11289 * with Master Abort. 11290 */ 11291 tg3_full_lock(tp, 1); 11292 11293 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11294 err = tg3_init_hw(tp, true); 11295 11296 tg3_full_unlock(tp); 11297 11298 if (err) 11299 free_irq(tp->napi[0].irq_vec, &tp->napi[0]); 11300 11301 return err; 11302 } 11303 11304 static int tg3_request_firmware(struct tg3 *tp) 11305 { 11306 const struct tg3_firmware_hdr *fw_hdr; 11307 11308 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) { 11309 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n", 11310 tp->fw_needed); 11311 return -ENOENT; 11312 } 11313 11314 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; 11315 11316 /* Firmware blob starts with version numbers, followed by 11317 * start address and _full_ length including BSS sections 11318 * (which must be longer than the actual data, of course 11319 */ 11320 11321 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */ 11322 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) { 11323 netdev_err(tp->dev, "bogus length %d in \"%s\"\n", 11324 tp->fw_len, tp->fw_needed); 11325 release_firmware(tp->fw); 11326 tp->fw = NULL; 11327 return -EINVAL; 11328 } 11329 11330 /* We no longer need firmware; we have it. */ 11331 tp->fw_needed = NULL; 11332 return 0; 11333 } 11334 11335 static u32 tg3_irq_count(struct tg3 *tp) 11336 { 11337 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt); 11338 11339 if (irq_cnt > 1) { 11340 /* We want as many rx rings enabled as there are cpus. 11341 * In multiqueue MSI-X mode, the first MSI-X vector 11342 * only deals with link interrupts, etc, so we add 11343 * one to the number of vectors we are requesting. 11344 */ 11345 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max); 11346 } 11347 11348 return irq_cnt; 11349 } 11350 11351 static bool tg3_enable_msix(struct tg3 *tp) 11352 { 11353 int i, rc; 11354 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS]; 11355 11356 tp->txq_cnt = tp->txq_req; 11357 tp->rxq_cnt = tp->rxq_req; 11358 if (!tp->rxq_cnt) 11359 tp->rxq_cnt = netif_get_num_default_rss_queues(); 11360 if (tp->rxq_cnt > tp->rxq_max) 11361 tp->rxq_cnt = tp->rxq_max; 11362 11363 /* Disable multiple TX rings by default. Simple round-robin hardware 11364 * scheduling of the TX rings can cause starvation of rings with 11365 * small packets when other rings have TSO or jumbo packets. 
11366 */ 11367 if (!tp->txq_req) 11368 tp->txq_cnt = 1; 11369 11370 tp->irq_cnt = tg3_irq_count(tp); 11371 11372 for (i = 0; i < tp->irq_max; i++) { 11373 msix_ent[i].entry = i; 11374 msix_ent[i].vector = 0; 11375 } 11376 11377 rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt); 11378 if (rc < 0) { 11379 return false; 11380 } else if (rc < tp->irq_cnt) { 11381 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n", 11382 tp->irq_cnt, rc); 11383 tp->irq_cnt = rc; 11384 tp->rxq_cnt = max(rc - 1, 1); 11385 if (tp->txq_cnt) 11386 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max); 11387 } 11388 11389 for (i = 0; i < tp->irq_max; i++) 11390 tp->napi[i].irq_vec = msix_ent[i].vector; 11391 11392 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) { 11393 pci_disable_msix(tp->pdev); 11394 return false; 11395 } 11396 11397 if (tp->irq_cnt == 1) 11398 return true; 11399 11400 tg3_flag_set(tp, ENABLE_RSS); 11401 11402 if (tp->txq_cnt > 1) 11403 tg3_flag_set(tp, ENABLE_TSS); 11404 11405 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt); 11406 11407 return true; 11408 } 11409 11410 static void tg3_ints_init(struct tg3 *tp) 11411 { 11412 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) && 11413 !tg3_flag(tp, TAGGED_STATUS)) { 11414 /* All MSI supporting chips should support tagged 11415 * status. Assert that this is the case. 11416 */ 11417 netdev_warn(tp->dev, 11418 "MSI without TAGGED_STATUS? Not using MSI\n"); 11419 goto defcfg; 11420 } 11421 11422 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp)) 11423 tg3_flag_set(tp, USING_MSIX); 11424 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0) 11425 tg3_flag_set(tp, USING_MSI); 11426 11427 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) { 11428 u32 msi_mode = tr32(MSGINT_MODE); 11429 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) 11430 msi_mode |= MSGINT_MODE_MULTIVEC_EN; 11431 if (!tg3_flag(tp, 1SHOT_MSI)) 11432 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE; 11433 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE); 11434 } 11435 defcfg: 11436 if (!tg3_flag(tp, USING_MSIX)) { 11437 tp->irq_cnt = 1; 11438 tp->napi[0].irq_vec = tp->pdev->irq; 11439 } 11440 11441 if (tp->irq_cnt == 1) { 11442 tp->txq_cnt = 1; 11443 tp->rxq_cnt = 1; 11444 netif_set_real_num_tx_queues(tp->dev, 1); 11445 netif_set_real_num_rx_queues(tp->dev, 1); 11446 } 11447 } 11448 11449 static void tg3_ints_fini(struct tg3 *tp) 11450 { 11451 if (tg3_flag(tp, USING_MSIX)) 11452 pci_disable_msix(tp->pdev); 11453 else if (tg3_flag(tp, USING_MSI)) 11454 pci_disable_msi(tp->pdev); 11455 tg3_flag_clear(tp, USING_MSI); 11456 tg3_flag_clear(tp, USING_MSIX); 11457 tg3_flag_clear(tp, ENABLE_RSS); 11458 tg3_flag_clear(tp, ENABLE_TSS); 11459 } 11460 11461 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq, 11462 bool init) 11463 { 11464 struct net_device *dev = tp->dev; 11465 int i, err; 11466 11467 /* 11468 * Setup interrupts first so we know how 11469 * many NAPI resources to allocate 11470 */ 11471 tg3_ints_init(tp); 11472 11473 tg3_rss_check_indir_tbl(tp); 11474 11475 /* The placement of this call is tied 11476 * to the setup and use of Host TX descriptors. 
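 *
 * Concretely: tg3_ints_init() has just fixed tp->irq_cnt, and
 * tg3_alloc_consistent() sizes its per-vector NAPI and ring
 * allocations from that count, so the two calls must stay in this
 * order.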
11477 */ 11478 err = tg3_alloc_consistent(tp); 11479 if (err) 11480 goto out_ints_fini; 11481 11482 tg3_napi_init(tp); 11483 11484 tg3_napi_enable(tp); 11485 11486 for (i = 0; i < tp->irq_cnt; i++) { 11487 struct tg3_napi *tnapi = &tp->napi[i]; 11488 err = tg3_request_irq(tp, i); 11489 if (err) { 11490 for (i--; i >= 0; i--) { 11491 tnapi = &tp->napi[i]; 11492 free_irq(tnapi->irq_vec, tnapi); 11493 } 11494 goto out_napi_fini; 11495 } 11496 } 11497 11498 tg3_full_lock(tp, 0); 11499 11500 if (init) 11501 tg3_ape_driver_state_change(tp, RESET_KIND_INIT); 11502 11503 err = tg3_init_hw(tp, reset_phy); 11504 if (err) { 11505 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11506 tg3_free_rings(tp); 11507 } 11508 11509 tg3_full_unlock(tp); 11510 11511 if (err) 11512 goto out_free_irq; 11513 11514 if (test_irq && tg3_flag(tp, USING_MSI)) { 11515 err = tg3_test_msi(tp); 11516 11517 if (err) { 11518 tg3_full_lock(tp, 0); 11519 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11520 tg3_free_rings(tp); 11521 tg3_full_unlock(tp); 11522 11523 goto out_napi_fini; 11524 } 11525 11526 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) { 11527 u32 val = tr32(PCIE_TRANSACTION_CFG); 11528 11529 tw32(PCIE_TRANSACTION_CFG, 11530 val | PCIE_TRANS_CFG_1SHOT_MSI); 11531 } 11532 } 11533 11534 tg3_phy_start(tp); 11535 11536 tg3_hwmon_open(tp); 11537 11538 tg3_full_lock(tp, 0); 11539 11540 tg3_timer_start(tp); 11541 tg3_flag_set(tp, INIT_COMPLETE); 11542 tg3_enable_ints(tp); 11543 11544 if (init) 11545 tg3_ptp_init(tp); 11546 else 11547 tg3_ptp_resume(tp); 11548 11549 11550 tg3_full_unlock(tp); 11551 11552 netif_tx_start_all_queues(dev); 11553 11554 /* 11555 * Reset loopback feature if it was turned on while the device was down 11556 * make sure that it's installed properly now. 11557 */ 11558 if (dev->features & NETIF_F_LOOPBACK) 11559 tg3_set_loopback(dev, dev->features); 11560 11561 return 0; 11562 11563 out_free_irq: 11564 for (i = tp->irq_cnt - 1; i >= 0; i--) { 11565 struct tg3_napi *tnapi = &tp->napi[i]; 11566 free_irq(tnapi->irq_vec, tnapi); 11567 } 11568 11569 out_napi_fini: 11570 tg3_napi_disable(tp); 11571 tg3_napi_fini(tp); 11572 tg3_free_consistent(tp); 11573 11574 out_ints_fini: 11575 tg3_ints_fini(tp); 11576 11577 return err; 11578 } 11579 11580 static void tg3_stop(struct tg3 *tp) 11581 { 11582 int i; 11583 11584 tg3_reset_task_cancel(tp); 11585 tg3_netif_stop(tp); 11586 11587 tg3_timer_stop(tp); 11588 11589 tg3_hwmon_close(tp); 11590 11591 tg3_phy_stop(tp); 11592 11593 tg3_full_lock(tp, 1); 11594 11595 tg3_disable_ints(tp); 11596 11597 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11598 tg3_free_rings(tp); 11599 tg3_flag_clear(tp, INIT_COMPLETE); 11600 11601 tg3_full_unlock(tp); 11602 11603 for (i = tp->irq_cnt - 1; i >= 0; i--) { 11604 struct tg3_napi *tnapi = &tp->napi[i]; 11605 free_irq(tnapi->irq_vec, tnapi); 11606 } 11607 11608 tg3_ints_fini(tp); 11609 11610 tg3_napi_fini(tp); 11611 11612 tg3_free_consistent(tp); 11613 } 11614 11615 static int tg3_open(struct net_device *dev) 11616 { 11617 struct tg3 *tp = netdev_priv(dev); 11618 int err; 11619 11620 if (tp->fw_needed) { 11621 err = tg3_request_firmware(tp); 11622 if (tg3_asic_rev(tp) == ASIC_REV_57766) { 11623 if (err) { 11624 netdev_warn(tp->dev, "EEE capability disabled\n"); 11625 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP; 11626 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { 11627 netdev_warn(tp->dev, "EEE capability restored\n"); 11628 tp->phy_flags |= TG3_PHYFLG_EEE_CAP; 11629 } 11630 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) { 11631 if (err) 11632 
return err; 11633 } else if (err) { 11634 netdev_warn(tp->dev, "TSO capability disabled\n"); 11635 tg3_flag_clear(tp, TSO_CAPABLE); 11636 } else if (!tg3_flag(tp, TSO_CAPABLE)) { 11637 netdev_notice(tp->dev, "TSO capability restored\n"); 11638 tg3_flag_set(tp, TSO_CAPABLE); 11639 } 11640 } 11641 11642 tg3_carrier_off(tp); 11643 11644 err = tg3_power_up(tp); 11645 if (err) 11646 return err; 11647 11648 tg3_full_lock(tp, 0); 11649 11650 tg3_disable_ints(tp); 11651 tg3_flag_clear(tp, INIT_COMPLETE); 11652 11653 tg3_full_unlock(tp); 11654 11655 err = tg3_start(tp, 11656 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN), 11657 true, true); 11658 if (err) { 11659 tg3_frob_aux_power(tp, false); 11660 pci_set_power_state(tp->pdev, PCI_D3hot); 11661 } 11662 11663 if (tg3_flag(tp, PTP_CAPABLE)) { 11664 tp->ptp_clock = ptp_clock_register(&tp->ptp_info, 11665 &tp->pdev->dev); 11666 if (IS_ERR(tp->ptp_clock)) 11667 tp->ptp_clock = NULL; 11668 } 11669 11670 return err; 11671 } 11672 11673 static int tg3_close(struct net_device *dev) 11674 { 11675 struct tg3 *tp = netdev_priv(dev); 11676 11677 tg3_ptp_fini(tp); 11678 11679 tg3_stop(tp); 11680 11681 /* Clear stats across close / open calls */ 11682 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev)); 11683 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev)); 11684 11685 if (pci_device_is_present(tp->pdev)) { 11686 tg3_power_down_prepare(tp); 11687 11688 tg3_carrier_off(tp); 11689 } 11690 return 0; 11691 } 11692 11693 static inline u64 get_stat64(tg3_stat64_t *val) 11694 { 11695 return ((u64)val->high << 32) | ((u64)val->low); 11696 } 11697 11698 static u64 tg3_calc_crc_errors(struct tg3 *tp) 11699 { 11700 struct tg3_hw_stats *hw_stats = tp->hw_stats; 11701 11702 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 11703 (tg3_asic_rev(tp) == ASIC_REV_5700 || 11704 tg3_asic_rev(tp) == ASIC_REV_5701)) { 11705 u32 val; 11706 11707 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) { 11708 tg3_writephy(tp, MII_TG3_TEST1, 11709 val | MII_TG3_TEST1_CRC_EN); 11710 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val); 11711 } else 11712 val = 0; 11713 11714 tp->phy_crc_errors += val; 11715 11716 return tp->phy_crc_errors; 11717 } 11718 11719 return get_stat64(&hw_stats->rx_fcs_errors); 11720 } 11721 11722 #define ESTAT_ADD(member) \ 11723 estats->member = old_estats->member + \ 11724 get_stat64(&hw_stats->member) 11725 11726 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats) 11727 { 11728 struct tg3_ethtool_stats *old_estats = &tp->estats_prev; 11729 struct tg3_hw_stats *hw_stats = tp->hw_stats; 11730 11731 ESTAT_ADD(rx_octets); 11732 ESTAT_ADD(rx_fragments); 11733 ESTAT_ADD(rx_ucast_packets); 11734 ESTAT_ADD(rx_mcast_packets); 11735 ESTAT_ADD(rx_bcast_packets); 11736 ESTAT_ADD(rx_fcs_errors); 11737 ESTAT_ADD(rx_align_errors); 11738 ESTAT_ADD(rx_xon_pause_rcvd); 11739 ESTAT_ADD(rx_xoff_pause_rcvd); 11740 ESTAT_ADD(rx_mac_ctrl_rcvd); 11741 ESTAT_ADD(rx_xoff_entered); 11742 ESTAT_ADD(rx_frame_too_long_errors); 11743 ESTAT_ADD(rx_jabbers); 11744 ESTAT_ADD(rx_undersize_packets); 11745 ESTAT_ADD(rx_in_length_errors); 11746 ESTAT_ADD(rx_out_length_errors); 11747 ESTAT_ADD(rx_64_or_less_octet_packets); 11748 ESTAT_ADD(rx_65_to_127_octet_packets); 11749 ESTAT_ADD(rx_128_to_255_octet_packets); 11750 ESTAT_ADD(rx_256_to_511_octet_packets); 11751 ESTAT_ADD(rx_512_to_1023_octet_packets); 11752 ESTAT_ADD(rx_1024_to_1522_octet_packets); 11753 ESTAT_ADD(rx_1523_to_2047_octet_packets); 11754 ESTAT_ADD(rx_2048_to_4095_octet_packets); 11755 
ESTAT_ADD(rx_4096_to_8191_octet_packets); 11756 ESTAT_ADD(rx_8192_to_9022_octet_packets); 11757 11758 ESTAT_ADD(tx_octets); 11759 ESTAT_ADD(tx_collisions); 11760 ESTAT_ADD(tx_xon_sent); 11761 ESTAT_ADD(tx_xoff_sent); 11762 ESTAT_ADD(tx_flow_control); 11763 ESTAT_ADD(tx_mac_errors); 11764 ESTAT_ADD(tx_single_collisions); 11765 ESTAT_ADD(tx_mult_collisions); 11766 ESTAT_ADD(tx_deferred); 11767 ESTAT_ADD(tx_excessive_collisions); 11768 ESTAT_ADD(tx_late_collisions); 11769 ESTAT_ADD(tx_collide_2times); 11770 ESTAT_ADD(tx_collide_3times); 11771 ESTAT_ADD(tx_collide_4times); 11772 ESTAT_ADD(tx_collide_5times); 11773 ESTAT_ADD(tx_collide_6times); 11774 ESTAT_ADD(tx_collide_7times); 11775 ESTAT_ADD(tx_collide_8times); 11776 ESTAT_ADD(tx_collide_9times); 11777 ESTAT_ADD(tx_collide_10times); 11778 ESTAT_ADD(tx_collide_11times); 11779 ESTAT_ADD(tx_collide_12times); 11780 ESTAT_ADD(tx_collide_13times); 11781 ESTAT_ADD(tx_collide_14times); 11782 ESTAT_ADD(tx_collide_15times); 11783 ESTAT_ADD(tx_ucast_packets); 11784 ESTAT_ADD(tx_mcast_packets); 11785 ESTAT_ADD(tx_bcast_packets); 11786 ESTAT_ADD(tx_carrier_sense_errors); 11787 ESTAT_ADD(tx_discards); 11788 ESTAT_ADD(tx_errors); 11789 11790 ESTAT_ADD(dma_writeq_full); 11791 ESTAT_ADD(dma_write_prioq_full); 11792 ESTAT_ADD(rxbds_empty); 11793 ESTAT_ADD(rx_discards); 11794 ESTAT_ADD(rx_errors); 11795 ESTAT_ADD(rx_threshold_hit); 11796 11797 ESTAT_ADD(dma_readq_full); 11798 ESTAT_ADD(dma_read_prioq_full); 11799 ESTAT_ADD(tx_comp_queue_full); 11800 11801 ESTAT_ADD(ring_set_send_prod_index); 11802 ESTAT_ADD(ring_status_update); 11803 ESTAT_ADD(nic_irqs); 11804 ESTAT_ADD(nic_avoided_irqs); 11805 ESTAT_ADD(nic_tx_threshold_hit); 11806 11807 ESTAT_ADD(mbuf_lwm_thresh_hit); 11808 } 11809 11810 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats) 11811 { 11812 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev; 11813 struct tg3_hw_stats *hw_stats = tp->hw_stats; 11814 11815 stats->rx_packets = old_stats->rx_packets + 11816 get_stat64(&hw_stats->rx_ucast_packets) + 11817 get_stat64(&hw_stats->rx_mcast_packets) + 11818 get_stat64(&hw_stats->rx_bcast_packets); 11819 11820 stats->tx_packets = old_stats->tx_packets + 11821 get_stat64(&hw_stats->tx_ucast_packets) + 11822 get_stat64(&hw_stats->tx_mcast_packets) + 11823 get_stat64(&hw_stats->tx_bcast_packets); 11824 11825 stats->rx_bytes = old_stats->rx_bytes + 11826 get_stat64(&hw_stats->rx_octets); 11827 stats->tx_bytes = old_stats->tx_bytes + 11828 get_stat64(&hw_stats->tx_octets); 11829 11830 stats->rx_errors = old_stats->rx_errors + 11831 get_stat64(&hw_stats->rx_errors); 11832 stats->tx_errors = old_stats->tx_errors + 11833 get_stat64(&hw_stats->tx_errors) + 11834 get_stat64(&hw_stats->tx_mac_errors) + 11835 get_stat64(&hw_stats->tx_carrier_sense_errors) + 11836 get_stat64(&hw_stats->tx_discards); 11837 11838 stats->multicast = old_stats->multicast + 11839 get_stat64(&hw_stats->rx_mcast_packets); 11840 stats->collisions = old_stats->collisions + 11841 get_stat64(&hw_stats->tx_collisions); 11842 11843 stats->rx_length_errors = old_stats->rx_length_errors + 11844 get_stat64(&hw_stats->rx_frame_too_long_errors) + 11845 get_stat64(&hw_stats->rx_undersize_packets); 11846 11847 stats->rx_frame_errors = old_stats->rx_frame_errors + 11848 get_stat64(&hw_stats->rx_align_errors); 11849 stats->tx_aborted_errors = old_stats->tx_aborted_errors + 11850 get_stat64(&hw_stats->tx_discards); 11851 stats->tx_carrier_errors = old_stats->tx_carrier_errors + 11852 
get_stat64(&hw_stats->tx_carrier_sense_errors); 11853 11854 stats->rx_crc_errors = old_stats->rx_crc_errors + 11855 tg3_calc_crc_errors(tp); 11856 11857 stats->rx_missed_errors = old_stats->rx_missed_errors + 11858 get_stat64(&hw_stats->rx_discards); 11859 11860 stats->rx_dropped = tp->rx_dropped; 11861 stats->tx_dropped = tp->tx_dropped; 11862 } 11863 11864 static int tg3_get_regs_len(struct net_device *dev) 11865 { 11866 return TG3_REG_BLK_SIZE; 11867 } 11868 11869 static void tg3_get_regs(struct net_device *dev, 11870 struct ethtool_regs *regs, void *_p) 11871 { 11872 struct tg3 *tp = netdev_priv(dev); 11873 11874 regs->version = 0; 11875 11876 memset(_p, 0, TG3_REG_BLK_SIZE); 11877 11878 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 11879 return; 11880 11881 tg3_full_lock(tp, 0); 11882 11883 tg3_dump_legacy_regs(tp, (u32 *)_p); 11884 11885 tg3_full_unlock(tp); 11886 } 11887 11888 static int tg3_get_eeprom_len(struct net_device *dev) 11889 { 11890 struct tg3 *tp = netdev_priv(dev); 11891 11892 return tp->nvram_size; 11893 } 11894 11895 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) 11896 { 11897 struct tg3 *tp = netdev_priv(dev); 11898 int ret, cpmu_restore = 0; 11899 u8 *pd; 11900 u32 i, offset, len, b_offset, b_count, cpmu_val = 0; 11901 __be32 val; 11902 11903 if (tg3_flag(tp, NO_NVRAM)) 11904 return -EINVAL; 11905 11906 offset = eeprom->offset; 11907 len = eeprom->len; 11908 eeprom->len = 0; 11909 11910 eeprom->magic = TG3_EEPROM_MAGIC; 11911 11912 /* Override clock, link aware and link idle modes */ 11913 if (tg3_flag(tp, CPMU_PRESENT)) { 11914 cpmu_val = tr32(TG3_CPMU_CTRL); 11915 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE | 11916 CPMU_CTRL_LINK_IDLE_MODE)) { 11917 tw32(TG3_CPMU_CTRL, cpmu_val & 11918 ~(CPMU_CTRL_LINK_AWARE_MODE | 11919 CPMU_CTRL_LINK_IDLE_MODE)); 11920 cpmu_restore = 1; 11921 } 11922 } 11923 tg3_override_clk(tp); 11924 11925 if (offset & 3) { 11926 /* adjustments to start on required 4 byte boundary */ 11927 b_offset = offset & 3; 11928 b_count = 4 - b_offset; 11929 if (b_count > len) { 11930 /* i.e. 
offset=1 len=2 */ 11931 b_count = len; 11932 } 11933 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val); 11934 if (ret) 11935 goto eeprom_done; 11936 memcpy(data, ((char *)&val) + b_offset, b_count); 11937 len -= b_count; 11938 offset += b_count; 11939 eeprom->len += b_count; 11940 } 11941 11942 /* read bytes up to the last 4 byte boundary */ 11943 pd = &data[eeprom->len]; 11944 for (i = 0; i < (len - (len & 3)); i += 4) { 11945 ret = tg3_nvram_read_be32(tp, offset + i, &val); 11946 if (ret) { 11947 if (i) 11948 i -= 4; 11949 eeprom->len += i; 11950 goto eeprom_done; 11951 } 11952 memcpy(pd + i, &val, 4); 11953 if (need_resched()) { 11954 if (signal_pending(current)) { 11955 eeprom->len += i; 11956 ret = -EINTR; 11957 goto eeprom_done; 11958 } 11959 cond_resched(); 11960 } 11961 } 11962 eeprom->len += i; 11963 11964 if (len & 3) { 11965 /* read last bytes not ending on 4 byte boundary */ 11966 pd = &data[eeprom->len]; 11967 b_count = len & 3; 11968 b_offset = offset + len - b_count; 11969 ret = tg3_nvram_read_be32(tp, b_offset, &val); 11970 if (ret) 11971 goto eeprom_done; 11972 memcpy(pd, &val, b_count); 11973 eeprom->len += b_count; 11974 } 11975 ret = 0; 11976 11977 eeprom_done: 11978 /* Restore clock, link aware and link idle modes */ 11979 tg3_restore_clk(tp); 11980 if (cpmu_restore) 11981 tw32(TG3_CPMU_CTRL, cpmu_val); 11982 11983 return ret; 11984 } 11985 11986 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) 11987 { 11988 struct tg3 *tp = netdev_priv(dev); 11989 int ret; 11990 u32 offset, len, b_offset, odd_len; 11991 u8 *buf; 11992 __be32 start, end; 11993 11994 if (tg3_flag(tp, NO_NVRAM) || 11995 eeprom->magic != TG3_EEPROM_MAGIC) 11996 return -EINVAL; 11997 11998 offset = eeprom->offset; 11999 len = eeprom->len; 12000 12001 if ((b_offset = (offset & 3))) { 12002 /* adjustments to start on required 4 byte boundary */ 12003 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start); 12004 if (ret) 12005 return ret; 12006 len += b_offset; 12007 offset &= ~3; 12008 if (len < 4) 12009 len = 4; 12010 } 12011 12012 odd_len = 0; 12013 if (len & 3) { 12014 /* adjustments to end on required 4 byte boundary */ 12015 odd_len = 1; 12016 len = (len + 3) & ~3; 12017 ret = tg3_nvram_read_be32(tp, offset+len-4, &end); 12018 if (ret) 12019 return ret; 12020 } 12021 12022 buf = data; 12023 if (b_offset || odd_len) { 12024 buf = kmalloc(len, GFP_KERNEL); 12025 if (!buf) 12026 return -ENOMEM; 12027 if (b_offset) 12028 memcpy(buf, &start, 4); 12029 if (odd_len) 12030 memcpy(buf+len-4, &end, 4); 12031 memcpy(buf + b_offset, data, eeprom->len); 12032 } 12033 12034 ret = tg3_nvram_write_block(tp, offset, len, buf); 12035 12036 if (buf != data) 12037 kfree(buf); 12038 12039 return ret; 12040 } 12041 12042 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 12043 { 12044 struct tg3 *tp = netdev_priv(dev); 12045 12046 if (tg3_flag(tp, USE_PHYLIB)) { 12047 struct phy_device *phydev; 12048 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 12049 return -EAGAIN; 12050 phydev = tp->mdio_bus->phy_map[tp->phy_addr]; 12051 return phy_ethtool_gset(phydev, cmd); 12052 } 12053 12054 cmd->supported = (SUPPORTED_Autoneg); 12055 12056 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) 12057 cmd->supported |= (SUPPORTED_1000baseT_Half | 12058 SUPPORTED_1000baseT_Full); 12059 12060 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { 12061 cmd->supported |= (SUPPORTED_100baseT_Half | 12062 SUPPORTED_100baseT_Full | 12063 SUPPORTED_10baseT_Half | 12064 
SUPPORTED_10baseT_Full | 12065 SUPPORTED_TP); 12066 cmd->port = PORT_TP; 12067 } else { 12068 cmd->supported |= SUPPORTED_FIBRE; 12069 cmd->port = PORT_FIBRE; 12070 } 12071 12072 cmd->advertising = tp->link_config.advertising; 12073 if (tg3_flag(tp, PAUSE_AUTONEG)) { 12074 if (tp->link_config.flowctrl & FLOW_CTRL_RX) { 12075 if (tp->link_config.flowctrl & FLOW_CTRL_TX) { 12076 cmd->advertising |= ADVERTISED_Pause; 12077 } else { 12078 cmd->advertising |= ADVERTISED_Pause | 12079 ADVERTISED_Asym_Pause; 12080 } 12081 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) { 12082 cmd->advertising |= ADVERTISED_Asym_Pause; 12083 } 12084 } 12085 if (netif_running(dev) && tp->link_up) { 12086 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed); 12087 cmd->duplex = tp->link_config.active_duplex; 12088 cmd->lp_advertising = tp->link_config.rmt_adv; 12089 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { 12090 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE) 12091 cmd->eth_tp_mdix = ETH_TP_MDI_X; 12092 else 12093 cmd->eth_tp_mdix = ETH_TP_MDI; 12094 } 12095 } else { 12096 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); 12097 cmd->duplex = DUPLEX_UNKNOWN; 12098 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID; 12099 } 12100 cmd->phy_address = tp->phy_addr; 12101 cmd->transceiver = XCVR_INTERNAL; 12102 cmd->autoneg = tp->link_config.autoneg; 12103 cmd->maxtxpkt = 0; 12104 cmd->maxrxpkt = 0; 12105 return 0; 12106 } 12107 12108 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 12109 { 12110 struct tg3 *tp = netdev_priv(dev); 12111 u32 speed = ethtool_cmd_speed(cmd); 12112 12113 if (tg3_flag(tp, USE_PHYLIB)) { 12114 struct phy_device *phydev; 12115 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 12116 return -EAGAIN; 12117 phydev = tp->mdio_bus->phy_map[tp->phy_addr]; 12118 return phy_ethtool_sset(phydev, cmd); 12119 } 12120 12121 if (cmd->autoneg != AUTONEG_ENABLE && 12122 cmd->autoneg != AUTONEG_DISABLE) 12123 return -EINVAL; 12124 12125 if (cmd->autoneg == AUTONEG_DISABLE && 12126 cmd->duplex != DUPLEX_FULL && 12127 cmd->duplex != DUPLEX_HALF) 12128 return -EINVAL; 12129 12130 if (cmd->autoneg == AUTONEG_ENABLE) { 12131 u32 mask = ADVERTISED_Autoneg | 12132 ADVERTISED_Pause | 12133 ADVERTISED_Asym_Pause; 12134 12135 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) 12136 mask |= ADVERTISED_1000baseT_Half | 12137 ADVERTISED_1000baseT_Full; 12138 12139 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 12140 mask |= ADVERTISED_100baseT_Half | 12141 ADVERTISED_100baseT_Full | 12142 ADVERTISED_10baseT_Half | 12143 ADVERTISED_10baseT_Full | 12144 ADVERTISED_TP; 12145 else 12146 mask |= ADVERTISED_FIBRE; 12147 12148 if (cmd->advertising & ~mask) 12149 return -EINVAL; 12150 12151 mask &= (ADVERTISED_1000baseT_Half | 12152 ADVERTISED_1000baseT_Full | 12153 ADVERTISED_100baseT_Half | 12154 ADVERTISED_100baseT_Full | 12155 ADVERTISED_10baseT_Half | 12156 ADVERTISED_10baseT_Full); 12157 12158 cmd->advertising &= mask; 12159 } else { 12160 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) { 12161 if (speed != SPEED_1000) 12162 return -EINVAL; 12163 12164 if (cmd->duplex != DUPLEX_FULL) 12165 return -EINVAL; 12166 } else { 12167 if (speed != SPEED_100 && 12168 speed != SPEED_10) 12169 return -EINVAL; 12170 } 12171 } 12172 12173 tg3_full_lock(tp, 0); 12174 12175 tp->link_config.autoneg = cmd->autoneg; 12176 if (cmd->autoneg == AUTONEG_ENABLE) { 12177 tp->link_config.advertising = (cmd->advertising | 12178 ADVERTISED_Autoneg); 12179 tp->link_config.speed = SPEED_UNKNOWN; 12180 tp->link_config.duplex = DUPLEX_UNKNOWN; 12181 } 
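/* Forced mode: clear the advertisement mask and record the exact
 * speed/duplex to program. A hypothetical userspace request such as
 * "ethtool -s eth0 speed 100 duplex full autoneg off" would land here
 * with cmd->autoneg == AUTONEG_DISABLE, speed == SPEED_100 and
 * cmd->duplex == DUPLEX_FULL.
 */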
else { 12182 tp->link_config.advertising = 0; 12183 tp->link_config.speed = speed; 12184 tp->link_config.duplex = cmd->duplex; 12185 } 12186 12187 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; 12188 12189 tg3_warn_mgmt_link_flap(tp); 12190 12191 if (netif_running(dev)) 12192 tg3_setup_phy(tp, true); 12193 12194 tg3_full_unlock(tp); 12195 12196 return 0; 12197 } 12198 12199 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 12200 { 12201 struct tg3 *tp = netdev_priv(dev); 12202 12203 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); 12204 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); 12205 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version)); 12206 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info)); 12207 } 12208 12209 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 12210 { 12211 struct tg3 *tp = netdev_priv(dev); 12212 12213 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev)) 12214 wol->supported = WAKE_MAGIC; 12215 else 12216 wol->supported = 0; 12217 wol->wolopts = 0; 12218 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev)) 12219 wol->wolopts = WAKE_MAGIC; 12220 memset(&wol->sopass, 0, sizeof(wol->sopass)); 12221 } 12222 12223 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 12224 { 12225 struct tg3 *tp = netdev_priv(dev); 12226 struct device *dp = &tp->pdev->dev; 12227 12228 if (wol->wolopts & ~WAKE_MAGIC) 12229 return -EINVAL; 12230 if ((wol->wolopts & WAKE_MAGIC) && 12231 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp))) 12232 return -EINVAL; 12233 12234 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC); 12235 12236 if (device_may_wakeup(dp)) 12237 tg3_flag_set(tp, WOL_ENABLE); 12238 else 12239 tg3_flag_clear(tp, WOL_ENABLE); 12240 12241 return 0; 12242 } 12243 12244 static u32 tg3_get_msglevel(struct net_device *dev) 12245 { 12246 struct tg3 *tp = netdev_priv(dev); 12247 return tp->msg_enable; 12248 } 12249 12250 static void tg3_set_msglevel(struct net_device *dev, u32 value) 12251 { 12252 struct tg3 *tp = netdev_priv(dev); 12253 tp->msg_enable = value; 12254 } 12255 12256 static int tg3_nway_reset(struct net_device *dev) 12257 { 12258 struct tg3 *tp = netdev_priv(dev); 12259 int r; 12260 12261 if (!netif_running(dev)) 12262 return -EAGAIN; 12263 12264 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 12265 return -EINVAL; 12266 12267 tg3_warn_mgmt_link_flap(tp); 12268 12269 if (tg3_flag(tp, USE_PHYLIB)) { 12270 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 12271 return -EAGAIN; 12272 r = phy_start_aneg(tp->mdio_bus->phy_map[tp->phy_addr]); 12273 } else { 12274 u32 bmcr; 12275 12276 spin_lock_bh(&tp->lock); 12277 r = -EINVAL; 12278 tg3_readphy(tp, MII_BMCR, &bmcr); 12279 if (!tg3_readphy(tp, MII_BMCR, &bmcr) && 12280 ((bmcr & BMCR_ANENABLE) || 12281 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) { 12282 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART | 12283 BMCR_ANENABLE); 12284 r = 0; 12285 } 12286 spin_unlock_bh(&tp->lock); 12287 } 12288 12289 return r; 12290 } 12291 12292 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) 12293 { 12294 struct tg3 *tp = netdev_priv(dev); 12295 12296 ering->rx_max_pending = tp->rx_std_ring_mask; 12297 if (tg3_flag(tp, JUMBO_RING_ENABLE)) 12298 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask; 12299 else 12300 ering->rx_jumbo_max_pending = 0; 12301 12302 ering->tx_max_pending = TG3_TX_RING_SIZE - 1; 12303 12304 ering->rx_pending = 
tp->rx_pending; 12305 if (tg3_flag(tp, JUMBO_RING_ENABLE)) 12306 ering->rx_jumbo_pending = tp->rx_jumbo_pending; 12307 else 12308 ering->rx_jumbo_pending = 0; 12309 12310 ering->tx_pending = tp->napi[0].tx_pending; 12311 } 12312 12313 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) 12314 { 12315 struct tg3 *tp = netdev_priv(dev); 12316 int i, irq_sync = 0, err = 0; 12317 12318 if ((ering->rx_pending > tp->rx_std_ring_mask) || 12319 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) || 12320 (ering->tx_pending > TG3_TX_RING_SIZE - 1) || 12321 (ering->tx_pending <= MAX_SKB_FRAGS) || 12322 (tg3_flag(tp, TSO_BUG) && 12323 (ering->tx_pending <= (MAX_SKB_FRAGS * 3)))) 12324 return -EINVAL; 12325 12326 if (netif_running(dev)) { 12327 tg3_phy_stop(tp); 12328 tg3_netif_stop(tp); 12329 irq_sync = 1; 12330 } 12331 12332 tg3_full_lock(tp, irq_sync); 12333 12334 tp->rx_pending = ering->rx_pending; 12335 12336 if (tg3_flag(tp, MAX_RXPEND_64) && 12337 tp->rx_pending > 63) 12338 tp->rx_pending = 63; 12339 12340 if (tg3_flag(tp, JUMBO_RING_ENABLE)) 12341 tp->rx_jumbo_pending = ering->rx_jumbo_pending; 12342 12343 for (i = 0; i < tp->irq_max; i++) 12344 tp->napi[i].tx_pending = ering->tx_pending; 12345 12346 if (netif_running(dev)) { 12347 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 12348 err = tg3_restart_hw(tp, false); 12349 if (!err) 12350 tg3_netif_start(tp); 12351 } 12352 12353 tg3_full_unlock(tp); 12354 12355 if (irq_sync && !err) 12356 tg3_phy_start(tp); 12357 12358 return err; 12359 } 12360 12361 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) 12362 { 12363 struct tg3 *tp = netdev_priv(dev); 12364 12365 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG); 12366 12367 if (tp->link_config.flowctrl & FLOW_CTRL_RX) 12368 epause->rx_pause = 1; 12369 else 12370 epause->rx_pause = 0; 12371 12372 if (tp->link_config.flowctrl & FLOW_CTRL_TX) 12373 epause->tx_pause = 1; 12374 else 12375 epause->tx_pause = 0; 12376 } 12377 12378 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) 12379 { 12380 struct tg3 *tp = netdev_priv(dev); 12381 int err = 0; 12382 12383 if (tp->link_config.autoneg == AUTONEG_ENABLE) 12384 tg3_warn_mgmt_link_flap(tp); 12385 12386 if (tg3_flag(tp, USE_PHYLIB)) { 12387 u32 newadv; 12388 struct phy_device *phydev; 12389 12390 phydev = tp->mdio_bus->phy_map[tp->phy_addr]; 12391 12392 if (!(phydev->supported & SUPPORTED_Pause) || 12393 (!(phydev->supported & SUPPORTED_Asym_Pause) && 12394 (epause->rx_pause != epause->tx_pause))) 12395 return -EINVAL; 12396 12397 tp->link_config.flowctrl = 0; 12398 if (epause->rx_pause) { 12399 tp->link_config.flowctrl |= FLOW_CTRL_RX; 12400 12401 if (epause->tx_pause) { 12402 tp->link_config.flowctrl |= FLOW_CTRL_TX; 12403 newadv = ADVERTISED_Pause; 12404 } else 12405 newadv = ADVERTISED_Pause | 12406 ADVERTISED_Asym_Pause; 12407 } else if (epause->tx_pause) { 12408 tp->link_config.flowctrl |= FLOW_CTRL_TX; 12409 newadv = ADVERTISED_Asym_Pause; 12410 } else 12411 newadv = 0; 12412 12413 if (epause->autoneg) 12414 tg3_flag_set(tp, PAUSE_AUTONEG); 12415 else 12416 tg3_flag_clear(tp, PAUSE_AUTONEG); 12417 12418 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { 12419 u32 oldadv = phydev->advertising & 12420 (ADVERTISED_Pause | ADVERTISED_Asym_Pause); 12421 if (oldadv != newadv) { 12422 phydev->advertising &= 12423 ~(ADVERTISED_Pause | 12424 ADVERTISED_Asym_Pause); 12425 phydev->advertising |= newadv; 12426 if (phydev->autoneg) { 12427 /* 12428 * Always renegotiate 
the link to 12429 * inform our link partner of our 12430 * flow control settings, even if the 12431 * flow control is forced. Let 12432 * tg3_adjust_link() do the final 12433 * flow control setup. 12434 */ 12435 return phy_start_aneg(phydev); 12436 } 12437 } 12438 12439 if (!epause->autoneg) 12440 tg3_setup_flow_control(tp, 0, 0); 12441 } else { 12442 tp->link_config.advertising &= 12443 ~(ADVERTISED_Pause | 12444 ADVERTISED_Asym_Pause); 12445 tp->link_config.advertising |= newadv; 12446 } 12447 } else { 12448 int irq_sync = 0; 12449 12450 if (netif_running(dev)) { 12451 tg3_netif_stop(tp); 12452 irq_sync = 1; 12453 } 12454 12455 tg3_full_lock(tp, irq_sync); 12456 12457 if (epause->autoneg) 12458 tg3_flag_set(tp, PAUSE_AUTONEG); 12459 else 12460 tg3_flag_clear(tp, PAUSE_AUTONEG); 12461 if (epause->rx_pause) 12462 tp->link_config.flowctrl |= FLOW_CTRL_RX; 12463 else 12464 tp->link_config.flowctrl &= ~FLOW_CTRL_RX; 12465 if (epause->tx_pause) 12466 tp->link_config.flowctrl |= FLOW_CTRL_TX; 12467 else 12468 tp->link_config.flowctrl &= ~FLOW_CTRL_TX; 12469 12470 if (netif_running(dev)) { 12471 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 12472 err = tg3_restart_hw(tp, false); 12473 if (!err) 12474 tg3_netif_start(tp); 12475 } 12476 12477 tg3_full_unlock(tp); 12478 } 12479 12480 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; 12481 12482 return err; 12483 } 12484 12485 static int tg3_get_sset_count(struct net_device *dev, int sset) 12486 { 12487 switch (sset) { 12488 case ETH_SS_TEST: 12489 return TG3_NUM_TEST; 12490 case ETH_SS_STATS: 12491 return TG3_NUM_STATS; 12492 default: 12493 return -EOPNOTSUPP; 12494 } 12495 } 12496 12497 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, 12498 u32 *rules __always_unused) 12499 { 12500 struct tg3 *tp = netdev_priv(dev); 12501 12502 if (!tg3_flag(tp, SUPPORT_MSIX)) 12503 return -EOPNOTSUPP; 12504 12505 switch (info->cmd) { 12506 case ETHTOOL_GRXRINGS: 12507 if (netif_running(tp->dev)) 12508 info->data = tp->rxq_cnt; 12509 else { 12510 info->data = num_online_cpus(); 12511 if (info->data > TG3_RSS_MAX_NUM_QS) 12512 info->data = TG3_RSS_MAX_NUM_QS; 12513 } 12514 12515 /* The first interrupt vector only 12516 * handles link interrupts. 12517 */ 12518 info->data -= 1; 12519 return 0; 12520 12521 default: 12522 return -EOPNOTSUPP; 12523 } 12524 } 12525 12526 static u32 tg3_get_rxfh_indir_size(struct net_device *dev) 12527 { 12528 u32 size = 0; 12529 struct tg3 *tp = netdev_priv(dev); 12530 12531 if (tg3_flag(tp, SUPPORT_MSIX)) 12532 size = TG3_RSS_INDIR_TBL_SIZE; 12533 12534 return size; 12535 } 12536 12537 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key) 12538 { 12539 struct tg3 *tp = netdev_priv(dev); 12540 int i; 12541 12542 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) 12543 indir[i] = tp->rss_ind_tbl[i]; 12544 12545 return 0; 12546 } 12547 12548 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key) 12549 { 12550 struct tg3 *tp = netdev_priv(dev); 12551 size_t i; 12552 12553 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) 12554 tp->rss_ind_tbl[i] = indir[i]; 12555 12556 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS)) 12557 return 0; 12558 12559 /* It is legal to write the indirection 12560 * table while the device is running. 
12561 */ 12562 tg3_full_lock(tp, 0); 12563 tg3_rss_write_indir_tbl(tp); 12564 tg3_full_unlock(tp); 12565 12566 return 0; 12567 } 12568 12569 static void tg3_get_channels(struct net_device *dev, 12570 struct ethtool_channels *channel) 12571 { 12572 struct tg3 *tp = netdev_priv(dev); 12573 u32 deflt_qs = netif_get_num_default_rss_queues(); 12574 12575 channel->max_rx = tp->rxq_max; 12576 channel->max_tx = tp->txq_max; 12577 12578 if (netif_running(dev)) { 12579 channel->rx_count = tp->rxq_cnt; 12580 channel->tx_count = tp->txq_cnt; 12581 } else { 12582 if (tp->rxq_req) 12583 channel->rx_count = tp->rxq_req; 12584 else 12585 channel->rx_count = min(deflt_qs, tp->rxq_max); 12586 12587 if (tp->txq_req) 12588 channel->tx_count = tp->txq_req; 12589 else 12590 channel->tx_count = min(deflt_qs, tp->txq_max); 12591 } 12592 } 12593 12594 static int tg3_set_channels(struct net_device *dev, 12595 struct ethtool_channels *channel) 12596 { 12597 struct tg3 *tp = netdev_priv(dev); 12598 12599 if (!tg3_flag(tp, SUPPORT_MSIX)) 12600 return -EOPNOTSUPP; 12601 12602 if (channel->rx_count > tp->rxq_max || 12603 channel->tx_count > tp->txq_max) 12604 return -EINVAL; 12605 12606 tp->rxq_req = channel->rx_count; 12607 tp->txq_req = channel->tx_count; 12608 12609 if (!netif_running(dev)) 12610 return 0; 12611 12612 tg3_stop(tp); 12613 12614 tg3_carrier_off(tp); 12615 12616 tg3_start(tp, true, false, false); 12617 12618 return 0; 12619 } 12620 12621 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf) 12622 { 12623 switch (stringset) { 12624 case ETH_SS_STATS: 12625 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys)); 12626 break; 12627 case ETH_SS_TEST: 12628 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys)); 12629 break; 12630 default: 12631 WARN_ON(1); /* we need a WARN() */ 12632 break; 12633 } 12634 } 12635 12636 static int tg3_set_phys_id(struct net_device *dev, 12637 enum ethtool_phys_id_state state) 12638 { 12639 struct tg3 *tp = netdev_priv(dev); 12640 12641 if (!netif_running(tp->dev)) 12642 return -EAGAIN; 12643 12644 switch (state) { 12645 case ETHTOOL_ID_ACTIVE: 12646 return 1; /* cycle on/off once per second */ 12647 12648 case ETHTOOL_ID_ON: 12649 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | 12650 LED_CTRL_1000MBPS_ON | 12651 LED_CTRL_100MBPS_ON | 12652 LED_CTRL_10MBPS_ON | 12653 LED_CTRL_TRAFFIC_OVERRIDE | 12654 LED_CTRL_TRAFFIC_BLINK | 12655 LED_CTRL_TRAFFIC_LED); 12656 break; 12657 12658 case ETHTOOL_ID_OFF: 12659 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | 12660 LED_CTRL_TRAFFIC_OVERRIDE); 12661 break; 12662 12663 case ETHTOOL_ID_INACTIVE: 12664 tw32(MAC_LED_CTRL, tp->led_ctrl); 12665 break; 12666 } 12667 12668 return 0; 12669 } 12670 12671 static void tg3_get_ethtool_stats(struct net_device *dev, 12672 struct ethtool_stats *estats, u64 *tmp_stats) 12673 { 12674 struct tg3 *tp = netdev_priv(dev); 12675 12676 if (tp->hw_stats) 12677 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats); 12678 else 12679 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats)); 12680 } 12681 12682 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen) 12683 { 12684 int i; 12685 __be32 *buf; 12686 u32 offset = 0, len = 0; 12687 u32 magic, val; 12688 12689 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic)) 12690 return NULL; 12691 12692 if (magic == TG3_EEPROM_MAGIC) { 12693 for (offset = TG3_NVM_DIR_START; 12694 offset < TG3_NVM_DIR_END; 12695 offset += TG3_NVM_DIRENT_SIZE) { 12696 if (tg3_nvram_read(tp, offset, &val)) 12697 return NULL; 12698 12699 if ((val
>> TG3_NVM_DIRTYPE_SHIFT) == 12700 TG3_NVM_DIRTYPE_EXTVPD) 12701 break; 12702 } 12703 12704 if (offset != TG3_NVM_DIR_END) { 12705 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4; 12706 if (tg3_nvram_read(tp, offset + 4, &offset)) 12707 return NULL; 12708 12709 offset = tg3_nvram_logical_addr(tp, offset); 12710 } 12711 } 12712 12713 if (!offset || !len) { 12714 offset = TG3_NVM_VPD_OFF; 12715 len = TG3_NVM_VPD_LEN; 12716 } 12717 12718 buf = kmalloc(len, GFP_KERNEL); 12719 if (buf == NULL) 12720 return NULL; 12721 12722 if (magic == TG3_EEPROM_MAGIC) { 12723 for (i = 0; i < len; i += 4) { 12724 /* The data is in little-endian format in NVRAM. 12725 * Use the big-endian read routines to preserve 12726 * the byte order as it exists in NVRAM. 12727 */ 12728 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4])) 12729 goto error; 12730 } 12731 } else { 12732 u8 *ptr; 12733 ssize_t cnt; 12734 unsigned int pos = 0; 12735 12736 ptr = (u8 *)&buf[0]; 12737 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) { 12738 cnt = pci_read_vpd(tp->pdev, pos, 12739 len - pos, ptr); 12740 if (cnt == -ETIMEDOUT || cnt == -EINTR) 12741 cnt = 0; 12742 else if (cnt < 0) 12743 goto error; 12744 } 12745 if (pos != len) 12746 goto error; 12747 } 12748 12749 *vpdlen = len; 12750 12751 return buf; 12752 12753 error: 12754 kfree(buf); 12755 return NULL; 12756 } 12757 12758 #define NVRAM_TEST_SIZE 0x100 12759 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14 12760 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18 12761 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c 12762 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20 12763 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24 12764 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50 12765 #define NVRAM_SELFBOOT_HW_SIZE 0x20 12766 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c 12767 12768 static int tg3_test_nvram(struct tg3 *tp) 12769 { 12770 u32 csum, magic, len; 12771 __be32 *buf; 12772 int i, j, k, err = 0, size; 12773 12774 if (tg3_flag(tp, NO_NVRAM)) 12775 return 0; 12776 12777 if (tg3_nvram_read(tp, 0, &magic) != 0) 12778 return -EIO; 12779 12780 if (magic == TG3_EEPROM_MAGIC) 12781 size = NVRAM_TEST_SIZE; 12782 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) { 12783 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) == 12784 TG3_EEPROM_SB_FORMAT_1) { 12785 switch (magic & TG3_EEPROM_SB_REVISION_MASK) { 12786 case TG3_EEPROM_SB_REVISION_0: 12787 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE; 12788 break; 12789 case TG3_EEPROM_SB_REVISION_2: 12790 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE; 12791 break; 12792 case TG3_EEPROM_SB_REVISION_3: 12793 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE; 12794 break; 12795 case TG3_EEPROM_SB_REVISION_4: 12796 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE; 12797 break; 12798 case TG3_EEPROM_SB_REVISION_5: 12799 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE; 12800 break; 12801 case TG3_EEPROM_SB_REVISION_6: 12802 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE; 12803 break; 12804 default: 12805 return -EIO; 12806 } 12807 } else 12808 return 0; 12809 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW) 12810 size = NVRAM_SELFBOOT_HW_SIZE; 12811 else 12812 return -EIO; 12813 12814 buf = kmalloc(size, GFP_KERNEL); 12815 if (buf == NULL) 12816 return -ENOMEM; 12817 12818 err = -EIO; 12819 for (i = 0, j = 0; i < size; i += 4, j++) { 12820 err = tg3_nvram_read_be32(tp, i, &buf[j]); 12821 if (err) 12822 break; 12823 } 12824 if (i < size) 12825 goto out; 12826 12827 /* Selfboot format */ 12828 magic = be32_to_cpu(buf[0]); 12829 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == 12830 TG3_EEPROM_MAGIC_FW) { 12831 u8 *buf8 = (u8 
*) buf, csum8 = 0; 12832 12833 if ((magic & TG3_EEPROM_SB_REVISION_MASK) == 12834 TG3_EEPROM_SB_REVISION_2) { 12835 /* For rev 2, the csum doesn't include the MBA. */ 12836 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++) 12837 csum8 += buf8[i]; 12838 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++) 12839 csum8 += buf8[i]; 12840 } else { 12841 for (i = 0; i < size; i++) 12842 csum8 += buf8[i]; 12843 } 12844 12845 if (csum8 == 0) { 12846 err = 0; 12847 goto out; 12848 } 12849 12850 err = -EIO; 12851 goto out; 12852 } 12853 12854 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == 12855 TG3_EEPROM_MAGIC_HW) { 12856 u8 data[NVRAM_SELFBOOT_DATA_SIZE]; 12857 u8 parity[NVRAM_SELFBOOT_DATA_SIZE]; 12858 u8 *buf8 = (u8 *) buf; 12859 12860 /* Separate the parity bits and the data bytes. */ 12861 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) { 12862 if ((i == 0) || (i == 8)) { 12863 int l; 12864 u8 msk; 12865 12866 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1) 12867 parity[k++] = buf8[i] & msk; 12868 i++; 12869 } else if (i == 16) { 12870 int l; 12871 u8 msk; 12872 12873 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1) 12874 parity[k++] = buf8[i] & msk; 12875 i++; 12876 12877 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1) 12878 parity[k++] = buf8[i] & msk; 12879 i++; 12880 } 12881 data[j++] = buf8[i]; 12882 } 12883 12884 err = -EIO; 12885 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) { 12886 u8 hw8 = hweight8(data[i]); 12887 12888 if ((hw8 & 0x1) && parity[i]) 12889 goto out; 12890 else if (!(hw8 & 0x1) && !parity[i]) 12891 goto out; 12892 } 12893 err = 0; 12894 goto out; 12895 } 12896 12897 err = -EIO; 12898 12899 /* Bootstrap checksum at offset 0x10 */ 12900 csum = calc_crc((unsigned char *) buf, 0x10); 12901 if (csum != le32_to_cpu(buf[0x10/4])) 12902 goto out; 12903 12904 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */ 12905 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88); 12906 if (csum != le32_to_cpu(buf[0xfc/4])) 12907 goto out; 12908 12909 kfree(buf); 12910 12911 buf = tg3_vpd_readblock(tp, &len); 12912 if (!buf) 12913 return -ENOMEM; 12914 12915 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA); 12916 if (i > 0) { 12917 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]); 12918 if (j < 0) 12919 goto out; 12920 12921 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len) 12922 goto out; 12923 12924 i += PCI_VPD_LRDT_TAG_SIZE; 12925 j = pci_vpd_find_info_keyword((u8 *)buf, i, j, 12926 PCI_VPD_RO_KEYWORD_CHKSUM); 12927 if (j > 0) { 12928 u8 csum8 = 0; 12929 12930 j += PCI_VPD_INFO_FLD_HDR_SIZE; 12931 12932 for (i = 0; i <= j; i++) 12933 csum8 += ((u8 *)buf)[i]; 12934 12935 if (csum8) 12936 goto out; 12937 } 12938 } 12939 12940 err = 0; 12941 12942 out: 12943 kfree(buf); 12944 return err; 12945 } 12946 12947 #define TG3_SERDES_TIMEOUT_SEC 2 12948 #define TG3_COPPER_TIMEOUT_SEC 6 12949 12950 static int tg3_test_link(struct tg3 *tp) 12951 { 12952 int i, max; 12953 12954 if (!netif_running(tp->dev)) 12955 return -ENODEV; 12956 12957 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) 12958 max = TG3_SERDES_TIMEOUT_SEC; 12959 else 12960 max = TG3_COPPER_TIMEOUT_SEC; 12961 12962 for (i = 0; i < max; i++) { 12963 if (tp->link_up) 12964 return 0; 12965 12966 if (msleep_interruptible(1000)) 12967 break; 12968 } 12969 12970 return -EIO; 12971 } 12972 12973 /* Only test the commonly used registers */ 12974 static int tg3_test_registers(struct tg3 *tp) 12975 { 12976 int i, is_5705, is_5750; 12977 u32 offset, read_mask, write_mask, val, save_val, read_val; 12978 static struct { 12979 u16 offset; 
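/* TG3_FL_* gate bits (defined just below): the test loop skips any
 * entry whose flags do not apply to the running ASIC class.
 */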
12980 u16 flags; 12981 #define TG3_FL_5705 0x1 12982 #define TG3_FL_NOT_5705 0x2 12983 #define TG3_FL_NOT_5788 0x4 12984 #define TG3_FL_NOT_5750 0x8 12985 u32 read_mask; 12986 u32 write_mask; 12987 } reg_tbl[] = { 12988 /* MAC Control Registers */ 12989 { MAC_MODE, TG3_FL_NOT_5705, 12990 0x00000000, 0x00ef6f8c }, 12991 { MAC_MODE, TG3_FL_5705, 12992 0x00000000, 0x01ef6b8c }, 12993 { MAC_STATUS, TG3_FL_NOT_5705, 12994 0x03800107, 0x00000000 }, 12995 { MAC_STATUS, TG3_FL_5705, 12996 0x03800100, 0x00000000 }, 12997 { MAC_ADDR_0_HIGH, 0x0000, 12998 0x00000000, 0x0000ffff }, 12999 { MAC_ADDR_0_LOW, 0x0000, 13000 0x00000000, 0xffffffff }, 13001 { MAC_RX_MTU_SIZE, 0x0000, 13002 0x00000000, 0x0000ffff }, 13003 { MAC_TX_MODE, 0x0000, 13004 0x00000000, 0x00000070 }, 13005 { MAC_TX_LENGTHS, 0x0000, 13006 0x00000000, 0x00003fff }, 13007 { MAC_RX_MODE, TG3_FL_NOT_5705, 13008 0x00000000, 0x000007fc }, 13009 { MAC_RX_MODE, TG3_FL_5705, 13010 0x00000000, 0x000007dc }, 13011 { MAC_HASH_REG_0, 0x0000, 13012 0x00000000, 0xffffffff }, 13013 { MAC_HASH_REG_1, 0x0000, 13014 0x00000000, 0xffffffff }, 13015 { MAC_HASH_REG_2, 0x0000, 13016 0x00000000, 0xffffffff }, 13017 { MAC_HASH_REG_3, 0x0000, 13018 0x00000000, 0xffffffff }, 13019 13020 /* Receive Data and Receive BD Initiator Control Registers. */ 13021 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705, 13022 0x00000000, 0xffffffff }, 13023 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705, 13024 0x00000000, 0xffffffff }, 13025 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705, 13026 0x00000000, 0x00000003 }, 13027 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705, 13028 0x00000000, 0xffffffff }, 13029 { RCVDBDI_STD_BD+0, 0x0000, 13030 0x00000000, 0xffffffff }, 13031 { RCVDBDI_STD_BD+4, 0x0000, 13032 0x00000000, 0xffffffff }, 13033 { RCVDBDI_STD_BD+8, 0x0000, 13034 0x00000000, 0xffff0002 }, 13035 { RCVDBDI_STD_BD+0xc, 0x0000, 13036 0x00000000, 0xffffffff }, 13037 13038 /* Receive BD Initiator Control Registers. */ 13039 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705, 13040 0x00000000, 0xffffffff }, 13041 { RCVBDI_STD_THRESH, TG3_FL_5705, 13042 0x00000000, 0x000003ff }, 13043 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705, 13044 0x00000000, 0xffffffff }, 13045 13046 /* Host Coalescing Control Registers. 
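Note the 5705-class entries carry different masks from their
TG3_FL_NOT_5705 counterparts; those parts implement a different set
of writable coalescing bits, so each class is tested against the
bits it actually supports.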
*/ 13047 { HOSTCC_MODE, TG3_FL_NOT_5705, 13048 0x00000000, 0x00000004 }, 13049 { HOSTCC_MODE, TG3_FL_5705, 13050 0x00000000, 0x000000f6 }, 13051 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705, 13052 0x00000000, 0xffffffff }, 13053 { HOSTCC_RXCOL_TICKS, TG3_FL_5705, 13054 0x00000000, 0x000003ff }, 13055 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705, 13056 0x00000000, 0xffffffff }, 13057 { HOSTCC_TXCOL_TICKS, TG3_FL_5705, 13058 0x00000000, 0x000003ff }, 13059 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705, 13060 0x00000000, 0xffffffff }, 13061 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, 13062 0x00000000, 0x000000ff }, 13063 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705, 13064 0x00000000, 0xffffffff }, 13065 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, 13066 0x00000000, 0x000000ff }, 13067 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705, 13068 0x00000000, 0xffffffff }, 13069 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705, 13070 0x00000000, 0xffffffff }, 13071 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705, 13072 0x00000000, 0xffffffff }, 13073 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, 13074 0x00000000, 0x000000ff }, 13075 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705, 13076 0x00000000, 0xffffffff }, 13077 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, 13078 0x00000000, 0x000000ff }, 13079 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705, 13080 0x00000000, 0xffffffff }, 13081 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705, 13082 0x00000000, 0xffffffff }, 13083 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705, 13084 0x00000000, 0xffffffff }, 13085 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000, 13086 0x00000000, 0xffffffff }, 13087 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000, 13088 0x00000000, 0xffffffff }, 13089 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000, 13090 0xffffffff, 0x00000000 }, 13091 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000, 13092 0xffffffff, 0x00000000 }, 13093 13094 /* Buffer Manager Control Registers. 
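The MB pool entries are skipped on newer parts via the
TG3_FL_NOT_5750 gate, and the DMA descriptor pool registers are
tested as read-only (write_mask of zero) on pre-5705 parts.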
*/ 13095 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750, 13096 0x00000000, 0x007fff80 }, 13097 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750, 13098 0x00000000, 0x007fffff }, 13099 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000, 13100 0x00000000, 0x0000003f }, 13101 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000, 13102 0x00000000, 0x000001ff }, 13103 { BUFMGR_MB_HIGH_WATER, 0x0000, 13104 0x00000000, 0x000001ff }, 13105 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705, 13106 0xffffffff, 0x00000000 }, 13107 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705, 13108 0xffffffff, 0x00000000 }, 13109 13110 /* Mailbox Registers */ 13111 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000, 13112 0x00000000, 0x000001ff }, 13113 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705, 13114 0x00000000, 0x000001ff }, 13115 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000, 13116 0x00000000, 0x000007ff }, 13117 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000, 13118 0x00000000, 0x000001ff }, 13119 13120 { 0xffff, 0x0000, 0x00000000, 0x00000000 }, 13121 }; 13122 13123 is_5705 = is_5750 = 0; 13124 if (tg3_flag(tp, 5705_PLUS)) { 13125 is_5705 = 1; 13126 if (tg3_flag(tp, 5750_PLUS)) 13127 is_5750 = 1; 13128 } 13129 13130 for (i = 0; reg_tbl[i].offset != 0xffff; i++) { 13131 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705)) 13132 continue; 13133 13134 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705)) 13135 continue; 13136 13137 if (tg3_flag(tp, IS_5788) && 13138 (reg_tbl[i].flags & TG3_FL_NOT_5788)) 13139 continue; 13140 13141 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750)) 13142 continue; 13143 13144 offset = (u32) reg_tbl[i].offset; 13145 read_mask = reg_tbl[i].read_mask; 13146 write_mask = reg_tbl[i].write_mask; 13147 13148 /* Save the original register content */ 13149 save_val = tr32(offset); 13150 13151 /* Determine the read-only value. */ 13152 read_val = save_val & read_mask; 13153 13154 /* Write zero to the register, then make sure the read-only bits 13155 * are not changed and the read/write bits are all zeros. 13156 */ 13157 tw32(offset, 0); 13158 13159 val = tr32(offset); 13160 13161 /* Test the read-only and read/write bits. */ 13162 if (((val & read_mask) != read_val) || (val & write_mask)) 13163 goto out; 13164 13165 /* Write ones to all the bits defined by RdMask and WrMask, then 13166 * make sure the read-only bits are not changed and the 13167 * read/write bits are all ones. 13168 */ 13169 tw32(offset, read_mask | write_mask); 13170 13171 val = tr32(offset); 13172 13173 /* Test the read-only bits. */ 13174 if ((val & read_mask) != read_val) 13175 goto out; 13176 13177 /* Test the read/write bits. 
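Every bit set in write_mask must now read back as one; a failure of
either check aborts the test with -EIO.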
*/ 13178 if ((val & write_mask) != write_mask) 13179 goto out; 13180 13181 tw32(offset, save_val); 13182 } 13183 13184 return 0; 13185 13186 out: 13187 if (netif_msg_hw(tp)) 13188 netdev_err(tp->dev, 13189 "Register test failed at offset %x\n", offset); 13190 tw32(offset, save_val); 13191 return -EIO; 13192 } 13193 13194 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len) 13195 { 13196 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a }; 13197 int i; 13198 u32 j; 13199 13200 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) { 13201 for (j = 0; j < len; j += 4) { 13202 u32 val; 13203 13204 tg3_write_mem(tp, offset + j, test_pattern[i]); 13205 tg3_read_mem(tp, offset + j, &val); 13206 if (val != test_pattern[i]) 13207 return -EIO; 13208 } 13209 } 13210 return 0; 13211 } 13212 13213 static int tg3_test_memory(struct tg3 *tp) 13214 { 13215 static struct mem_entry { 13216 u32 offset; 13217 u32 len; 13218 } mem_tbl_570x[] = { 13219 { 0x00000000, 0x00b50}, 13220 { 0x00002000, 0x1c000}, 13221 { 0xffffffff, 0x00000} 13222 }, mem_tbl_5705[] = { 13223 { 0x00000100, 0x0000c}, 13224 { 0x00000200, 0x00008}, 13225 { 0x00004000, 0x00800}, 13226 { 0x00006000, 0x01000}, 13227 { 0x00008000, 0x02000}, 13228 { 0x00010000, 0x0e000}, 13229 { 0xffffffff, 0x00000} 13230 }, mem_tbl_5755[] = { 13231 { 0x00000200, 0x00008}, 13232 { 0x00004000, 0x00800}, 13233 { 0x00006000, 0x00800}, 13234 { 0x00008000, 0x02000}, 13235 { 0x00010000, 0x0c000}, 13236 { 0xffffffff, 0x00000} 13237 }, mem_tbl_5906[] = { 13238 { 0x00000200, 0x00008}, 13239 { 0x00004000, 0x00400}, 13240 { 0x00006000, 0x00400}, 13241 { 0x00008000, 0x01000}, 13242 { 0x00010000, 0x01000}, 13243 { 0xffffffff, 0x00000} 13244 }, mem_tbl_5717[] = { 13245 { 0x00000200, 0x00008}, 13246 { 0x00010000, 0x0a000}, 13247 { 0x00020000, 0x13c00}, 13248 { 0xffffffff, 0x00000} 13249 }, mem_tbl_57765[] = { 13250 { 0x00000200, 0x00008}, 13251 { 0x00004000, 0x00800}, 13252 { 0x00006000, 0x09800}, 13253 { 0x00010000, 0x0a000}, 13254 { 0xffffffff, 0x00000} 13255 }; 13256 struct mem_entry *mem_tbl; 13257 int err = 0; 13258 int i; 13259 13260 if (tg3_flag(tp, 5717_PLUS)) 13261 mem_tbl = mem_tbl_5717; 13262 else if (tg3_flag(tp, 57765_CLASS) || 13263 tg3_asic_rev(tp) == ASIC_REV_5762) 13264 mem_tbl = mem_tbl_57765; 13265 else if (tg3_flag(tp, 5755_PLUS)) 13266 mem_tbl = mem_tbl_5755; 13267 else if (tg3_asic_rev(tp) == ASIC_REV_5906) 13268 mem_tbl = mem_tbl_5906; 13269 else if (tg3_flag(tp, 5705_PLUS)) 13270 mem_tbl = mem_tbl_5705; 13271 else 13272 mem_tbl = mem_tbl_570x; 13273 13274 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) { 13275 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len); 13276 if (err) 13277 break; 13278 } 13279 13280 return err; 13281 } 13282 13283 #define TG3_TSO_MSS 500 13284 13285 #define TG3_TSO_IP_HDR_LEN 20 13286 #define TG3_TSO_TCP_HDR_LEN 20 13287 #define TG3_TSO_TCP_OPT_LEN 12 13288 13289 static const u8 tg3_tso_header[] = { 13290 0x08, 0x00, 13291 0x45, 0x00, 0x00, 0x00, 13292 0x00, 0x00, 0x40, 0x00, 13293 0x40, 0x06, 0x00, 0x00, 13294 0x0a, 0x00, 0x00, 0x01, 13295 0x0a, 0x00, 0x00, 0x02, 13296 0x0d, 0x00, 0xe0, 0x00, 13297 0x00, 0x00, 0x01, 0x00, 13298 0x00, 0x00, 0x02, 0x00, 13299 0x80, 0x10, 0x10, 0x00, 13300 0x14, 0x09, 0x00, 0x00, 13301 0x01, 0x01, 0x08, 0x0a, 13302 0x11, 0x11, 0x11, 0x11, 13303 0x11, 0x11, 0x11, 0x11, 13304 }; 13305 13306 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback) 13307 { 13308 u32 rx_start_idx, rx_idx, tx_idx, opaque_key; 13309 u32 base_flags = 0, mss = 0, 
desc_idx, coal_now, data_off, val; 13310 u32 budget; 13311 struct sk_buff *skb; 13312 u8 *tx_data, *rx_data; 13313 dma_addr_t map; 13314 int num_pkts, tx_len, rx_len, i, err; 13315 struct tg3_rx_buffer_desc *desc; 13316 struct tg3_napi *tnapi, *rnapi; 13317 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring; 13318 13319 tnapi = &tp->napi[0]; 13320 rnapi = &tp->napi[0]; 13321 if (tp->irq_cnt > 1) { 13322 if (tg3_flag(tp, ENABLE_RSS)) 13323 rnapi = &tp->napi[1]; 13324 if (tg3_flag(tp, ENABLE_TSS)) 13325 tnapi = &tp->napi[1]; 13326 } 13327 coal_now = tnapi->coal_now | rnapi->coal_now; 13328 13329 err = -EIO; 13330 13331 tx_len = pktsz; 13332 skb = netdev_alloc_skb(tp->dev, tx_len); 13333 if (!skb) 13334 return -ENOMEM; 13335 13336 tx_data = skb_put(skb, tx_len); 13337 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN); 13338 memset(tx_data + ETH_ALEN, 0x0, 8); 13339 13340 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN); 13341 13342 if (tso_loopback) { 13343 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN]; 13344 13345 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN + 13346 TG3_TSO_TCP_OPT_LEN; 13347 13348 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header, 13349 sizeof(tg3_tso_header)); 13350 mss = TG3_TSO_MSS; 13351 13352 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header); 13353 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS); 13354 13355 /* Set the total length field in the IP header */ 13356 iph->tot_len = htons((u16)(mss + hdr_len)); 13357 13358 base_flags = (TXD_FLAG_CPU_PRE_DMA | 13359 TXD_FLAG_CPU_POST_DMA); 13360 13361 if (tg3_flag(tp, HW_TSO_1) || 13362 tg3_flag(tp, HW_TSO_2) || 13363 tg3_flag(tp, HW_TSO_3)) { 13364 struct tcphdr *th; 13365 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN; 13366 th = (struct tcphdr *)&tx_data[val]; 13367 th->check = 0; 13368 } else 13369 base_flags |= TXD_FLAG_TCPUDP_CSUM; 13370 13371 if (tg3_flag(tp, HW_TSO_3)) { 13372 mss |= (hdr_len & 0xc) << 12; 13373 if (hdr_len & 0x10) 13374 base_flags |= 0x00000010; 13375 base_flags |= (hdr_len & 0x3e0) << 5; 13376 } else if (tg3_flag(tp, HW_TSO_2)) 13377 mss |= hdr_len << 9; 13378 else if (tg3_flag(tp, HW_TSO_1) || 13379 tg3_asic_rev(tp) == ASIC_REV_5705) { 13380 mss |= (TG3_TSO_TCP_OPT_LEN << 9); 13381 } else { 13382 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10); 13383 } 13384 13385 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header); 13386 } else { 13387 num_pkts = 1; 13388 data_off = ETH_HLEN; 13389 13390 if (tg3_flag(tp, USE_JUMBO_BDFLAG) && 13391 tx_len > VLAN_ETH_FRAME_LEN) 13392 base_flags |= TXD_FLAG_JMB_PKT; 13393 } 13394 13395 for (i = data_off; i < tx_len; i++) 13396 tx_data[i] = (u8) (i & 0xff); 13397 13398 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE); 13399 if (pci_dma_mapping_error(tp->pdev, map)) { 13400 dev_kfree_skb(skb); 13401 return -EIO; 13402 } 13403 13404 val = tnapi->tx_prod; 13405 tnapi->tx_buffers[val].skb = skb; 13406 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map); 13407 13408 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 13409 rnapi->coal_now); 13410 13411 udelay(10); 13412 13413 rx_start_idx = rnapi->hw_status->idx[0].rx_producer; 13414 13415 budget = tg3_tx_avail(tnapi); 13416 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len, 13417 base_flags | TXD_FLAG_END, mss, 0)) { 13418 tnapi->tx_buffers[val].skb = NULL; 13419 dev_kfree_skb(skb); 13420 return -EIO; 13421 } 13422 13423 tnapi->tx_prod++; 13424 13425 /* Sync BD data before updating mailbox */ 13426 wmb(); 13427 13428 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod); 13429 
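/* Read the mailbox back so the posted write reaches the chip before
 * the polling loop below begins.
 */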
tr32_mailbox(tnapi->prodmbox); 13430 13431 udelay(10); 13432 13433 /* 350 usec to allow enough time on some 10/100 Mbps devices. */ 13434 for (i = 0; i < 35; i++) { 13435 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 13436 coal_now); 13437 13438 udelay(10); 13439 13440 tx_idx = tnapi->hw_status->idx[0].tx_consumer; 13441 rx_idx = rnapi->hw_status->idx[0].rx_producer; 13442 if ((tx_idx == tnapi->tx_prod) && 13443 (rx_idx == (rx_start_idx + num_pkts))) 13444 break; 13445 } 13446 13447 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1); 13448 dev_kfree_skb(skb); 13449 13450 if (tx_idx != tnapi->tx_prod) 13451 goto out; 13452 13453 if (rx_idx != rx_start_idx + num_pkts) 13454 goto out; 13455 13456 val = data_off; 13457 while (rx_idx != rx_start_idx) { 13458 desc = &rnapi->rx_rcb[rx_start_idx++]; 13459 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; 13460 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; 13461 13462 if ((desc->err_vlan & RXD_ERR_MASK) != 0 && 13463 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) 13464 goto out; 13465 13466 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) 13467 - ETH_FCS_LEN; 13468 13469 if (!tso_loopback) { 13470 if (rx_len != tx_len) 13471 goto out; 13472 13473 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) { 13474 if (opaque_key != RXD_OPAQUE_RING_STD) 13475 goto out; 13476 } else { 13477 if (opaque_key != RXD_OPAQUE_RING_JUMBO) 13478 goto out; 13479 } 13480 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && 13481 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK) 13482 >> RXD_TCPCSUM_SHIFT != 0xffff) { 13483 goto out; 13484 } 13485 13486 if (opaque_key == RXD_OPAQUE_RING_STD) { 13487 rx_data = tpr->rx_std_buffers[desc_idx].data; 13488 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], 13489 mapping); 13490 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { 13491 rx_data = tpr->rx_jmb_buffers[desc_idx].data; 13492 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx], 13493 mapping); 13494 } else 13495 goto out; 13496 13497 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, 13498 PCI_DMA_FROMDEVICE); 13499 13500 rx_data += TG3_RX_OFFSET(tp); 13501 for (i = data_off; i < rx_len; i++, val++) { 13502 if (*(rx_data + i) != (u8) (val & 0xff)) 13503 goto out; 13504 } 13505 } 13506 13507 err = 0; 13508 13509 /* tg3_free_rings will unmap and free the rx_data */ 13510 out: 13511 return err; 13512 } 13513 13514 #define TG3_STD_LOOPBACK_FAILED 1 13515 #define TG3_JMB_LOOPBACK_FAILED 2 13516 #define TG3_TSO_LOOPBACK_FAILED 4 13517 #define TG3_LOOPBACK_FAILED \ 13518 (TG3_STD_LOOPBACK_FAILED | \ 13519 TG3_JMB_LOOPBACK_FAILED | \ 13520 TG3_TSO_LOOPBACK_FAILED) 13521 13522 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk) 13523 { 13524 int err = -EIO; 13525 u32 eee_cap; 13526 u32 jmb_pkt_sz = 9000; 13527 13528 if (tp->dma_limit) 13529 jmb_pkt_sz = tp->dma_limit - ETH_HLEN; 13530 13531 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP; 13532 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP; 13533 13534 if (!netif_running(tp->dev)) { 13535 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13536 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13537 if (do_extlpbk) 13538 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13539 goto done; 13540 } 13541 13542 err = tg3_reset_hw(tp, true); 13543 if (err) { 13544 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13545 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13546 if (do_extlpbk) 13547 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13548 goto done; 13549 } 13550 13551 if (tg3_flag(tp, ENABLE_RSS)) { 13552 int 
i; 13553 13554 /* Reroute all rx packets to the 1st queue */ 13555 for (i = MAC_RSS_INDIR_TBL_0; 13556 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4) 13557 tw32(i, 0x0); 13558 } 13559 13560 /* HW errata - mac loopback fails in some cases on 5780. 13561 * Normal traffic and PHY loopback are not affected by 13562 * errata. Also, the MAC loopback test is deprecated for 13563 * all newer ASIC revisions. 13564 */ 13565 if (tg3_asic_rev(tp) != ASIC_REV_5780 && 13566 !tg3_flag(tp, CPMU_PRESENT)) { 13567 tg3_mac_loopback(tp, true); 13568 13569 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) 13570 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED; 13571 13572 if (tg3_flag(tp, JUMBO_RING_ENABLE) && 13573 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false)) 13574 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED; 13575 13576 tg3_mac_loopback(tp, false); 13577 } 13578 13579 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 13580 !tg3_flag(tp, USE_PHYLIB)) { 13581 int i; 13582 13583 tg3_phy_lpbk_set(tp, 0, false); 13584 13585 /* Wait for link */ 13586 for (i = 0; i < 100; i++) { 13587 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) 13588 break; 13589 mdelay(1); 13590 } 13591 13592 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) 13593 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED; 13594 if (tg3_flag(tp, TSO_CAPABLE) && 13595 tg3_run_loopback(tp, ETH_FRAME_LEN, true)) 13596 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED; 13597 if (tg3_flag(tp, JUMBO_RING_ENABLE) && 13598 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false)) 13599 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED; 13600 13601 if (do_extlpbk) { 13602 tg3_phy_lpbk_set(tp, 0, true); 13603 13604 /* All link indications report up, but the hardware 13605 * isn't really ready for about 20 msec. Double it 13606 * to be sure. 13607 */ 13608 mdelay(40); 13609 13610 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) 13611 data[TG3_EXT_LOOPB_TEST] |= 13612 TG3_STD_LOOPBACK_FAILED; 13613 if (tg3_flag(tp, TSO_CAPABLE) && 13614 tg3_run_loopback(tp, ETH_FRAME_LEN, true)) 13615 data[TG3_EXT_LOOPB_TEST] |= 13616 TG3_TSO_LOOPBACK_FAILED; 13617 if (tg3_flag(tp, JUMBO_RING_ENABLE) && 13618 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false)) 13619 data[TG3_EXT_LOOPB_TEST] |= 13620 TG3_JMB_LOOPBACK_FAILED; 13621 } 13622 13623 /* Re-enable gphy autopowerdown. */ 13624 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) 13625 tg3_phy_toggle_apd(tp, true); 13626 } 13627 13628 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] | 13629 data[TG3_EXT_LOOPB_TEST]) ? 
-EIO : 0; 13630 13631 done: 13632 tp->phy_flags |= eee_cap; 13633 13634 return err; 13635 } 13636 13637 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, 13638 u64 *data) 13639 { 13640 struct tg3 *tp = netdev_priv(dev); 13641 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB; 13642 13643 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { 13644 if (tg3_power_up(tp)) { 13645 etest->flags |= ETH_TEST_FL_FAILED; 13646 memset(data, 1, sizeof(u64) * TG3_NUM_TEST); 13647 return; 13648 } 13649 tg3_ape_driver_state_change(tp, RESET_KIND_INIT); 13650 } 13651 13652 memset(data, 0, sizeof(u64) * TG3_NUM_TEST); 13653 13654 if (tg3_test_nvram(tp) != 0) { 13655 etest->flags |= ETH_TEST_FL_FAILED; 13656 data[TG3_NVRAM_TEST] = 1; 13657 } 13658 if (!doextlpbk && tg3_test_link(tp)) { 13659 etest->flags |= ETH_TEST_FL_FAILED; 13660 data[TG3_LINK_TEST] = 1; 13661 } 13662 if (etest->flags & ETH_TEST_FL_OFFLINE) { 13663 int err, err2 = 0, irq_sync = 0; 13664 13665 if (netif_running(dev)) { 13666 tg3_phy_stop(tp); 13667 tg3_netif_stop(tp); 13668 irq_sync = 1; 13669 } 13670 13671 tg3_full_lock(tp, irq_sync); 13672 tg3_halt(tp, RESET_KIND_SUSPEND, 1); 13673 err = tg3_nvram_lock(tp); 13674 tg3_halt_cpu(tp, RX_CPU_BASE); 13675 if (!tg3_flag(tp, 5705_PLUS)) 13676 tg3_halt_cpu(tp, TX_CPU_BASE); 13677 if (!err) 13678 tg3_nvram_unlock(tp); 13679 13680 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 13681 tg3_phy_reset(tp); 13682 13683 if (tg3_test_registers(tp) != 0) { 13684 etest->flags |= ETH_TEST_FL_FAILED; 13685 data[TG3_REGISTER_TEST] = 1; 13686 } 13687 13688 if (tg3_test_memory(tp) != 0) { 13689 etest->flags |= ETH_TEST_FL_FAILED; 13690 data[TG3_MEMORY_TEST] = 1; 13691 } 13692 13693 if (doextlpbk) 13694 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; 13695 13696 if (tg3_test_loopback(tp, data, doextlpbk)) 13697 etest->flags |= ETH_TEST_FL_FAILED; 13698 13699 tg3_full_unlock(tp); 13700 13701 if (tg3_test_interrupt(tp) != 0) { 13702 etest->flags |= ETH_TEST_FL_FAILED; 13703 data[TG3_INTERRUPT_TEST] = 1; 13704 } 13705 13706 tg3_full_lock(tp, 0); 13707 13708 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 13709 if (netif_running(dev)) { 13710 tg3_flag_set(tp, INIT_COMPLETE); 13711 err2 = tg3_restart_hw(tp, true); 13712 if (!err2) 13713 tg3_netif_start(tp); 13714 } 13715 13716 tg3_full_unlock(tp); 13717 13718 if (irq_sync && !err2) 13719 tg3_phy_start(tp); 13720 } 13721 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 13722 tg3_power_down_prepare(tp); 13723 13724 } 13725 13726 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) 13727 { 13728 struct tg3 *tp = netdev_priv(dev); 13729 struct hwtstamp_config stmpconf; 13730 13731 if (!tg3_flag(tp, PTP_CAPABLE)) 13732 return -EOPNOTSUPP; 13733 13734 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf))) 13735 return -EFAULT; 13736 13737 if (stmpconf.flags) 13738 return -EINVAL; 13739 13740 if (stmpconf.tx_type != HWTSTAMP_TX_ON && 13741 stmpconf.tx_type != HWTSTAMP_TX_OFF) 13742 return -ERANGE; 13743 13744 switch (stmpconf.rx_filter) { 13745 case HWTSTAMP_FILTER_NONE: 13746 tp->rxptpctl = 0; 13747 break; 13748 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 13749 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | 13750 TG3_RX_PTP_CTL_ALL_V1_EVENTS; 13751 break; 13752 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 13753 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | 13754 TG3_RX_PTP_CTL_SYNC_EVNT; 13755 break; 13756 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 13757 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | 13758 TG3_RX_PTP_CTL_DELAY_REQ; 13759 break; 13760 case 
HWTSTAMP_FILTER_PTP_V2_EVENT: 13761 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN | 13762 TG3_RX_PTP_CTL_ALL_V2_EVENTS; 13763 break; 13764 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 13765 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | 13766 TG3_RX_PTP_CTL_ALL_V2_EVENTS; 13767 break; 13768 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 13769 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | 13770 TG3_RX_PTP_CTL_ALL_V2_EVENTS; 13771 break; 13772 case HWTSTAMP_FILTER_PTP_V2_SYNC: 13773 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN | 13774 TG3_RX_PTP_CTL_SYNC_EVNT; 13775 break; 13776 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 13777 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | 13778 TG3_RX_PTP_CTL_SYNC_EVNT; 13779 break; 13780 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 13781 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | 13782 TG3_RX_PTP_CTL_SYNC_EVNT; 13783 break; 13784 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 13785 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN | 13786 TG3_RX_PTP_CTL_DELAY_REQ; 13787 break; 13788 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 13789 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | 13790 TG3_RX_PTP_CTL_DELAY_REQ; 13791 break; 13792 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 13793 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | 13794 TG3_RX_PTP_CTL_DELAY_REQ; 13795 break; 13796 default: 13797 return -ERANGE; 13798 } 13799 13800 if (netif_running(dev) && tp->rxptpctl) 13801 tw32(TG3_RX_PTP_CTL, 13802 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK); 13803 13804 if (stmpconf.tx_type == HWTSTAMP_TX_ON) 13805 tg3_flag_set(tp, TX_TSTAMP_EN); 13806 else 13807 tg3_flag_clear(tp, TX_TSTAMP_EN); 13808 13809 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ? 13810 -EFAULT : 0; 13811 } 13812 13813 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) 13814 { 13815 struct tg3 *tp = netdev_priv(dev); 13816 struct hwtstamp_config stmpconf; 13817 13818 if (!tg3_flag(tp, PTP_CAPABLE)) 13819 return -EOPNOTSUPP; 13820 13821 stmpconf.flags = 0; 13822 stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ? 
13823 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF); 13824 13825 switch (tp->rxptpctl) { 13826 case 0: 13827 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE; 13828 break; 13829 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS: 13830 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; 13831 break; 13832 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT: 13833 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC; 13834 break; 13835 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ: 13836 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ; 13837 break; 13838 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS: 13839 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; 13840 break; 13841 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS: 13842 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; 13843 break; 13844 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS: 13845 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; 13846 break; 13847 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT: 13848 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC; 13849 break; 13850 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT: 13851 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC; 13852 break; 13853 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT: 13854 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC; 13855 break; 13856 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ: 13857 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ; 13858 break; 13859 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ: 13860 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ; 13861 break; 13862 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ: 13863 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ; 13864 break; 13865 default: 13866 WARN_ON_ONCE(1); 13867 return -ERANGE; 13868 } 13869 13870 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ? 
13871 -EFAULT : 0; 13872 } 13873 13874 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 13875 { 13876 struct mii_ioctl_data *data = if_mii(ifr); 13877 struct tg3 *tp = netdev_priv(dev); 13878 int err; 13879 13880 if (tg3_flag(tp, USE_PHYLIB)) { 13881 struct phy_device *phydev; 13882 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 13883 return -EAGAIN; 13884 phydev = tp->mdio_bus->phy_map[tp->phy_addr]; 13885 return phy_mii_ioctl(phydev, ifr, cmd); 13886 } 13887 13888 switch (cmd) { 13889 case SIOCGMIIPHY: 13890 data->phy_id = tp->phy_addr; 13891 13892 /* fallthru */ 13893 case SIOCGMIIREG: { 13894 u32 mii_regval; 13895 13896 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 13897 break; /* We have no PHY */ 13898 13899 if (!netif_running(dev)) 13900 return -EAGAIN; 13901 13902 spin_lock_bh(&tp->lock); 13903 err = __tg3_readphy(tp, data->phy_id & 0x1f, 13904 data->reg_num & 0x1f, &mii_regval); 13905 spin_unlock_bh(&tp->lock); 13906 13907 data->val_out = mii_regval; 13908 13909 return err; 13910 } 13911 13912 case SIOCSMIIREG: 13913 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 13914 break; /* We have no PHY */ 13915 13916 if (!netif_running(dev)) 13917 return -EAGAIN; 13918 13919 spin_lock_bh(&tp->lock); 13920 err = __tg3_writephy(tp, data->phy_id & 0x1f, 13921 data->reg_num & 0x1f, data->val_in); 13922 spin_unlock_bh(&tp->lock); 13923 13924 return err; 13925 13926 case SIOCSHWTSTAMP: 13927 return tg3_hwtstamp_set(dev, ifr); 13928 13929 case SIOCGHWTSTAMP: 13930 return tg3_hwtstamp_get(dev, ifr); 13931 13932 default: 13933 /* do nothing */ 13934 break; 13935 } 13936 return -EOPNOTSUPP; 13937 } 13938 13939 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) 13940 { 13941 struct tg3 *tp = netdev_priv(dev); 13942 13943 memcpy(ec, &tp->coal, sizeof(*ec)); 13944 return 0; 13945 } 13946 13947 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) 13948 { 13949 struct tg3 *tp = netdev_priv(dev); 13950 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0; 13951 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0; 13952 13953 if (!tg3_flag(tp, 5705_PLUS)) { 13954 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT; 13955 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT; 13956 max_stat_coal_ticks = MAX_STAT_COAL_TICKS; 13957 min_stat_coal_ticks = MIN_STAT_COAL_TICKS; 13958 } 13959 13960 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) || 13961 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) || 13962 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) || 13963 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) || 13964 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) || 13965 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) || 13966 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) || 13967 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) || 13968 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) || 13969 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks)) 13970 return -EINVAL; 13971 13972 /* No rx interrupts will be generated if both are zero */ 13973 if ((ec->rx_coalesce_usecs == 0) && 13974 (ec->rx_max_coalesced_frames == 0)) 13975 return -EINVAL; 13976 13977 /* No tx interrupts will be generated if both are zero */ 13978 if ((ec->tx_coalesce_usecs == 0) && 13979 (ec->tx_max_coalesced_frames == 0)) 13980 return -EINVAL; 13981 13982 /* Only copy relevant parameters, ignore all others. 
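For a hypothetical request such as "ethtool -C eth0 rx-usecs 20
rx-frames 5", ec->rx_coalesce_usecs arrives as 20 and
ec->rx_max_coalesced_frames as 5; ethtool_coalesce fields with no tg3
counterpart (e.g. the adaptive-coalescing knobs) are accepted but have
no effect.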
*/ 13983 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs; 13984 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs; 13985 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames; 13986 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames; 13987 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq; 13988 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq; 13989 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq; 13990 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq; 13991 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs; 13992 13993 if (netif_running(dev)) { 13994 tg3_full_lock(tp, 0); 13995 __tg3_set_coalesce(tp, &tp->coal); 13996 tg3_full_unlock(tp); 13997 } 13998 return 0; 13999 } 14000 14001 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata) 14002 { 14003 struct tg3 *tp = netdev_priv(dev); 14004 14005 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { 14006 netdev_warn(tp->dev, "Board does not support EEE!\n"); 14007 return -EOPNOTSUPP; 14008 } 14009 14010 if (edata->advertised != tp->eee.advertised) { 14011 netdev_warn(tp->dev, 14012 "Direct manipulation of EEE advertisement is not supported\n"); 14013 return -EINVAL; 14014 } 14015 14016 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) { 14017 netdev_warn(tp->dev, 14018 "Maximal Tx Lpi timer supported is %#x(u)\n", 14019 TG3_CPMU_DBTMR1_LNKIDLE_MAX); 14020 return -EINVAL; 14021 } 14022 14023 tp->eee = *edata; 14024 14025 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; 14026 tg3_warn_mgmt_link_flap(tp); 14027 14028 if (netif_running(tp->dev)) { 14029 tg3_full_lock(tp, 0); 14030 tg3_setup_eee(tp); 14031 tg3_phy_reset(tp); 14032 tg3_full_unlock(tp); 14033 } 14034 14035 return 0; 14036 } 14037 14038 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata) 14039 { 14040 struct tg3 *tp = netdev_priv(dev); 14041 14042 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { 14043 netdev_warn(tp->dev, 14044 "Board does not support EEE!\n"); 14045 return -EOPNOTSUPP; 14046 } 14047 14048 *edata = tp->eee; 14049 return 0; 14050 } 14051 14052 static const struct ethtool_ops tg3_ethtool_ops = { 14053 .get_settings = tg3_get_settings, 14054 .set_settings = tg3_set_settings, 14055 .get_drvinfo = tg3_get_drvinfo, 14056 .get_regs_len = tg3_get_regs_len, 14057 .get_regs = tg3_get_regs, 14058 .get_wol = tg3_get_wol, 14059 .set_wol = tg3_set_wol, 14060 .get_msglevel = tg3_get_msglevel, 14061 .set_msglevel = tg3_set_msglevel, 14062 .nway_reset = tg3_nway_reset, 14063 .get_link = ethtool_op_get_link, 14064 .get_eeprom_len = tg3_get_eeprom_len, 14065 .get_eeprom = tg3_get_eeprom, 14066 .set_eeprom = tg3_set_eeprom, 14067 .get_ringparam = tg3_get_ringparam, 14068 .set_ringparam = tg3_set_ringparam, 14069 .get_pauseparam = tg3_get_pauseparam, 14070 .set_pauseparam = tg3_set_pauseparam, 14071 .self_test = tg3_self_test, 14072 .get_strings = tg3_get_strings, 14073 .set_phys_id = tg3_set_phys_id, 14074 .get_ethtool_stats = tg3_get_ethtool_stats, 14075 .get_coalesce = tg3_get_coalesce, 14076 .set_coalesce = tg3_set_coalesce, 14077 .get_sset_count = tg3_get_sset_count, 14078 .get_rxnfc = tg3_get_rxnfc, 14079 .get_rxfh_indir_size = tg3_get_rxfh_indir_size, 14080 .get_rxfh = tg3_get_rxfh, 14081 .set_rxfh = tg3_set_rxfh, 14082 .get_channels = tg3_get_channels, 14083 .set_channels = tg3_set_channels, 14084 .get_ts_info = tg3_get_ts_info, 14085 .get_eee = tg3_get_eee, 14086 .set_eee = tg3_set_eee, 14087 }; 14088 14089 static struct rtnl_link_stats64 
*tg3_get_stats64(struct net_device *dev, 14090 struct rtnl_link_stats64 *stats) 14091 { 14092 struct tg3 *tp = netdev_priv(dev); 14093 14094 spin_lock_bh(&tp->lock); 14095 if (!tp->hw_stats) { 14096 spin_unlock_bh(&tp->lock); 14097 return &tp->net_stats_prev; 14098 } 14099 14100 tg3_get_nstats(tp, stats); 14101 spin_unlock_bh(&tp->lock); 14102 14103 return stats; 14104 } 14105 14106 static void tg3_set_rx_mode(struct net_device *dev) 14107 { 14108 struct tg3 *tp = netdev_priv(dev); 14109 14110 if (!netif_running(dev)) 14111 return; 14112 14113 tg3_full_lock(tp, 0); 14114 __tg3_set_rx_mode(dev); 14115 tg3_full_unlock(tp); 14116 } 14117 14118 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp, 14119 int new_mtu) 14120 { 14121 dev->mtu = new_mtu; 14122 14123 if (new_mtu > ETH_DATA_LEN) { 14124 if (tg3_flag(tp, 5780_CLASS)) { 14125 netdev_update_features(dev); 14126 tg3_flag_clear(tp, TSO_CAPABLE); 14127 } else { 14128 tg3_flag_set(tp, JUMBO_RING_ENABLE); 14129 } 14130 } else { 14131 if (tg3_flag(tp, 5780_CLASS)) { 14132 tg3_flag_set(tp, TSO_CAPABLE); 14133 netdev_update_features(dev); 14134 } 14135 tg3_flag_clear(tp, JUMBO_RING_ENABLE); 14136 } 14137 } 14138 14139 static int tg3_change_mtu(struct net_device *dev, int new_mtu) 14140 { 14141 struct tg3 *tp = netdev_priv(dev); 14142 int err; 14143 bool reset_phy = false; 14144 14145 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp)) 14146 return -EINVAL; 14147 14148 if (!netif_running(dev)) { 14149 /* We'll just catch it later when the 14150 * device is up'd. 14151 */ 14152 tg3_set_mtu(dev, tp, new_mtu); 14153 return 0; 14154 } 14155 14156 tg3_phy_stop(tp); 14157 14158 tg3_netif_stop(tp); 14159 14160 tg3_set_mtu(dev, tp, new_mtu); 14161 14162 tg3_full_lock(tp, 1); 14163 14164 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 14165 14166 /* Reset PHY, otherwise the read DMA engine will be in a mode that 14167 * breaks all requests to 256 bytes. 14168 */ 14169 if (tg3_asic_rev(tp) == ASIC_REV_57766) 14170 reset_phy = true; 14171 14172 err = tg3_restart_hw(tp, reset_phy); 14173 14174 if (!err) 14175 tg3_netif_start(tp); 14176 14177 tg3_full_unlock(tp); 14178 14179 if (!err) 14180 tg3_phy_start(tp); 14181 14182 return err; 14183 } 14184 14185 static const struct net_device_ops tg3_netdev_ops = { 14186 .ndo_open = tg3_open, 14187 .ndo_stop = tg3_close, 14188 .ndo_start_xmit = tg3_start_xmit, 14189 .ndo_get_stats64 = tg3_get_stats64, 14190 .ndo_validate_addr = eth_validate_addr, 14191 .ndo_set_rx_mode = tg3_set_rx_mode, 14192 .ndo_set_mac_address = tg3_set_mac_addr, 14193 .ndo_do_ioctl = tg3_ioctl, 14194 .ndo_tx_timeout = tg3_tx_timeout, 14195 .ndo_change_mtu = tg3_change_mtu, 14196 .ndo_fix_features = tg3_fix_features, 14197 .ndo_set_features = tg3_set_features, 14198 #ifdef CONFIG_NET_POLL_CONTROLLER 14199 .ndo_poll_controller = tg3_poll_controller, 14200 #endif 14201 }; 14202 14203 static void tg3_get_eeprom_size(struct tg3 *tp) 14204 { 14205 u32 cursize, val, magic; 14206 14207 tp->nvram_size = EEPROM_CHIP_SIZE; 14208 14209 if (tg3_nvram_read(tp, 0, &magic) != 0) 14210 return; 14211 14212 if ((magic != TG3_EEPROM_MAGIC) && 14213 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) && 14214 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW)) 14215 return; 14216 14217 /* 14218 * Size the chip by reading offsets at increasing powers of two. 14219 * When we encounter our validation signature, we know the addressing 14220 * has wrapped around, and thus have our chip size. 
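 * A worked example with a hypothetical 512-byte part: the probe offset doubles from 0x10, and the read at 0x200 wraps back to offset 0 and returns the magic signature, so cursize (and hence tp->nvram_size) ends up as 0x200.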
14221 */ 14222 cursize = 0x10; 14223 14224 while (cursize < tp->nvram_size) { 14225 if (tg3_nvram_read(tp, cursize, &val) != 0) 14226 return; 14227 14228 if (val == magic) 14229 break; 14230 14231 cursize <<= 1; 14232 } 14233 14234 tp->nvram_size = cursize; 14235 } 14236 14237 static void tg3_get_nvram_size(struct tg3 *tp) 14238 { 14239 u32 val; 14240 14241 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0) 14242 return; 14243 14244 /* Selfboot format */ 14245 if (val != TG3_EEPROM_MAGIC) { 14246 tg3_get_eeprom_size(tp); 14247 return; 14248 } 14249 14250 if (tg3_nvram_read(tp, 0xf0, &val) == 0) { 14251 if (val != 0) { 14252 /* This is confusing. We want to operate on the 14253 * 16-bit value at offset 0xf2. The tg3_nvram_read() 14254 * call will read from NVRAM and byteswap the data 14255 * according to the byteswapping settings for all 14256 * other register accesses. This ensures the data we 14257 * want will always reside in the lower 16-bits. 14258 * However, the data in NVRAM is in LE format, which 14259 * means the data from the NVRAM read will always be 14260 * opposite the endianness of the CPU. The 16-bit 14261 * byteswap then brings the data to CPU endianness. 14262 */ 14263 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024; 14264 return; 14265 } 14266 } 14267 tp->nvram_size = TG3_NVRAM_SIZE_512KB; 14268 } 14269 14270 static void tg3_get_nvram_info(struct tg3 *tp) 14271 { 14272 u32 nvcfg1; 14273 14274 nvcfg1 = tr32(NVRAM_CFG1); 14275 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) { 14276 tg3_flag_set(tp, FLASH); 14277 } else { 14278 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 14279 tw32(NVRAM_CFG1, nvcfg1); 14280 } 14281 14282 if (tg3_asic_rev(tp) == ASIC_REV_5750 || 14283 tg3_flag(tp, 5780_CLASS)) { 14284 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) { 14285 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED: 14286 tp->nvram_jedecnum = JEDEC_ATMEL; 14287 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; 14288 tg3_flag_set(tp, NVRAM_BUFFERED); 14289 break; 14290 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED: 14291 tp->nvram_jedecnum = JEDEC_ATMEL; 14292 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE; 14293 break; 14294 case FLASH_VENDOR_ATMEL_EEPROM: 14295 tp->nvram_jedecnum = JEDEC_ATMEL; 14296 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 14297 tg3_flag_set(tp, NVRAM_BUFFERED); 14298 break; 14299 case FLASH_VENDOR_ST: 14300 tp->nvram_jedecnum = JEDEC_ST; 14301 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE; 14302 tg3_flag_set(tp, NVRAM_BUFFERED); 14303 break; 14304 case FLASH_VENDOR_SAIFUN: 14305 tp->nvram_jedecnum = JEDEC_SAIFUN; 14306 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE; 14307 break; 14308 case FLASH_VENDOR_SST_SMALL: 14309 case FLASH_VENDOR_SST_LARGE: 14310 tp->nvram_jedecnum = JEDEC_SST; 14311 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE; 14312 break; 14313 } 14314 } else { 14315 tp->nvram_jedecnum = JEDEC_ATMEL; 14316 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; 14317 tg3_flag_set(tp, NVRAM_BUFFERED); 14318 } 14319 } 14320 14321 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1) 14322 { 14323 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) { 14324 case FLASH_5752PAGE_SIZE_256: 14325 tp->nvram_pagesize = 256; 14326 break; 14327 case FLASH_5752PAGE_SIZE_512: 14328 tp->nvram_pagesize = 512; 14329 break; 14330 case FLASH_5752PAGE_SIZE_1K: 14331 tp->nvram_pagesize = 1024; 14332 break; 14333 case FLASH_5752PAGE_SIZE_2K: 14334 tp->nvram_pagesize = 2048; 14335 break; 14336 case FLASH_5752PAGE_SIZE_4K: 14337 tp->nvram_pagesize = 4096; 14338 break; 14339 case 
FLASH_5752PAGE_SIZE_264: 14340 tp->nvram_pagesize = 264; 14341 break; 14342 case FLASH_5752PAGE_SIZE_528: 14343 tp->nvram_pagesize = 528; 14344 break; 14345 } 14346 } 14347 14348 static void tg3_get_5752_nvram_info(struct tg3 *tp) 14349 { 14350 u32 nvcfg1; 14351 14352 nvcfg1 = tr32(NVRAM_CFG1); 14353 14354 /* NVRAM protection for TPM */ 14355 if (nvcfg1 & (1 << 27)) 14356 tg3_flag_set(tp, PROTECTED_NVRAM); 14357 14358 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14359 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ: 14360 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ: 14361 tp->nvram_jedecnum = JEDEC_ATMEL; 14362 tg3_flag_set(tp, NVRAM_BUFFERED); 14363 break; 14364 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: 14365 tp->nvram_jedecnum = JEDEC_ATMEL; 14366 tg3_flag_set(tp, NVRAM_BUFFERED); 14367 tg3_flag_set(tp, FLASH); 14368 break; 14369 case FLASH_5752VENDOR_ST_M45PE10: 14370 case FLASH_5752VENDOR_ST_M45PE20: 14371 case FLASH_5752VENDOR_ST_M45PE40: 14372 tp->nvram_jedecnum = JEDEC_ST; 14373 tg3_flag_set(tp, NVRAM_BUFFERED); 14374 tg3_flag_set(tp, FLASH); 14375 break; 14376 } 14377 14378 if (tg3_flag(tp, FLASH)) { 14379 tg3_nvram_get_pagesize(tp, nvcfg1); 14380 } else { 14381 /* For eeprom, set pagesize to maximum eeprom size */ 14382 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 14383 14384 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 14385 tw32(NVRAM_CFG1, nvcfg1); 14386 } 14387 } 14388 14389 static void tg3_get_5755_nvram_info(struct tg3 *tp) 14390 { 14391 u32 nvcfg1, protect = 0; 14392 14393 nvcfg1 = tr32(NVRAM_CFG1); 14394 14395 /* NVRAM protection for TPM */ 14396 if (nvcfg1 & (1 << 27)) { 14397 tg3_flag_set(tp, PROTECTED_NVRAM); 14398 protect = 1; 14399 } 14400 14401 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; 14402 switch (nvcfg1) { 14403 case FLASH_5755VENDOR_ATMEL_FLASH_1: 14404 case FLASH_5755VENDOR_ATMEL_FLASH_2: 14405 case FLASH_5755VENDOR_ATMEL_FLASH_3: 14406 case FLASH_5755VENDOR_ATMEL_FLASH_5: 14407 tp->nvram_jedecnum = JEDEC_ATMEL; 14408 tg3_flag_set(tp, NVRAM_BUFFERED); 14409 tg3_flag_set(tp, FLASH); 14410 tp->nvram_pagesize = 264; 14411 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 || 14412 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5) 14413 tp->nvram_size = (protect ? 0x3e200 : 14414 TG3_NVRAM_SIZE_512KB); 14415 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2) 14416 tp->nvram_size = (protect ? 0x1f200 : 14417 TG3_NVRAM_SIZE_256KB); 14418 else 14419 tp->nvram_size = (protect ? 0x1f200 : 14420 TG3_NVRAM_SIZE_128KB); 14421 break; 14422 case FLASH_5752VENDOR_ST_M45PE10: 14423 case FLASH_5752VENDOR_ST_M45PE20: 14424 case FLASH_5752VENDOR_ST_M45PE40: 14425 tp->nvram_jedecnum = JEDEC_ST; 14426 tg3_flag_set(tp, NVRAM_BUFFERED); 14427 tg3_flag_set(tp, FLASH); 14428 tp->nvram_pagesize = 256; 14429 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10) 14430 tp->nvram_size = (protect ? 14431 TG3_NVRAM_SIZE_64KB : 14432 TG3_NVRAM_SIZE_128KB); 14433 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20) 14434 tp->nvram_size = (protect ? 14435 TG3_NVRAM_SIZE_64KB : 14436 TG3_NVRAM_SIZE_256KB); 14437 else 14438 tp->nvram_size = (protect ? 
14439 TG3_NVRAM_SIZE_128KB : 14440 TG3_NVRAM_SIZE_512KB); 14441 break; 14442 } 14443 } 14444 14445 static void tg3_get_5787_nvram_info(struct tg3 *tp) 14446 { 14447 u32 nvcfg1; 14448 14449 nvcfg1 = tr32(NVRAM_CFG1); 14450 14451 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14452 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ: 14453 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: 14454 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ: 14455 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: 14456 tp->nvram_jedecnum = JEDEC_ATMEL; 14457 tg3_flag_set(tp, NVRAM_BUFFERED); 14458 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 14459 14460 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 14461 tw32(NVRAM_CFG1, nvcfg1); 14462 break; 14463 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: 14464 case FLASH_5755VENDOR_ATMEL_FLASH_1: 14465 case FLASH_5755VENDOR_ATMEL_FLASH_2: 14466 case FLASH_5755VENDOR_ATMEL_FLASH_3: 14467 tp->nvram_jedecnum = JEDEC_ATMEL; 14468 tg3_flag_set(tp, NVRAM_BUFFERED); 14469 tg3_flag_set(tp, FLASH); 14470 tp->nvram_pagesize = 264; 14471 break; 14472 case FLASH_5752VENDOR_ST_M45PE10: 14473 case FLASH_5752VENDOR_ST_M45PE20: 14474 case FLASH_5752VENDOR_ST_M45PE40: 14475 tp->nvram_jedecnum = JEDEC_ST; 14476 tg3_flag_set(tp, NVRAM_BUFFERED); 14477 tg3_flag_set(tp, FLASH); 14478 tp->nvram_pagesize = 256; 14479 break; 14480 } 14481 } 14482 14483 static void tg3_get_5761_nvram_info(struct tg3 *tp) 14484 { 14485 u32 nvcfg1, protect = 0; 14486 14487 nvcfg1 = tr32(NVRAM_CFG1); 14488 14489 /* NVRAM protection for TPM */ 14490 if (nvcfg1 & (1 << 27)) { 14491 tg3_flag_set(tp, PROTECTED_NVRAM); 14492 protect = 1; 14493 } 14494 14495 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; 14496 switch (nvcfg1) { 14497 case FLASH_5761VENDOR_ATMEL_ADB021D: 14498 case FLASH_5761VENDOR_ATMEL_ADB041D: 14499 case FLASH_5761VENDOR_ATMEL_ADB081D: 14500 case FLASH_5761VENDOR_ATMEL_ADB161D: 14501 case FLASH_5761VENDOR_ATMEL_MDB021D: 14502 case FLASH_5761VENDOR_ATMEL_MDB041D: 14503 case FLASH_5761VENDOR_ATMEL_MDB081D: 14504 case FLASH_5761VENDOR_ATMEL_MDB161D: 14505 tp->nvram_jedecnum = JEDEC_ATMEL; 14506 tg3_flag_set(tp, NVRAM_BUFFERED); 14507 tg3_flag_set(tp, FLASH); 14508 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); 14509 tp->nvram_pagesize = 256; 14510 break; 14511 case FLASH_5761VENDOR_ST_A_M45PE20: 14512 case FLASH_5761VENDOR_ST_A_M45PE40: 14513 case FLASH_5761VENDOR_ST_A_M45PE80: 14514 case FLASH_5761VENDOR_ST_A_M45PE16: 14515 case FLASH_5761VENDOR_ST_M_M45PE20: 14516 case FLASH_5761VENDOR_ST_M_M45PE40: 14517 case FLASH_5761VENDOR_ST_M_M45PE80: 14518 case FLASH_5761VENDOR_ST_M_M45PE16: 14519 tp->nvram_jedecnum = JEDEC_ST; 14520 tg3_flag_set(tp, NVRAM_BUFFERED); 14521 tg3_flag_set(tp, FLASH); 14522 tp->nvram_pagesize = 256; 14523 break; 14524 } 14525 14526 if (protect) { 14527 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT); 14528 } else { 14529 switch (nvcfg1) { 14530 case FLASH_5761VENDOR_ATMEL_ADB161D: 14531 case FLASH_5761VENDOR_ATMEL_MDB161D: 14532 case FLASH_5761VENDOR_ST_A_M45PE16: 14533 case FLASH_5761VENDOR_ST_M_M45PE16: 14534 tp->nvram_size = TG3_NVRAM_SIZE_2MB; 14535 break; 14536 case FLASH_5761VENDOR_ATMEL_ADB081D: 14537 case FLASH_5761VENDOR_ATMEL_MDB081D: 14538 case FLASH_5761VENDOR_ST_A_M45PE80: 14539 case FLASH_5761VENDOR_ST_M_M45PE80: 14540 tp->nvram_size = TG3_NVRAM_SIZE_1MB; 14541 break; 14542 case FLASH_5761VENDOR_ATMEL_ADB041D: 14543 case FLASH_5761VENDOR_ATMEL_MDB041D: 14544 case FLASH_5761VENDOR_ST_A_M45PE40: 14545 case FLASH_5761VENDOR_ST_M_M45PE40: 14546 tp->nvram_size = TG3_NVRAM_SIZE_512KB; 14547 break; 14548 case 
FLASH_5761VENDOR_ATMEL_ADB021D: 14549 case FLASH_5761VENDOR_ATMEL_MDB021D: 14550 case FLASH_5761VENDOR_ST_A_M45PE20: 14551 case FLASH_5761VENDOR_ST_M_M45PE20: 14552 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 14553 break; 14554 } 14555 } 14556 } 14557 14558 static void tg3_get_5906_nvram_info(struct tg3 *tp) 14559 { 14560 tp->nvram_jedecnum = JEDEC_ATMEL; 14561 tg3_flag_set(tp, NVRAM_BUFFERED); 14562 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 14563 } 14564 14565 static void tg3_get_57780_nvram_info(struct tg3 *tp) 14566 { 14567 u32 nvcfg1; 14568 14569 nvcfg1 = tr32(NVRAM_CFG1); 14570 14571 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14572 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: 14573 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: 14574 tp->nvram_jedecnum = JEDEC_ATMEL; 14575 tg3_flag_set(tp, NVRAM_BUFFERED); 14576 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 14577 14578 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 14579 tw32(NVRAM_CFG1, nvcfg1); 14580 return; 14581 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: 14582 case FLASH_57780VENDOR_ATMEL_AT45DB011D: 14583 case FLASH_57780VENDOR_ATMEL_AT45DB011B: 14584 case FLASH_57780VENDOR_ATMEL_AT45DB021D: 14585 case FLASH_57780VENDOR_ATMEL_AT45DB021B: 14586 case FLASH_57780VENDOR_ATMEL_AT45DB041D: 14587 case FLASH_57780VENDOR_ATMEL_AT45DB041B: 14588 tp->nvram_jedecnum = JEDEC_ATMEL; 14589 tg3_flag_set(tp, NVRAM_BUFFERED); 14590 tg3_flag_set(tp, FLASH); 14591 14592 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14593 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: 14594 case FLASH_57780VENDOR_ATMEL_AT45DB011D: 14595 case FLASH_57780VENDOR_ATMEL_AT45DB011B: 14596 tp->nvram_size = TG3_NVRAM_SIZE_128KB; 14597 break; 14598 case FLASH_57780VENDOR_ATMEL_AT45DB021D: 14599 case FLASH_57780VENDOR_ATMEL_AT45DB021B: 14600 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 14601 break; 14602 case FLASH_57780VENDOR_ATMEL_AT45DB041D: 14603 case FLASH_57780VENDOR_ATMEL_AT45DB041B: 14604 tp->nvram_size = TG3_NVRAM_SIZE_512KB; 14605 break; 14606 } 14607 break; 14608 case FLASH_5752VENDOR_ST_M45PE10: 14609 case FLASH_5752VENDOR_ST_M45PE20: 14610 case FLASH_5752VENDOR_ST_M45PE40: 14611 tp->nvram_jedecnum = JEDEC_ST; 14612 tg3_flag_set(tp, NVRAM_BUFFERED); 14613 tg3_flag_set(tp, FLASH); 14614 14615 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14616 case FLASH_5752VENDOR_ST_M45PE10: 14617 tp->nvram_size = TG3_NVRAM_SIZE_128KB; 14618 break; 14619 case FLASH_5752VENDOR_ST_M45PE20: 14620 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 14621 break; 14622 case FLASH_5752VENDOR_ST_M45PE40: 14623 tp->nvram_size = TG3_NVRAM_SIZE_512KB; 14624 break; 14625 } 14626 break; 14627 default: 14628 tg3_flag_set(tp, NO_NVRAM); 14629 return; 14630 } 14631 14632 tg3_nvram_get_pagesize(tp, nvcfg1); 14633 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) 14634 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); 14635 } 14636 14637 14638 static void tg3_get_5717_nvram_info(struct tg3 *tp) 14639 { 14640 u32 nvcfg1; 14641 14642 nvcfg1 = tr32(NVRAM_CFG1); 14643 14644 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14645 case FLASH_5717VENDOR_ATMEL_EEPROM: 14646 case FLASH_5717VENDOR_MICRO_EEPROM: 14647 tp->nvram_jedecnum = JEDEC_ATMEL; 14648 tg3_flag_set(tp, NVRAM_BUFFERED); 14649 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 14650 14651 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 14652 tw32(NVRAM_CFG1, nvcfg1); 14653 return; 14654 case FLASH_5717VENDOR_ATMEL_MDB011D: 14655 case FLASH_5717VENDOR_ATMEL_ADB011B: 14656 case FLASH_5717VENDOR_ATMEL_ADB011D: 14657 case FLASH_5717VENDOR_ATMEL_MDB021D: 14658 case 
FLASH_5717VENDOR_ATMEL_ADB021B: 14659 case FLASH_5717VENDOR_ATMEL_ADB021D: 14660 case FLASH_5717VENDOR_ATMEL_45USPT: 14661 tp->nvram_jedecnum = JEDEC_ATMEL; 14662 tg3_flag_set(tp, NVRAM_BUFFERED); 14663 tg3_flag_set(tp, FLASH); 14664 14665 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14666 case FLASH_5717VENDOR_ATMEL_MDB021D: 14667 /* Detect size with tg3_get_nvram_size() */ 14668 break; 14669 case FLASH_5717VENDOR_ATMEL_ADB021B: 14670 case FLASH_5717VENDOR_ATMEL_ADB021D: 14671 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 14672 break; 14673 default: 14674 tp->nvram_size = TG3_NVRAM_SIZE_128KB; 14675 break; 14676 } 14677 break; 14678 case FLASH_5717VENDOR_ST_M_M25PE10: 14679 case FLASH_5717VENDOR_ST_A_M25PE10: 14680 case FLASH_5717VENDOR_ST_M_M45PE10: 14681 case FLASH_5717VENDOR_ST_A_M45PE10: 14682 case FLASH_5717VENDOR_ST_M_M25PE20: 14683 case FLASH_5717VENDOR_ST_A_M25PE20: 14684 case FLASH_5717VENDOR_ST_M_M45PE20: 14685 case FLASH_5717VENDOR_ST_A_M45PE20: 14686 case FLASH_5717VENDOR_ST_25USPT: 14687 case FLASH_5717VENDOR_ST_45USPT: 14688 tp->nvram_jedecnum = JEDEC_ST; 14689 tg3_flag_set(tp, NVRAM_BUFFERED); 14690 tg3_flag_set(tp, FLASH); 14691 14692 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14693 case FLASH_5717VENDOR_ST_M_M25PE20: 14694 case FLASH_5717VENDOR_ST_M_M45PE20: 14695 /* Detect size with tg3_get_nvram_size() */ 14696 break; 14697 case FLASH_5717VENDOR_ST_A_M25PE20: 14698 case FLASH_5717VENDOR_ST_A_M45PE20: 14699 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 14700 break; 14701 default: 14702 tp->nvram_size = TG3_NVRAM_SIZE_128KB; 14703 break; 14704 } 14705 break; 14706 default: 14707 tg3_flag_set(tp, NO_NVRAM); 14708 return; 14709 } 14710 14711 tg3_nvram_get_pagesize(tp, nvcfg1); 14712 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) 14713 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); 14714 } 14715 14716 static void tg3_get_5720_nvram_info(struct tg3 *tp) 14717 { 14718 u32 nvcfg1, nvmpinstrp; 14719 14720 nvcfg1 = tr32(NVRAM_CFG1); 14721 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK; 14722 14723 if (tg3_asic_rev(tp) == ASIC_REV_5762) { 14724 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) { 14725 tg3_flag_set(tp, NO_NVRAM); 14726 return; 14727 } 14728 14729 switch (nvmpinstrp) { 14730 case FLASH_5762_EEPROM_HD: 14731 nvmpinstrp = FLASH_5720_EEPROM_HD; 14732 break; 14733 case FLASH_5762_EEPROM_LD: 14734 nvmpinstrp = FLASH_5720_EEPROM_LD; 14735 break; 14736 case FLASH_5720VENDOR_M_ST_M45PE20: 14737 /* This pinstrap supports multiple sizes, so force it 14738 * to read the actual size from location 0xf0.
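 * Mapping it to FLASH_5720VENDOR_ST_45USPT leaves tp->nvram_size at zero on the 5762, so tg3_nvram_init() later falls back to tg3_get_nvram_size(), which reads that size word.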
14739 */ 14740 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT; 14741 break; 14742 } 14743 } 14744 14745 switch (nvmpinstrp) { 14746 case FLASH_5720_EEPROM_HD: 14747 case FLASH_5720_EEPROM_LD: 14748 tp->nvram_jedecnum = JEDEC_ATMEL; 14749 tg3_flag_set(tp, NVRAM_BUFFERED); 14750 14751 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 14752 tw32(NVRAM_CFG1, nvcfg1); 14753 if (nvmpinstrp == FLASH_5720_EEPROM_HD) 14754 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 14755 else 14756 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE; 14757 return; 14758 case FLASH_5720VENDOR_M_ATMEL_DB011D: 14759 case FLASH_5720VENDOR_A_ATMEL_DB011B: 14760 case FLASH_5720VENDOR_A_ATMEL_DB011D: 14761 case FLASH_5720VENDOR_M_ATMEL_DB021D: 14762 case FLASH_5720VENDOR_A_ATMEL_DB021B: 14763 case FLASH_5720VENDOR_A_ATMEL_DB021D: 14764 case FLASH_5720VENDOR_M_ATMEL_DB041D: 14765 case FLASH_5720VENDOR_A_ATMEL_DB041B: 14766 case FLASH_5720VENDOR_A_ATMEL_DB041D: 14767 case FLASH_5720VENDOR_M_ATMEL_DB081D: 14768 case FLASH_5720VENDOR_A_ATMEL_DB081D: 14769 case FLASH_5720VENDOR_ATMEL_45USPT: 14770 tp->nvram_jedecnum = JEDEC_ATMEL; 14771 tg3_flag_set(tp, NVRAM_BUFFERED); 14772 tg3_flag_set(tp, FLASH); 14773 14774 switch (nvmpinstrp) { 14775 case FLASH_5720VENDOR_M_ATMEL_DB021D: 14776 case FLASH_5720VENDOR_A_ATMEL_DB021B: 14777 case FLASH_5720VENDOR_A_ATMEL_DB021D: 14778 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 14779 break; 14780 case FLASH_5720VENDOR_M_ATMEL_DB041D: 14781 case FLASH_5720VENDOR_A_ATMEL_DB041B: 14782 case FLASH_5720VENDOR_A_ATMEL_DB041D: 14783 tp->nvram_size = TG3_NVRAM_SIZE_512KB; 14784 break; 14785 case FLASH_5720VENDOR_M_ATMEL_DB081D: 14786 case FLASH_5720VENDOR_A_ATMEL_DB081D: 14787 tp->nvram_size = TG3_NVRAM_SIZE_1MB; 14788 break; 14789 default: 14790 if (tg3_asic_rev(tp) != ASIC_REV_5762) 14791 tp->nvram_size = TG3_NVRAM_SIZE_128KB; 14792 break; 14793 } 14794 break; 14795 case FLASH_5720VENDOR_M_ST_M25PE10: 14796 case FLASH_5720VENDOR_M_ST_M45PE10: 14797 case FLASH_5720VENDOR_A_ST_M25PE10: 14798 case FLASH_5720VENDOR_A_ST_M45PE10: 14799 case FLASH_5720VENDOR_M_ST_M25PE20: 14800 case FLASH_5720VENDOR_M_ST_M45PE20: 14801 case FLASH_5720VENDOR_A_ST_M25PE20: 14802 case FLASH_5720VENDOR_A_ST_M45PE20: 14803 case FLASH_5720VENDOR_M_ST_M25PE40: 14804 case FLASH_5720VENDOR_M_ST_M45PE40: 14805 case FLASH_5720VENDOR_A_ST_M25PE40: 14806 case FLASH_5720VENDOR_A_ST_M45PE40: 14807 case FLASH_5720VENDOR_M_ST_M25PE80: 14808 case FLASH_5720VENDOR_M_ST_M45PE80: 14809 case FLASH_5720VENDOR_A_ST_M25PE80: 14810 case FLASH_5720VENDOR_A_ST_M45PE80: 14811 case FLASH_5720VENDOR_ST_25USPT: 14812 case FLASH_5720VENDOR_ST_45USPT: 14813 tp->nvram_jedecnum = JEDEC_ST; 14814 tg3_flag_set(tp, NVRAM_BUFFERED); 14815 tg3_flag_set(tp, FLASH); 14816 14817 switch (nvmpinstrp) { 14818 case FLASH_5720VENDOR_M_ST_M25PE20: 14819 case FLASH_5720VENDOR_M_ST_M45PE20: 14820 case FLASH_5720VENDOR_A_ST_M25PE20: 14821 case FLASH_5720VENDOR_A_ST_M45PE20: 14822 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 14823 break; 14824 case FLASH_5720VENDOR_M_ST_M25PE40: 14825 case FLASH_5720VENDOR_M_ST_M45PE40: 14826 case FLASH_5720VENDOR_A_ST_M25PE40: 14827 case FLASH_5720VENDOR_A_ST_M45PE40: 14828 tp->nvram_size = TG3_NVRAM_SIZE_512KB; 14829 break; 14830 case FLASH_5720VENDOR_M_ST_M25PE80: 14831 case FLASH_5720VENDOR_M_ST_M45PE80: 14832 case FLASH_5720VENDOR_A_ST_M25PE80: 14833 case FLASH_5720VENDOR_A_ST_M45PE80: 14834 tp->nvram_size = TG3_NVRAM_SIZE_1MB; 14835 break; 14836 default: 14837 if (tg3_asic_rev(tp) != ASIC_REV_5762) 14838 tp->nvram_size = TG3_NVRAM_SIZE_128KB; 14839 break; 14840 } 
14841 break; 14842 default: 14843 tg3_flag_set(tp, NO_NVRAM); 14844 return; 14845 } 14846 14847 tg3_nvram_get_pagesize(tp, nvcfg1); 14848 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) 14849 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); 14850 14851 if (tg3_asic_rev(tp) == ASIC_REV_5762) { 14852 u32 val; 14853 14854 if (tg3_nvram_read(tp, 0, &val)) 14855 return; 14856 14857 if (val != TG3_EEPROM_MAGIC && 14858 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) 14859 tg3_flag_set(tp, NO_NVRAM); 14860 } 14861 } 14862 14863 /* Chips other than 5700/5701 use the NVRAM for fetching info. */ 14864 static void tg3_nvram_init(struct tg3 *tp) 14865 { 14866 if (tg3_flag(tp, IS_SSB_CORE)) { 14867 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */ 14868 tg3_flag_clear(tp, NVRAM); 14869 tg3_flag_clear(tp, NVRAM_BUFFERED); 14870 tg3_flag_set(tp, NO_NVRAM); 14871 return; 14872 } 14873 14874 tw32_f(GRC_EEPROM_ADDR, 14875 (EEPROM_ADDR_FSM_RESET | 14876 (EEPROM_DEFAULT_CLOCK_PERIOD << 14877 EEPROM_ADDR_CLKPERD_SHIFT))); 14878 14879 msleep(1); 14880 14881 /* Enable seeprom accesses. */ 14882 tw32_f(GRC_LOCAL_CTRL, 14883 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM); 14884 udelay(100); 14885 14886 if (tg3_asic_rev(tp) != ASIC_REV_5700 && 14887 tg3_asic_rev(tp) != ASIC_REV_5701) { 14888 tg3_flag_set(tp, NVRAM); 14889 14890 if (tg3_nvram_lock(tp)) { 14891 netdev_warn(tp->dev, 14892 "Cannot get nvram lock, %s failed\n", 14893 __func__); 14894 return; 14895 } 14896 tg3_enable_nvram_access(tp); 14897 14898 tp->nvram_size = 0; 14899 14900 if (tg3_asic_rev(tp) == ASIC_REV_5752) 14901 tg3_get_5752_nvram_info(tp); 14902 else if (tg3_asic_rev(tp) == ASIC_REV_5755) 14903 tg3_get_5755_nvram_info(tp); 14904 else if (tg3_asic_rev(tp) == ASIC_REV_5787 || 14905 tg3_asic_rev(tp) == ASIC_REV_5784 || 14906 tg3_asic_rev(tp) == ASIC_REV_5785) 14907 tg3_get_5787_nvram_info(tp); 14908 else if (tg3_asic_rev(tp) == ASIC_REV_5761) 14909 tg3_get_5761_nvram_info(tp); 14910 else if (tg3_asic_rev(tp) == ASIC_REV_5906) 14911 tg3_get_5906_nvram_info(tp); 14912 else if (tg3_asic_rev(tp) == ASIC_REV_57780 || 14913 tg3_flag(tp, 57765_CLASS)) 14914 tg3_get_57780_nvram_info(tp); 14915 else if (tg3_asic_rev(tp) == ASIC_REV_5717 || 14916 tg3_asic_rev(tp) == ASIC_REV_5719) 14917 tg3_get_5717_nvram_info(tp); 14918 else if (tg3_asic_rev(tp) == ASIC_REV_5720 || 14919 tg3_asic_rev(tp) == ASIC_REV_5762) 14920 tg3_get_5720_nvram_info(tp); 14921 else 14922 tg3_get_nvram_info(tp); 14923 14924 if (tp->nvram_size == 0) 14925 tg3_get_nvram_size(tp); 14926 14927 tg3_disable_nvram_access(tp); 14928 tg3_nvram_unlock(tp); 14929 14930 } else { 14931 tg3_flag_clear(tp, NVRAM); 14932 tg3_flag_clear(tp, NVRAM_BUFFERED); 14933 14934 tg3_get_eeprom_size(tp); 14935 } 14936 } 14937 14938 struct subsys_tbl_ent { 14939 u16 subsys_vendor, subsys_devid; 14940 u32 phy_id; 14941 }; 14942 14943 static struct subsys_tbl_ent subsys_id_to_phy_id[] = { 14944 /* Broadcom boards. 
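 * A phy_id of 0 in these entries denotes a fiber/serdes board with no copper PHY; tg3_phy_probe() below treats a zero phy_id as a serdes device.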
*/ 14945 { TG3PCI_SUBVENDOR_ID_BROADCOM, 14946 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 }, 14947 { TG3PCI_SUBVENDOR_ID_BROADCOM, 14948 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 }, 14949 { TG3PCI_SUBVENDOR_ID_BROADCOM, 14950 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 }, 14951 { TG3PCI_SUBVENDOR_ID_BROADCOM, 14952 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 }, 14953 { TG3PCI_SUBVENDOR_ID_BROADCOM, 14954 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 }, 14955 { TG3PCI_SUBVENDOR_ID_BROADCOM, 14956 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 }, 14957 { TG3PCI_SUBVENDOR_ID_BROADCOM, 14958 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 }, 14959 { TG3PCI_SUBVENDOR_ID_BROADCOM, 14960 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 }, 14961 { TG3PCI_SUBVENDOR_ID_BROADCOM, 14962 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 }, 14963 { TG3PCI_SUBVENDOR_ID_BROADCOM, 14964 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 }, 14965 { TG3PCI_SUBVENDOR_ID_BROADCOM, 14966 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 }, 14967 14968 /* 3com boards. */ 14969 { TG3PCI_SUBVENDOR_ID_3COM, 14970 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 }, 14971 { TG3PCI_SUBVENDOR_ID_3COM, 14972 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 }, 14973 { TG3PCI_SUBVENDOR_ID_3COM, 14974 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 }, 14975 { TG3PCI_SUBVENDOR_ID_3COM, 14976 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 }, 14977 { TG3PCI_SUBVENDOR_ID_3COM, 14978 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 }, 14979 14980 /* DELL boards. */ 14981 { TG3PCI_SUBVENDOR_ID_DELL, 14982 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 }, 14983 { TG3PCI_SUBVENDOR_ID_DELL, 14984 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 }, 14985 { TG3PCI_SUBVENDOR_ID_DELL, 14986 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 }, 14987 { TG3PCI_SUBVENDOR_ID_DELL, 14988 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 }, 14989 14990 /* Compaq boards. */ 14991 { TG3PCI_SUBVENDOR_ID_COMPAQ, 14992 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 }, 14993 { TG3PCI_SUBVENDOR_ID_COMPAQ, 14994 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 }, 14995 { TG3PCI_SUBVENDOR_ID_COMPAQ, 14996 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 }, 14997 { TG3PCI_SUBVENDOR_ID_COMPAQ, 14998 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 }, 14999 { TG3PCI_SUBVENDOR_ID_COMPAQ, 15000 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 }, 15001 15002 /* IBM boards. */ 15003 { TG3PCI_SUBVENDOR_ID_IBM, 15004 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 } 15005 }; 15006 15007 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp) 15008 { 15009 int i; 15010 15011 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) { 15012 if ((subsys_id_to_phy_id[i].subsys_vendor == 15013 tp->pdev->subsystem_vendor) && 15014 (subsys_id_to_phy_id[i].subsys_devid == 15015 tp->pdev->subsystem_device)) 15016 return &subsys_id_to_phy_id[i]; 15017 } 15018 return NULL; 15019 } 15020 15021 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp) 15022 { 15023 u32 val; 15024 15025 tp->phy_id = TG3_PHY_ID_INVALID; 15026 tp->led_ctrl = LED_CTRL_MODE_PHY_1; 15027 15028 /* Assume an onboard device and WOL capable by default. 
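 * Both assumptions may be revised below once the NVRAM signature and configuration words have been read.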
*/ 15029 tg3_flag_set(tp, EEPROM_WRITE_PROT); 15030 tg3_flag_set(tp, WOL_CAP); 15031 15032 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 15033 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) { 15034 tg3_flag_clear(tp, EEPROM_WRITE_PROT); 15035 tg3_flag_set(tp, IS_NIC); 15036 } 15037 val = tr32(VCPU_CFGSHDW); 15038 if (val & VCPU_CFGSHDW_ASPM_DBNC) 15039 tg3_flag_set(tp, ASPM_WORKAROUND); 15040 if ((val & VCPU_CFGSHDW_WOL_ENABLE) && 15041 (val & VCPU_CFGSHDW_WOL_MAGPKT)) { 15042 tg3_flag_set(tp, WOL_ENABLE); 15043 device_set_wakeup_enable(&tp->pdev->dev, true); 15044 } 15045 goto done; 15046 } 15047 15048 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); 15049 if (val == NIC_SRAM_DATA_SIG_MAGIC) { 15050 u32 nic_cfg, led_cfg; 15051 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0; 15052 u32 nic_phy_id, ver, eeprom_phy_id; 15053 int eeprom_phy_serdes = 0; 15054 15055 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); 15056 tp->nic_sram_data_cfg = nic_cfg; 15057 15058 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver); 15059 ver >>= NIC_SRAM_DATA_VER_SHIFT; 15060 if (tg3_asic_rev(tp) != ASIC_REV_5700 && 15061 tg3_asic_rev(tp) != ASIC_REV_5701 && 15062 tg3_asic_rev(tp) != ASIC_REV_5703 && 15063 (ver > 0) && (ver < 0x100)) 15064 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2); 15065 15066 if (tg3_asic_rev(tp) == ASIC_REV_5785) 15067 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4); 15068 15069 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 15070 tg3_asic_rev(tp) == ASIC_REV_5719 || 15071 tg3_asic_rev(tp) == ASIC_REV_5720) 15072 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5); 15073 15074 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) == 15075 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER) 15076 eeprom_phy_serdes = 1; 15077 15078 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id); 15079 if (nic_phy_id != 0) { 15080 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK; 15081 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK; 15082 15083 eeprom_phy_id = (id1 >> 16) << 10; 15084 eeprom_phy_id |= (id2 & 0xfc00) << 16; 15085 eeprom_phy_id |= (id2 & 0x03ff) << 0; 15086 } else 15087 eeprom_phy_id = 0; 15088 15089 tp->phy_id = eeprom_phy_id; 15090 if (eeprom_phy_serdes) { 15091 if (!tg3_flag(tp, 5705_PLUS)) 15092 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; 15093 else 15094 tp->phy_flags |= TG3_PHYFLG_MII_SERDES; 15095 } 15096 15097 if (tg3_flag(tp, 5750_PLUS)) 15098 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK | 15099 SHASTA_EXT_LED_MODE_MASK); 15100 else 15101 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK; 15102 15103 switch (led_cfg) { 15104 default: 15105 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1: 15106 tp->led_ctrl = LED_CTRL_MODE_PHY_1; 15107 break; 15108 15109 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2: 15110 tp->led_ctrl = LED_CTRL_MODE_PHY_2; 15111 break; 15112 15113 case NIC_SRAM_DATA_CFG_LED_MODE_MAC: 15114 tp->led_ctrl = LED_CTRL_MODE_MAC; 15115 15116 /* Default to PHY_1_MODE if 0 (MAC_MODE) is 15117 * read on some older 5700/5701 bootcode. 
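 * (NIC_SRAM_DATA_CFG_LED_MODE_MAC decodes to 0, so a field the bootcode simply left blank is indistinguishable from a real MAC-mode selection.)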
15118 */ 15119 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 15120 tg3_asic_rev(tp) == ASIC_REV_5701) 15121 tp->led_ctrl = LED_CTRL_MODE_PHY_1; 15122 15123 break; 15124 15125 case SHASTA_EXT_LED_SHARED: 15126 tp->led_ctrl = LED_CTRL_MODE_SHARED; 15127 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 && 15128 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1) 15129 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 | 15130 LED_CTRL_MODE_PHY_2); 15131 15132 if (tg3_flag(tp, 5717_PLUS) || 15133 tg3_asic_rev(tp) == ASIC_REV_5762) 15134 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE | 15135 LED_CTRL_BLINK_RATE_MASK; 15136 15137 break; 15138 15139 case SHASTA_EXT_LED_MAC: 15140 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC; 15141 break; 15142 15143 case SHASTA_EXT_LED_COMBO: 15144 tp->led_ctrl = LED_CTRL_MODE_COMBO; 15145 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) 15146 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 | 15147 LED_CTRL_MODE_PHY_2); 15148 break; 15149 15150 } 15151 15152 if ((tg3_asic_rev(tp) == ASIC_REV_5700 || 15153 tg3_asic_rev(tp) == ASIC_REV_5701) && 15154 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL) 15155 tp->led_ctrl = LED_CTRL_MODE_PHY_2; 15156 15157 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) 15158 tp->led_ctrl = LED_CTRL_MODE_PHY_1; 15159 15160 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) { 15161 tg3_flag_set(tp, EEPROM_WRITE_PROT); 15162 if ((tp->pdev->subsystem_vendor == 15163 PCI_VENDOR_ID_ARIMA) && 15164 (tp->pdev->subsystem_device == 0x205a || 15165 tp->pdev->subsystem_device == 0x2063)) 15166 tg3_flag_clear(tp, EEPROM_WRITE_PROT); 15167 } else { 15168 tg3_flag_clear(tp, EEPROM_WRITE_PROT); 15169 tg3_flag_set(tp, IS_NIC); 15170 } 15171 15172 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { 15173 tg3_flag_set(tp, ENABLE_ASF); 15174 if (tg3_flag(tp, 5750_PLUS)) 15175 tg3_flag_set(tp, ASF_NEW_HANDSHAKE); 15176 } 15177 15178 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) && 15179 tg3_flag(tp, 5750_PLUS)) 15180 tg3_flag_set(tp, ENABLE_APE); 15181 15182 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES && 15183 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)) 15184 tg3_flag_clear(tp, WOL_CAP); 15185 15186 if (tg3_flag(tp, WOL_CAP) && 15187 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) { 15188 tg3_flag_set(tp, WOL_ENABLE); 15189 device_set_wakeup_enable(&tp->pdev->dev, true); 15190 } 15191 15192 if (cfg2 & (1 << 17)) 15193 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING; 15194 15195 /* serdes signal pre-emphasis in register 0x590 set by */ 15196 /* bootcode if bit 18 is set */ 15197 if (cfg2 & (1 << 18)) 15198 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS; 15199 15200 if ((tg3_flag(tp, 57765_PLUS) || 15201 (tg3_asic_rev(tp) == ASIC_REV_5784 && 15202 tg3_chip_rev(tp) != CHIPREV_5784_AX)) && 15203 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN)) 15204 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD; 15205 15206 if (tg3_flag(tp, PCI_EXPRESS)) { 15207 u32 cfg3; 15208 15209 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3); 15210 if (tg3_asic_rev(tp) != ASIC_REV_5785 && 15211 !tg3_flag(tp, 57765_PLUS) && 15212 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)) 15213 tg3_flag_set(tp, ASPM_WORKAROUND); 15214 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID) 15215 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN; 15216 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK) 15217 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK; 15218 } 15219 15220 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE) 15221 tg3_flag_set(tp, RGMII_INBAND_DISABLE); 15222 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN) 15223 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN); 15224 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN) 15225 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN); 15226 15227 if 
(cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV) 15228 tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV; 15229 } 15230 done: 15231 if (tg3_flag(tp, WOL_CAP)) 15232 device_set_wakeup_enable(&tp->pdev->dev, 15233 tg3_flag(tp, WOL_ENABLE)); 15234 else 15235 device_set_wakeup_capable(&tp->pdev->dev, false); 15236 } 15237 15238 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val) 15239 { 15240 int i, err; 15241 u32 val2, off = offset * 8; 15242 15243 err = tg3_nvram_lock(tp); 15244 if (err) 15245 return err; 15246 15247 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE); 15248 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN | 15249 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START); 15250 tg3_ape_read32(tp, TG3_APE_OTP_CTRL); 15251 udelay(10); 15252 15253 for (i = 0; i < 100; i++) { 15254 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS); 15255 if (val2 & APE_OTP_STATUS_CMD_DONE) { 15256 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA); 15257 break; 15258 } 15259 udelay(10); 15260 } 15261 15262 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0); 15263 15264 tg3_nvram_unlock(tp); 15265 if (val2 & APE_OTP_STATUS_CMD_DONE) 15266 return 0; 15267 15268 return -EBUSY; 15269 } 15270 15271 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd) 15272 { 15273 int i; 15274 u32 val; 15275 15276 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START); 15277 tw32(OTP_CTRL, cmd); 15278 15279 /* Wait for up to 1 ms for command to execute. */ 15280 for (i = 0; i < 100; i++) { 15281 val = tr32(OTP_STATUS); 15282 if (val & OTP_STATUS_CMD_DONE) 15283 break; 15284 udelay(10); 15285 } 15286 15287 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY; 15288 } 15289 15290 /* Read the gphy configuration from the OTP region of the chip. The gphy 15291 * configuration is a 32-bit value that straddles the alignment boundary. 15292 * We do two 32-bit reads and then shift and merge the results. 
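 * A hypothetical example: if the top-half read returns 0xaaaa1234 and the bottom-half read returns 0x5678bbbb, the merged result is (0x1234 << 16) | 0x5678 = 0x12345678; the unused 0xaaaa/0xbbbb halves are discarded.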
15293 */ 15294 static u32 tg3_read_otp_phycfg(struct tg3 *tp) 15295 { 15296 u32 bhalf_otp, thalf_otp; 15297 15298 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC); 15299 15300 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT)) 15301 return 0; 15302 15303 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1); 15304 15305 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ)) 15306 return 0; 15307 15308 thalf_otp = tr32(OTP_READ_DATA); 15309 15310 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2); 15311 15312 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ)) 15313 return 0; 15314 15315 bhalf_otp = tr32(OTP_READ_DATA); 15316 15317 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16); 15318 } 15319 15320 static void tg3_phy_init_link_config(struct tg3 *tp) 15321 { 15322 u32 adv = ADVERTISED_Autoneg; 15323 15324 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 15325 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV)) 15326 adv |= ADVERTISED_1000baseT_Half; 15327 adv |= ADVERTISED_1000baseT_Full; 15328 } 15329 15330 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 15331 adv |= ADVERTISED_100baseT_Half | 15332 ADVERTISED_100baseT_Full | 15333 ADVERTISED_10baseT_Half | 15334 ADVERTISED_10baseT_Full | 15335 ADVERTISED_TP; 15336 else 15337 adv |= ADVERTISED_FIBRE; 15338 15339 tp->link_config.advertising = adv; 15340 tp->link_config.speed = SPEED_UNKNOWN; 15341 tp->link_config.duplex = DUPLEX_UNKNOWN; 15342 tp->link_config.autoneg = AUTONEG_ENABLE; 15343 tp->link_config.active_speed = SPEED_UNKNOWN; 15344 tp->link_config.active_duplex = DUPLEX_UNKNOWN; 15345 15346 tp->old_link = -1; 15347 } 15348 15349 static int tg3_phy_probe(struct tg3 *tp) 15350 { 15351 u32 hw_phy_id_1, hw_phy_id_2; 15352 u32 hw_phy_id, hw_phy_id_masked; 15353 int err; 15354 15355 /* flow control autonegotiation is default behavior */ 15356 tg3_flag_set(tp, PAUSE_AUTONEG); 15357 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; 15358 15359 if (tg3_flag(tp, ENABLE_APE)) { 15360 switch (tp->pci_fn) { 15361 case 0: 15362 tp->phy_ape_lock = TG3_APE_LOCK_PHY0; 15363 break; 15364 case 1: 15365 tp->phy_ape_lock = TG3_APE_LOCK_PHY1; 15366 break; 15367 case 2: 15368 tp->phy_ape_lock = TG3_APE_LOCK_PHY2; 15369 break; 15370 case 3: 15371 tp->phy_ape_lock = TG3_APE_LOCK_PHY3; 15372 break; 15373 } 15374 } 15375 15376 if (!tg3_flag(tp, ENABLE_ASF) && 15377 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && 15378 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) 15379 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK | 15380 TG3_PHYFLG_KEEP_LINK_ON_PWRDN); 15381 15382 if (tg3_flag(tp, USE_PHYLIB)) 15383 return tg3_phy_init(tp); 15384 15385 /* Reading the PHY ID register can conflict with ASF 15386 * firmware access to the PHY hardware. 15387 */ 15388 err = 0; 15389 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) { 15390 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID; 15391 } else { 15392 /* Now read the physical PHY_ID from the chip and verify 15393 * that it is sane. If it doesn't look good, we fall back 15394 * to the PHY_ID found in the eeprom area or, failing 15395 * that, the hard-coded subsys device table.
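 * The two MII ID words read below are repacked into the driver's internal layout so that the result can be masked with TG3_PHY_ID_MASK and compared against the TG3_PHY_ID_* constants.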
15396 */ 15397 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1); 15398 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2); 15399 15400 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10; 15401 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16; 15402 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0; 15403 15404 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK; 15405 } 15406 15407 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) { 15408 tp->phy_id = hw_phy_id; 15409 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002) 15410 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; 15411 else 15412 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES; 15413 } else { 15414 if (tp->phy_id != TG3_PHY_ID_INVALID) { 15415 /* Do nothing, phy ID already set up in 15416 * tg3_get_eeprom_hw_cfg(). 15417 */ 15418 } else { 15419 struct subsys_tbl_ent *p; 15420 15421 /* No eeprom signature? Try the hardcoded 15422 * subsys device table. 15423 */ 15424 p = tg3_lookup_by_subsys(tp); 15425 if (p) { 15426 tp->phy_id = p->phy_id; 15427 } else if (!tg3_flag(tp, IS_SSB_CORE)) { 15428 /* So far we have seen the IDs 0xbc050cd0, 15429 * 0xbc050f80 and 0xbc050c30 on devices 15430 * connected to a BCM4785, and there are 15431 * probably more. For now, just assume that 15432 * the phy is supported when it is connected 15433 * to an SSB core. 15434 */ 15435 return -ENODEV; 15436 } 15437 15438 if (!tp->phy_id || 15439 tp->phy_id == TG3_PHY_ID_BCM8002) 15440 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; 15441 } 15442 } 15443 15444 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && 15445 (tg3_asic_rev(tp) == ASIC_REV_5719 || 15446 tg3_asic_rev(tp) == ASIC_REV_5720 || 15447 tg3_asic_rev(tp) == ASIC_REV_57766 || 15448 tg3_asic_rev(tp) == ASIC_REV_5762 || 15449 (tg3_asic_rev(tp) == ASIC_REV_5717 && 15450 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) || 15451 (tg3_asic_rev(tp) == ASIC_REV_57765 && 15452 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) { 15453 tp->phy_flags |= TG3_PHYFLG_EEE_CAP; 15454 15455 tp->eee.supported = SUPPORTED_100baseT_Full | 15456 SUPPORTED_1000baseT_Full; 15457 tp->eee.advertised = ADVERTISED_100baseT_Full | 15458 ADVERTISED_1000baseT_Full; 15459 tp->eee.eee_enabled = 1; 15460 tp->eee.tx_lpi_enabled = 1; 15461 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US; 15462 } 15463 15464 tg3_phy_init_link_config(tp); 15465 15466 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && 15467 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && 15468 !tg3_flag(tp, ENABLE_APE) && 15469 !tg3_flag(tp, ENABLE_ASF)) { 15470 u32 bmsr, dummy; 15471 15472 tg3_readphy(tp, MII_BMSR, &bmsr); 15473 if (!tg3_readphy(tp, MII_BMSR, &bmsr) && 15474 (bmsr & BMSR_LSTATUS)) 15475 goto skip_phy_reset; 15476 15477 err = tg3_phy_reset(tp); 15478 if (err) 15479 return err; 15480 15481 tg3_phy_set_wirespeed(tp); 15482 15483 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) { 15484 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising, 15485 tp->link_config.flowctrl); 15486 15487 tg3_writephy(tp, MII_BMCR, 15488 BMCR_ANENABLE | BMCR_ANRESTART); 15489 } 15490 } 15491 15492 skip_phy_reset: 15493 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { 15494 err = tg3_init_5401phy_dsp(tp); 15495 if (err) 15496 return err; 15497 15498 err = tg3_init_5401phy_dsp(tp); 15499 } 15500 15501 return err; 15502 } 15503 15504 static void tg3_read_vpd(struct tg3 *tp) 15505 { 15506 u8 *vpd_data; 15507 unsigned int block_end, rosize, len; 15508 u32 vpdlen; 15509 int j, i = 0; 15510 15511 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen); 15512 if (!vpd_data) 15513 goto out_no_vpd; 15514 15515 i = pci_vpd_find_tag(vpd_data, 0, vpdlen,
PCI_VPD_LRDT_RO_DATA); 15516 if (i < 0) 15517 goto out_not_found; 15518 15519 rosize = pci_vpd_lrdt_size(&vpd_data[i]); 15520 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize; 15521 i += PCI_VPD_LRDT_TAG_SIZE; 15522 15523 if (block_end > vpdlen) 15524 goto out_not_found; 15525 15526 j = pci_vpd_find_info_keyword(vpd_data, i, rosize, 15527 PCI_VPD_RO_KEYWORD_MFR_ID); 15528 if (j > 0) { 15529 len = pci_vpd_info_field_size(&vpd_data[j]); 15530 15531 j += PCI_VPD_INFO_FLD_HDR_SIZE; 15532 if (j + len > block_end || len != 4 || 15533 memcmp(&vpd_data[j], "1028", 4)) 15534 goto partno; 15535 15536 j = pci_vpd_find_info_keyword(vpd_data, i, rosize, 15537 PCI_VPD_RO_KEYWORD_VENDOR0); 15538 if (j < 0) 15539 goto partno; 15540 15541 len = pci_vpd_info_field_size(&vpd_data[j]); 15542 15543 j += PCI_VPD_INFO_FLD_HDR_SIZE; 15544 if (j + len > block_end) 15545 goto partno; 15546 15547 if (len >= sizeof(tp->fw_ver)) 15548 len = sizeof(tp->fw_ver) - 1; 15549 memset(tp->fw_ver, 0, sizeof(tp->fw_ver)); 15550 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, 15551 &vpd_data[j]); 15552 } 15553 15554 partno: 15555 i = pci_vpd_find_info_keyword(vpd_data, i, rosize, 15556 PCI_VPD_RO_KEYWORD_PARTNO); 15557 if (i < 0) 15558 goto out_not_found; 15559 15560 len = pci_vpd_info_field_size(&vpd_data[i]); 15561 15562 i += PCI_VPD_INFO_FLD_HDR_SIZE; 15563 if (len > TG3_BPN_SIZE || 15564 (len + i) > vpdlen) 15565 goto out_not_found; 15566 15567 memcpy(tp->board_part_number, &vpd_data[i], len); 15568 15569 out_not_found: 15570 kfree(vpd_data); 15571 if (tp->board_part_number[0]) 15572 return; 15573 15574 out_no_vpd: 15575 if (tg3_asic_rev(tp) == ASIC_REV_5717) { 15576 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || 15577 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C) 15578 strcpy(tp->board_part_number, "BCM5717"); 15579 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718) 15580 strcpy(tp->board_part_number, "BCM5718"); 15581 else 15582 goto nomatch; 15583 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) { 15584 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780) 15585 strcpy(tp->board_part_number, "BCM57780"); 15586 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760) 15587 strcpy(tp->board_part_number, "BCM57760"); 15588 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790) 15589 strcpy(tp->board_part_number, "BCM57790"); 15590 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788) 15591 strcpy(tp->board_part_number, "BCM57788"); 15592 else 15593 goto nomatch; 15594 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) { 15595 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761) 15596 strcpy(tp->board_part_number, "BCM57761"); 15597 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765) 15598 strcpy(tp->board_part_number, "BCM57765"); 15599 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781) 15600 strcpy(tp->board_part_number, "BCM57781"); 15601 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785) 15602 strcpy(tp->board_part_number, "BCM57785"); 15603 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791) 15604 strcpy(tp->board_part_number, "BCM57791"); 15605 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795) 15606 strcpy(tp->board_part_number, "BCM57795"); 15607 else 15608 goto nomatch; 15609 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) { 15610 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762) 15611 strcpy(tp->board_part_number, "BCM57762"); 15612 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766) 15613 strcpy(tp->board_part_number, "BCM57766"); 15614 else if (tp->pdev->device 
== TG3PCI_DEVICE_TIGON3_57782) 15615 strcpy(tp->board_part_number, "BCM57782"); 15616 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786) 15617 strcpy(tp->board_part_number, "BCM57786"); 15618 else 15619 goto nomatch; 15620 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) { 15621 strcpy(tp->board_part_number, "BCM95906"); 15622 } else { 15623 nomatch: 15624 strcpy(tp->board_part_number, "none"); 15625 } 15626 } 15627 15628 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset) 15629 { 15630 u32 val; 15631 15632 if (tg3_nvram_read(tp, offset, &val) || 15633 (val & 0xfc000000) != 0x0c000000 || 15634 tg3_nvram_read(tp, offset + 4, &val) || 15635 val != 0) 15636 return 0; 15637 15638 return 1; 15639 } 15640 15641 static void tg3_read_bc_ver(struct tg3 *tp) 15642 { 15643 u32 val, offset, start, ver_offset; 15644 int i, dst_off; 15645 bool newver = false; 15646 15647 if (tg3_nvram_read(tp, 0xc, &offset) || 15648 tg3_nvram_read(tp, 0x4, &start)) 15649 return; 15650 15651 offset = tg3_nvram_logical_addr(tp, offset); 15652 15653 if (tg3_nvram_read(tp, offset, &val)) 15654 return; 15655 15656 if ((val & 0xfc000000) == 0x0c000000) { 15657 if (tg3_nvram_read(tp, offset + 4, &val)) 15658 return; 15659 15660 if (val == 0) 15661 newver = true; 15662 } 15663 15664 dst_off = strlen(tp->fw_ver); 15665 15666 if (newver) { 15667 if (TG3_VER_SIZE - dst_off < 16 || 15668 tg3_nvram_read(tp, offset + 8, &ver_offset)) 15669 return; 15670 15671 offset = offset + ver_offset - start; 15672 for (i = 0; i < 16; i += 4) { 15673 __be32 v; 15674 if (tg3_nvram_read_be32(tp, offset + i, &v)) 15675 return; 15676 15677 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v)); 15678 } 15679 } else { 15680 u32 major, minor; 15681 15682 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset)) 15683 return; 15684 15685 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >> 15686 TG3_NVM_BCVER_MAJSFT; 15687 minor = ver_offset & TG3_NVM_BCVER_MINMSK; 15688 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off, 15689 "v%d.%02d", major, minor); 15690 } 15691 } 15692 15693 static void tg3_read_hwsb_ver(struct tg3 *tp) 15694 { 15695 u32 val, major, minor; 15696 15697 /* Use native endian representation */ 15698 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val)) 15699 return; 15700 15701 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >> 15702 TG3_NVM_HWSB_CFG1_MAJSFT; 15703 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >> 15704 TG3_NVM_HWSB_CFG1_MINSFT; 15705 15706 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor); 15707 } 15708 15709 static void tg3_read_sb_ver(struct tg3 *tp, u32 val) 15710 { 15711 u32 offset, major, minor, build; 15712 15713 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1); 15714 15715 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1) 15716 return; 15717 15718 switch (val & TG3_EEPROM_SB_REVISION_MASK) { 15719 case TG3_EEPROM_SB_REVISION_0: 15720 offset = TG3_EEPROM_SB_F1R0_EDH_OFF; 15721 break; 15722 case TG3_EEPROM_SB_REVISION_2: 15723 offset = TG3_EEPROM_SB_F1R2_EDH_OFF; 15724 break; 15725 case TG3_EEPROM_SB_REVISION_3: 15726 offset = TG3_EEPROM_SB_F1R3_EDH_OFF; 15727 break; 15728 case TG3_EEPROM_SB_REVISION_4: 15729 offset = TG3_EEPROM_SB_F1R4_EDH_OFF; 15730 break; 15731 case TG3_EEPROM_SB_REVISION_5: 15732 offset = TG3_EEPROM_SB_F1R5_EDH_OFF; 15733 break; 15734 case TG3_EEPROM_SB_REVISION_6: 15735 offset = TG3_EEPROM_SB_F1R6_EDH_OFF; 15736 break; 15737 default: 15738 return; 15739 } 15740 15741 if (tg3_nvram_read(tp, offset, &val)) 15742 return; 15743 15744 build = (val & 
TG3_EEPROM_SB_EDH_BLD_MASK) >> 15745 TG3_EEPROM_SB_EDH_BLD_SHFT; 15746 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >> 15747 TG3_EEPROM_SB_EDH_MAJ_SHFT; 15748 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK; 15749 15750 if (minor > 99 || build > 26) 15751 return; 15752 15753 offset = strlen(tp->fw_ver); 15754 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset, 15755 " v%d.%02d", major, minor); 15756 15757 if (build > 0) { 15758 offset = strlen(tp->fw_ver); 15759 if (offset < TG3_VER_SIZE - 1) 15760 tp->fw_ver[offset] = 'a' + build - 1; 15761 } 15762 } 15763 15764 static void tg3_read_mgmtfw_ver(struct tg3 *tp) 15765 { 15766 u32 val, offset, start; 15767 int i, vlen; 15768 15769 for (offset = TG3_NVM_DIR_START; 15770 offset < TG3_NVM_DIR_END; 15771 offset += TG3_NVM_DIRENT_SIZE) { 15772 if (tg3_nvram_read(tp, offset, &val)) 15773 return; 15774 15775 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI) 15776 break; 15777 } 15778 15779 if (offset == TG3_NVM_DIR_END) 15780 return; 15781 15782 if (!tg3_flag(tp, 5705_PLUS)) 15783 start = 0x08000000; 15784 else if (tg3_nvram_read(tp, offset - 4, &start)) 15785 return; 15786 15787 if (tg3_nvram_read(tp, offset + 4, &offset) || 15788 !tg3_fw_img_is_valid(tp, offset) || 15789 tg3_nvram_read(tp, offset + 8, &val)) 15790 return; 15791 15792 offset += val - start; 15793 15794 vlen = strlen(tp->fw_ver); 15795 15796 tp->fw_ver[vlen++] = ','; 15797 tp->fw_ver[vlen++] = ' '; 15798 15799 for (i = 0; i < 4; i++) { 15800 __be32 v; 15801 if (tg3_nvram_read_be32(tp, offset, &v)) 15802 return; 15803 15804 offset += sizeof(v); 15805 15806 if (vlen > TG3_VER_SIZE - sizeof(v)) { 15807 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen); 15808 break; 15809 } 15810 15811 memcpy(&tp->fw_ver[vlen], &v, sizeof(v)); 15812 vlen += sizeof(v); 15813 } 15814 } 15815 15816 static void tg3_probe_ncsi(struct tg3 *tp) 15817 { 15818 u32 apedata; 15819 15820 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); 15821 if (apedata != APE_SEG_SIG_MAGIC) 15822 return; 15823 15824 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS); 15825 if (!(apedata & APE_FW_STATUS_READY)) 15826 return; 15827 15828 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) 15829 tg3_flag_set(tp, APE_HAS_NCSI); 15830 } 15831 15832 static void tg3_read_dash_ver(struct tg3 *tp) 15833 { 15834 int vlen; 15835 u32 apedata; 15836 char *fwtype; 15837 15838 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION); 15839 15840 if (tg3_flag(tp, APE_HAS_NCSI)) 15841 fwtype = "NCSI"; 15842 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725) 15843 fwtype = "SMASH"; 15844 else 15845 fwtype = "DASH"; 15846 15847 vlen = strlen(tp->fw_ver); 15848 15849 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d", 15850 fwtype, 15851 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT, 15852 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT, 15853 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT, 15854 (apedata & APE_FW_VERSION_BLDMSK)); 15855 } 15856 15857 static void tg3_read_otp_ver(struct tg3 *tp) 15858 { 15859 u32 val, val2; 15860 15861 if (tg3_asic_rev(tp) != ASIC_REV_5762) 15862 return; 15863 15864 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) && 15865 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) && 15866 TG3_OTP_MAGIC0_VALID(val)) { 15867 u64 val64 = (u64) val << 32 | val2; 15868 u32 ver = 0; 15869 int i, vlen; 15870 15871 for (i = 0; i < 7; i++) { 15872 if ((val64 & 0xff) == 0) 15873 break; 15874 ver = val64 & 0xff; 15875 val64 >>= 8; 15876 } 15877 vlen = 
strlen(tp->fw_ver); 15878 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver); 15879 } 15880 } 15881 15882 static void tg3_read_fw_ver(struct tg3 *tp) 15883 { 15884 u32 val; 15885 bool vpd_vers = false; 15886 15887 if (tp->fw_ver[0] != 0) 15888 vpd_vers = true; 15889 15890 if (tg3_flag(tp, NO_NVRAM)) { 15891 strcat(tp->fw_ver, "sb"); 15892 tg3_read_otp_ver(tp); 15893 return; 15894 } 15895 15896 if (tg3_nvram_read(tp, 0, &val)) 15897 return; 15898 15899 if (val == TG3_EEPROM_MAGIC) 15900 tg3_read_bc_ver(tp); 15901 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) 15902 tg3_read_sb_ver(tp, val); 15903 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW) 15904 tg3_read_hwsb_ver(tp); 15905 15906 if (tg3_flag(tp, ENABLE_ASF)) { 15907 if (tg3_flag(tp, ENABLE_APE)) { 15908 tg3_probe_ncsi(tp); 15909 if (!vpd_vers) 15910 tg3_read_dash_ver(tp); 15911 } else if (!vpd_vers) { 15912 tg3_read_mgmtfw_ver(tp); 15913 } 15914 } 15915 15916 tp->fw_ver[TG3_VER_SIZE - 1] = 0; 15917 } 15918 15919 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp) 15920 { 15921 if (tg3_flag(tp, LRG_PROD_RING_CAP)) 15922 return TG3_RX_RET_MAX_SIZE_5717; 15923 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) 15924 return TG3_RX_RET_MAX_SIZE_5700; 15925 else 15926 return TG3_RX_RET_MAX_SIZE_5705; 15927 } 15928 15929 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = { 15930 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) }, 15931 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) }, 15932 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) }, 15933 { }, 15934 }; 15935 15936 static struct pci_dev *tg3_find_peer(struct tg3 *tp) 15937 { 15938 struct pci_dev *peer; 15939 unsigned int func, devnr = tp->pdev->devfn & ~7; 15940 15941 for (func = 0; func < 8; func++) { 15942 peer = pci_get_slot(tp->pdev->bus, devnr | func); 15943 if (peer && peer != tp->pdev) 15944 break; 15945 pci_dev_put(peer); 15946 } 15947 /* The 5704 can be configured in single-port mode; set peer to 15948 * tp->pdev in that case. 15949 */ 15950 if (!peer) { 15951 peer = tp->pdev; 15952 return peer; 15953 } 15954 15955 /* 15956 * We don't need to keep the refcount elevated; there's no way 15957 * to remove one half of this device without removing the other. 15958 */ 15959 pci_dev_put(peer); 15960 15961 return peer; 15962 } 15963 15964 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg) 15965 { 15966 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT; 15967 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) { 15968 u32 reg; 15969 15970 /* All devices that use the alternate 15971 * ASIC REV location have a CPMU.
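 * When the revision field of MISC_HOST_CTRL decodes to ASIC_REV_USE_PROD_ID_REG, the real chip ID must instead be read from one of the product-ID config registers selected below.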
15972 */ 15973 tg3_flag_set(tp, CPMU_PRESENT); 15974 15975 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || 15976 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C || 15977 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || 15978 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || 15979 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 || 15980 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 || 15981 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 || 15982 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 || 15983 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 || 15984 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 || 15985 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) 15986 reg = TG3PCI_GEN2_PRODID_ASICREV; 15987 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 || 15988 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 || 15989 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 || 15990 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 || 15991 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 || 15992 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 || 15993 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 || 15994 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 || 15995 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 || 15996 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786) 15997 reg = TG3PCI_GEN15_PRODID_ASICREV; 15998 else 15999 reg = TG3PCI_PRODID_ASICREV; 16000 16001 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id); 16002 } 16003 16004 /* Wrong chip ID in 5752 A0. This code can be removed later 16005 * as A0 is not in production. 16006 */ 16007 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW) 16008 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0; 16009 16010 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0) 16011 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0; 16012 16013 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 16014 tg3_asic_rev(tp) == ASIC_REV_5719 || 16015 tg3_asic_rev(tp) == ASIC_REV_5720) 16016 tg3_flag_set(tp, 5717_PLUS); 16017 16018 if (tg3_asic_rev(tp) == ASIC_REV_57765 || 16019 tg3_asic_rev(tp) == ASIC_REV_57766) 16020 tg3_flag_set(tp, 57765_CLASS); 16021 16022 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) || 16023 tg3_asic_rev(tp) == ASIC_REV_5762) 16024 tg3_flag_set(tp, 57765_PLUS); 16025 16026 /* Intentionally exclude ASIC_REV_5906 */ 16027 if (tg3_asic_rev(tp) == ASIC_REV_5755 || 16028 tg3_asic_rev(tp) == ASIC_REV_5787 || 16029 tg3_asic_rev(tp) == ASIC_REV_5784 || 16030 tg3_asic_rev(tp) == ASIC_REV_5761 || 16031 tg3_asic_rev(tp) == ASIC_REV_5785 || 16032 tg3_asic_rev(tp) == ASIC_REV_57780 || 16033 tg3_flag(tp, 57765_PLUS)) 16034 tg3_flag_set(tp, 5755_PLUS); 16035 16036 if (tg3_asic_rev(tp) == ASIC_REV_5780 || 16037 tg3_asic_rev(tp) == ASIC_REV_5714) 16038 tg3_flag_set(tp, 5780_CLASS); 16039 16040 if (tg3_asic_rev(tp) == ASIC_REV_5750 || 16041 tg3_asic_rev(tp) == ASIC_REV_5752 || 16042 tg3_asic_rev(tp) == ASIC_REV_5906 || 16043 tg3_flag(tp, 5755_PLUS) || 16044 tg3_flag(tp, 5780_CLASS)) 16045 tg3_flag_set(tp, 5750_PLUS); 16046 16047 if (tg3_asic_rev(tp) == ASIC_REV_5705 || 16048 tg3_flag(tp, 5750_PLUS)) 16049 tg3_flag_set(tp, 5705_PLUS); 16050 } 16051 16052 static bool tg3_10_100_only_device(struct tg3 *tp, 16053 const struct pci_device_id *ent) 16054 { 16055 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK; 16056 16057 if ((tg3_asic_rev(tp) == ASIC_REV_5703 && 16058 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) || 16059 (tp->phy_flags & TG3_PHYFLG_IS_FET)) 16060 return true; 16061 16062 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) { 16063 if 
(tg3_asic_rev(tp) == ASIC_REV_5705) {
16064 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16065 return true;
16066 } else {
16067 return true;
16068 }
16069 }
16070
16071 return false;
16072 }
16073
16074 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16075 {
16076 u32 misc_ctrl_reg;
16077 u32 pci_state_reg, grc_misc_cfg;
16078 u32 val;
16079 u16 pci_cmd;
16080 int err;
16081
16082 /* Force memory write invalidate off. If we leave it on,
16083 * then on 5700_BX chips we have to enable a workaround.
16084 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16085 * to match the cacheline size. The Broadcom driver has this
16086 * workaround but turns MWI off all the time so never uses
16087 * it. This seems to suggest that the workaround is insufficient.
16088 */
16089 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16090 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16091 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16092
16093 /* Important! -- Make sure register accesses are byteswapped
16094 * correctly. Also, for those chips that require it, make
16095 * sure that indirect register accesses are enabled before
16096 * the first operation.
16097 */
16098 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16099 &misc_ctrl_reg);
16100 tp->misc_host_ctrl |= (misc_ctrl_reg &
16101 MISC_HOST_CTRL_CHIPREV);
16102 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16103 tp->misc_host_ctrl);
16104
16105 tg3_detect_asic_rev(tp, misc_ctrl_reg);
16106
16107 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16108 * we need to disable memory and use config. cycles
16109 * only to access all registers. The 5702/03 chips
16110 * can mistakenly decode the special cycles from the
16111 * ICH chipsets as memory write cycles, causing corruption
16112 * of register and memory space. Only certain ICH bridges
16113 * will drive special cycles with non-zero data during the
16114 * address phase which can fall within the 5703's address
16115 * range. This is not an ICH bug as the PCI spec allows
16116 * non-zero address during special cycles. However, only
16117 * these ICH bridges are known to drive non-zero addresses
16118 * during special cycles.
16119 *
16120 * Since special cycles do not cross PCI bridges, we only
16121 * enable this workaround if the 5703 is on the secondary
16122 * bus of these ICH bridges.
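 *
 * The workaround itself (ICH_WORKAROUND, applied further down)
 * unmaps the MMIO BAR and routes every register access through
 * indirect PCI config cycles instead.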
16123 */
16124 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16125 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16126 static struct tg3_dev_id {
16127 u32 vendor;
16128 u32 device;
16129 u32 rev;
16130 } ich_chipsets[] = {
16131 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16132 PCI_ANY_ID },
16133 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16134 PCI_ANY_ID },
16135 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16136 0xa },
16137 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16138 PCI_ANY_ID },
16139 { },
16140 };
16141 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16142 struct pci_dev *bridge = NULL;
16143
16144 while (pci_id->vendor != 0) {
16145 bridge = pci_get_device(pci_id->vendor, pci_id->device,
16146 bridge);
16147 if (!bridge) {
16148 pci_id++;
16149 continue;
16150 }
16151 if (pci_id->rev != PCI_ANY_ID) {
16152 if (bridge->revision > pci_id->rev)
16153 continue;
16154 }
16155 if (bridge->subordinate &&
16156 (bridge->subordinate->number ==
16157 tp->pdev->bus->number)) {
16158 tg3_flag_set(tp, ICH_WORKAROUND);
16159 pci_dev_put(bridge);
16160 break;
16161 }
16162 }
16163 }
16164
16165 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16166 static struct tg3_dev_id {
16167 u32 vendor;
16168 u32 device;
16169 } bridge_chipsets[] = {
16170 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16171 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16172 { },
16173 };
16174 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16175 struct pci_dev *bridge = NULL;
16176
16177 while (pci_id->vendor != 0) {
16178 bridge = pci_get_device(pci_id->vendor,
16179 pci_id->device,
16180 bridge);
16181 if (!bridge) {
16182 pci_id++;
16183 continue;
16184 }
16185 if (bridge->subordinate &&
16186 (bridge->subordinate->number <=
16187 tp->pdev->bus->number) &&
16188 (bridge->subordinate->busn_res.end >=
16189 tp->pdev->bus->number)) {
16190 tg3_flag_set(tp, 5701_DMA_BUG);
16191 pci_dev_put(bridge);
16192 break;
16193 }
16194 }
16195 }
16196
16197 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16198 * DMA addresses > 40-bit. This bridge may have additional
16199 * 57xx devices behind it in some 4-port NIC designs, for example.
16200 * Any tg3 device found behind the bridge will also need the 40-bit
16201 * DMA workaround.
16202 */
16203 if (tg3_flag(tp, 5780_CLASS)) {
16204 tg3_flag_set(tp, 40BIT_DMA_BUG);
16205 tp->msi_cap = tp->pdev->msi_cap;
16206 } else {
16207 struct pci_dev *bridge = NULL;
16208
16209 do {
16210 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16211 PCI_DEVICE_ID_SERVERWORKS_EPB,
16212 bridge);
16213 if (bridge && bridge->subordinate &&
16214 (bridge->subordinate->number <=
16215 tp->pdev->bus->number) &&
16216 (bridge->subordinate->busn_res.end >=
16217 tp->pdev->bus->number)) {
16218 tg3_flag_set(tp, 40BIT_DMA_BUG);
16219 pci_dev_put(bridge);
16220 break;
16221 }
16222 } while (bridge);
16223 }
16224
16225 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16226 tg3_asic_rev(tp) == ASIC_REV_5714)
16227 tp->pdev_peer = tg3_find_peer(tp);
16228
16229 /* Determine TSO capabilities */
16230 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16231 ; /* Do nothing. HW bug.
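 * 5719 A0 cannot use any of the TSO engines, so all TSO
 * flags are left clear here.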
*/ 16232 else if (tg3_flag(tp, 57765_PLUS)) 16233 tg3_flag_set(tp, HW_TSO_3); 16234 else if (tg3_flag(tp, 5755_PLUS) || 16235 tg3_asic_rev(tp) == ASIC_REV_5906) 16236 tg3_flag_set(tp, HW_TSO_2); 16237 else if (tg3_flag(tp, 5750_PLUS)) { 16238 tg3_flag_set(tp, HW_TSO_1); 16239 tg3_flag_set(tp, TSO_BUG); 16240 if (tg3_asic_rev(tp) == ASIC_REV_5750 && 16241 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2) 16242 tg3_flag_clear(tp, TSO_BUG); 16243 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 && 16244 tg3_asic_rev(tp) != ASIC_REV_5701 && 16245 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) { 16246 tg3_flag_set(tp, FW_TSO); 16247 tg3_flag_set(tp, TSO_BUG); 16248 if (tg3_asic_rev(tp) == ASIC_REV_5705) 16249 tp->fw_needed = FIRMWARE_TG3TSO5; 16250 else 16251 tp->fw_needed = FIRMWARE_TG3TSO; 16252 } 16253 16254 /* Selectively allow TSO based on operating conditions */ 16255 if (tg3_flag(tp, HW_TSO_1) || 16256 tg3_flag(tp, HW_TSO_2) || 16257 tg3_flag(tp, HW_TSO_3) || 16258 tg3_flag(tp, FW_TSO)) { 16259 /* For firmware TSO, assume ASF is disabled. 16260 * We'll disable TSO later if we discover ASF 16261 * is enabled in tg3_get_eeprom_hw_cfg(). 16262 */ 16263 tg3_flag_set(tp, TSO_CAPABLE); 16264 } else { 16265 tg3_flag_clear(tp, TSO_CAPABLE); 16266 tg3_flag_clear(tp, TSO_BUG); 16267 tp->fw_needed = NULL; 16268 } 16269 16270 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) 16271 tp->fw_needed = FIRMWARE_TG3; 16272 16273 if (tg3_asic_rev(tp) == ASIC_REV_57766) 16274 tp->fw_needed = FIRMWARE_TG357766; 16275 16276 tp->irq_max = 1; 16277 16278 if (tg3_flag(tp, 5750_PLUS)) { 16279 tg3_flag_set(tp, SUPPORT_MSI); 16280 if (tg3_chip_rev(tp) == CHIPREV_5750_AX || 16281 tg3_chip_rev(tp) == CHIPREV_5750_BX || 16282 (tg3_asic_rev(tp) == ASIC_REV_5714 && 16283 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 && 16284 tp->pdev_peer == tp->pdev)) 16285 tg3_flag_clear(tp, SUPPORT_MSI); 16286 16287 if (tg3_flag(tp, 5755_PLUS) || 16288 tg3_asic_rev(tp) == ASIC_REV_5906) { 16289 tg3_flag_set(tp, 1SHOT_MSI); 16290 } 16291 16292 if (tg3_flag(tp, 57765_PLUS)) { 16293 tg3_flag_set(tp, SUPPORT_MSIX); 16294 tp->irq_max = TG3_IRQ_MAX_VECS; 16295 } 16296 } 16297 16298 tp->txq_max = 1; 16299 tp->rxq_max = 1; 16300 if (tp->irq_max > 1) { 16301 tp->rxq_max = TG3_RSS_MAX_NUM_QS; 16302 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS); 16303 16304 if (tg3_asic_rev(tp) == ASIC_REV_5719 || 16305 tg3_asic_rev(tp) == ASIC_REV_5720) 16306 tp->txq_max = tp->irq_max - 1; 16307 } 16308 16309 if (tg3_flag(tp, 5755_PLUS) || 16310 tg3_asic_rev(tp) == ASIC_REV_5906) 16311 tg3_flag_set(tp, SHORT_DMA_BUG); 16312 16313 if (tg3_asic_rev(tp) == ASIC_REV_5719) 16314 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K; 16315 16316 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 16317 tg3_asic_rev(tp) == ASIC_REV_5719 || 16318 tg3_asic_rev(tp) == ASIC_REV_5720 || 16319 tg3_asic_rev(tp) == ASIC_REV_5762) 16320 tg3_flag_set(tp, LRG_PROD_RING_CAP); 16321 16322 if (tg3_flag(tp, 57765_PLUS) && 16323 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0) 16324 tg3_flag_set(tp, USE_JUMBO_BDFLAG); 16325 16326 if (!tg3_flag(tp, 5705_PLUS) || 16327 tg3_flag(tp, 5780_CLASS) || 16328 tg3_flag(tp, USE_JUMBO_BDFLAG)) 16329 tg3_flag_set(tp, JUMBO_CAPABLE); 16330 16331 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, 16332 &pci_state_reg); 16333 16334 if (pci_is_pcie(tp->pdev)) { 16335 u16 lnkctl; 16336 16337 tg3_flag_set(tp, PCI_EXPRESS); 16338 16339 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl); 16340 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) { 16341 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 16342 
tg3_flag_clear(tp, HW_TSO_2);
16343 tg3_flag_clear(tp, TSO_CAPABLE);
16344 }
16345 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16346 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16347 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16348 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16349 tg3_flag_set(tp, CLKREQ_BUG);
16350 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16351 tg3_flag_set(tp, L1PLLPD_EN);
16352 }
16353 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16354 /* BCM5785 devices are effectively PCIe devices, and should
16355 * follow PCIe codepaths, but do not have a PCIe capabilities
16356 * section.
16357 */
16358 tg3_flag_set(tp, PCI_EXPRESS);
16359 } else if (!tg3_flag(tp, 5705_PLUS) ||
16360 tg3_flag(tp, 5780_CLASS)) {
16361 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16362 if (!tp->pcix_cap) {
16363 dev_err(&tp->pdev->dev,
16364 "Cannot find PCI-X capability, aborting\n");
16365 return -EIO;
16366 }
16367
16368 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16369 tg3_flag_set(tp, PCIX_MODE);
16370 }
16371
16372 /* If we have an AMD 762 or VIA K8T800 chipset, write
16373 * reordering to the mailbox registers done by the host
16374 * controller can cause major troubles. We read back from
16375 * every mailbox register write to force the writes to be
16376 * posted to the chip in order.
16377 */
16378 if (pci_dev_present(tg3_write_reorder_chipsets) &&
16379 !tg3_flag(tp, PCI_EXPRESS))
16380 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16381
16382 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16383 &tp->pci_cacheline_sz);
16384 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16385 &tp->pci_lat_timer);
16386 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16387 tp->pci_lat_timer < 64) {
16388 tp->pci_lat_timer = 64;
16389 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16390 tp->pci_lat_timer);
16391 }
16392
16393 /* Important! -- It is critical that the PCI-X hw workaround
16394 * situation is decided before the first MMIO register access.
16395 */
16396 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16397 /* 5700 BX chips need to have their TX producer index
16398 * mailboxes written twice to work around a bug.
16399 */
16400 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16401
16402 /* If we are in PCI-X mode, enable register write workaround.
16403 *
16404 * The workaround is to use indirect register accesses
16405 * for all chip writes not to mailbox registers.
16406 */
16407 if (tg3_flag(tp, PCIX_MODE)) {
16408 u32 pm_reg;
16409
16410 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16411
16412 /* The chip can have its power management PCI config
16413 * space registers clobbered due to this bug.
16414 * So explicitly force the chip into D0 here.
16415 */
16416 pci_read_config_dword(tp->pdev,
16417 tp->pdev->pm_cap + PCI_PM_CTRL,
16418 &pm_reg);
16419 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16420 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16421 pci_write_config_dword(tp->pdev,
16422 tp->pdev->pm_cap + PCI_PM_CTRL,
16423 pm_reg);
16424
16425 /* Also, force SERR#/PERR# in PCI command.
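 * This makes the chip report parity and system errors
 * instead of silently ignoring them while the
 * indirect-access workaround is in effect.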
*/
16426 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16427 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16428 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16429 }
16430 }
16431
16432 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16433 tg3_flag_set(tp, PCI_HIGH_SPEED);
16434 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16435 tg3_flag_set(tp, PCI_32BIT);
16436
16437 /* Chip-specific fixup from Broadcom driver */
16438 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16439 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16440 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16441 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16442 }
16443
16444 /* Default fast path register access methods */
16445 tp->read32 = tg3_read32;
16446 tp->write32 = tg3_write32;
16447 tp->read32_mbox = tg3_read32;
16448 tp->write32_mbox = tg3_write32;
16449 tp->write32_tx_mbox = tg3_write32;
16450 tp->write32_rx_mbox = tg3_write32;
16451
16452 /* Various workaround register access methods */
16453 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16454 tp->write32 = tg3_write_indirect_reg32;
16455 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16456 (tg3_flag(tp, PCI_EXPRESS) &&
16457 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16458 /*
16459 * Back-to-back register writes can cause problems on these
16460 * chips; the workaround is to read back all register writes
16461 * except those to mailbox regs.
16462 *
16463 * See tg3_write_indirect_reg32().
16464 */
16465 tp->write32 = tg3_write_flush_reg32;
16466 }
16467
16468 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16469 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16470 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16471 tp->write32_rx_mbox = tg3_write_flush_reg32;
16472 }
16473
16474 if (tg3_flag(tp, ICH_WORKAROUND)) {
16475 tp->read32 = tg3_read_indirect_reg32;
16476 tp->write32 = tg3_write_indirect_reg32;
16477 tp->read32_mbox = tg3_read_indirect_mbox;
16478 tp->write32_mbox = tg3_write_indirect_mbox;
16479 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16480 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16481
16482 iounmap(tp->regs);
16483 tp->regs = NULL;
16484
16485 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16486 pci_cmd &= ~PCI_COMMAND_MEMORY;
16487 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16488 }
16489 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16490 tp->read32_mbox = tg3_read32_mbox_5906;
16491 tp->write32_mbox = tg3_write32_mbox_5906;
16492 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16493 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16494 }
16495
16496 if (tp->write32 == tg3_write_indirect_reg32 ||
16497 (tg3_flag(tp, PCIX_MODE) &&
16498 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16499 tg3_asic_rev(tp) == ASIC_REV_5701)))
16500 tg3_flag_set(tp, SRAM_USE_CONFIG);
16501
16502 /* The memory arbiter has to be enabled in order for SRAM accesses
16503 * to succeed. Normally on powerup the tg3 chip firmware will make
16504 * sure it is enabled, but other entities such as system netboot
16505 * code might disable it.
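 * Re-enabling it here is harmless if it is already on; the
 * code simply ORs MEMARB_MODE_ENABLE into the current mode.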
16506 */
16507 val = tr32(MEMARB_MODE);
16508 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16509
16510 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16511 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16512 tg3_flag(tp, 5780_CLASS)) {
16513 if (tg3_flag(tp, PCIX_MODE)) {
16514 pci_read_config_dword(tp->pdev,
16515 tp->pcix_cap + PCI_X_STATUS,
16516 &val);
16517 tp->pci_fn = val & 0x7;
16518 }
16519 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16520 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16521 tg3_asic_rev(tp) == ASIC_REV_5720) {
16522 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16523 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16524 val = tr32(TG3_CPMU_STATUS);
16525
16526 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16527 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16528 else
16529 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16530 TG3_CPMU_STATUS_FSHFT_5719;
16531 }
16532
16533 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16534 tp->write32_tx_mbox = tg3_write_flush_reg32;
16535 tp->write32_rx_mbox = tg3_write_flush_reg32;
16536 }
16537
16538 /* Get eeprom hw config before calling tg3_set_power_state().
16539 * In particular, the TG3_FLAG_IS_NIC flag must be
16540 * determined before calling tg3_set_power_state() so that
16541 * we know whether or not to switch out of Vaux power.
16542 * When the flag is clear, it means that GPIO1 is used for eeprom
16543 * write protect and also implies that it is a LOM where GPIOs
16544 * are not used to switch power.
16545 */
16546 tg3_get_eeprom_hw_cfg(tp);
16547
16548 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16549 tg3_flag_clear(tp, TSO_CAPABLE);
16550 tg3_flag_clear(tp, TSO_BUG);
16551 tp->fw_needed = NULL;
16552 }
16553
16554 if (tg3_flag(tp, ENABLE_APE)) {
16555 /* Allow reads and writes to the
16556 * APE register and memory space.
16557 */
16558 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16559 PCISTATE_ALLOW_APE_SHMEM_WR |
16560 PCISTATE_ALLOW_APE_PSPACE_WR;
16561 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16562 pci_state_reg);
16563
16564 tg3_ape_lock_init(tp);
16565 }
16566
16567 /* Set up tp->grc_local_ctrl before calling
16568 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16569 * will bring 5700's external PHY out of reset.
16570 * It is also used as eeprom write protect on LOMs.
16571 */
16572 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16573 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16574 tg3_flag(tp, EEPROM_WRITE_PROT))
16575 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16576 GRC_LCLCTRL_GPIO_OUTPUT1);
16577 /* Unused GPIO3 must be driven as output on 5752 because there
16578 * are no pull-up resistors on unused GPIO pins.
16579 */
16580 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16581 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16582
16583 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16584 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16585 tg3_flag(tp, 57765_CLASS))
16586 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16587
16588 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16589 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16590 /* Turn off the debug UART. */
16591 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16592 if (tg3_flag(tp, IS_NIC))
16593 /* Keep VMain power.
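 * This is done by driving GPIO0 as an output (the
 * OE0/OUTPUT0 bits below).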
*/ 16594 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | 16595 GRC_LCLCTRL_GPIO_OUTPUT0; 16596 } 16597 16598 if (tg3_asic_rev(tp) == ASIC_REV_5762) 16599 tp->grc_local_ctrl |= 16600 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL; 16601 16602 /* Switch out of Vaux if it is a NIC */ 16603 tg3_pwrsrc_switch_to_vmain(tp); 16604 16605 /* Derive initial jumbo mode from MTU assigned in 16606 * ether_setup() via the alloc_etherdev() call 16607 */ 16608 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS)) 16609 tg3_flag_set(tp, JUMBO_RING_ENABLE); 16610 16611 /* Determine WakeOnLan speed to use. */ 16612 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 16613 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || 16614 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 || 16615 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) { 16616 tg3_flag_clear(tp, WOL_SPEED_100MB); 16617 } else { 16618 tg3_flag_set(tp, WOL_SPEED_100MB); 16619 } 16620 16621 if (tg3_asic_rev(tp) == ASIC_REV_5906) 16622 tp->phy_flags |= TG3_PHYFLG_IS_FET; 16623 16624 /* A few boards don't want Ethernet@WireSpeed phy feature */ 16625 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 16626 (tg3_asic_rev(tp) == ASIC_REV_5705 && 16627 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) && 16628 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) || 16629 (tp->phy_flags & TG3_PHYFLG_IS_FET) || 16630 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 16631 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED; 16632 16633 if (tg3_chip_rev(tp) == CHIPREV_5703_AX || 16634 tg3_chip_rev(tp) == CHIPREV_5704_AX) 16635 tp->phy_flags |= TG3_PHYFLG_ADC_BUG; 16636 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) 16637 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG; 16638 16639 if (tg3_flag(tp, 5705_PLUS) && 16640 !(tp->phy_flags & TG3_PHYFLG_IS_FET) && 16641 tg3_asic_rev(tp) != ASIC_REV_5785 && 16642 tg3_asic_rev(tp) != ASIC_REV_57780 && 16643 !tg3_flag(tp, 57765_PLUS)) { 16644 if (tg3_asic_rev(tp) == ASIC_REV_5755 || 16645 tg3_asic_rev(tp) == ASIC_REV_5787 || 16646 tg3_asic_rev(tp) == ASIC_REV_5784 || 16647 tg3_asic_rev(tp) == ASIC_REV_5761) { 16648 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 && 16649 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722) 16650 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG; 16651 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M) 16652 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM; 16653 } else 16654 tp->phy_flags |= TG3_PHYFLG_BER_BUG; 16655 } 16656 16657 if (tg3_asic_rev(tp) == ASIC_REV_5784 && 16658 tg3_chip_rev(tp) != CHIPREV_5784_AX) { 16659 tp->phy_otp = tg3_read_otp_phycfg(tp); 16660 if (tp->phy_otp == 0) 16661 tp->phy_otp = TG3_OTP_DEFAULT; 16662 } 16663 16664 if (tg3_flag(tp, CPMU_PRESENT)) 16665 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST; 16666 else 16667 tp->mi_mode = MAC_MI_MODE_BASE; 16668 16669 tp->coalesce_mode = 0; 16670 if (tg3_chip_rev(tp) != CHIPREV_5700_AX && 16671 tg3_chip_rev(tp) != CHIPREV_5700_BX) 16672 tp->coalesce_mode |= HOSTCC_MODE_32BYTE; 16673 16674 /* Set these bits to enable statistics workaround. */ 16675 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 16676 tg3_asic_rev(tp) == ASIC_REV_5762 || 16677 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || 16678 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) { 16679 tp->coalesce_mode |= HOSTCC_MODE_ATTN; 16680 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN; 16681 } 16682 16683 if (tg3_asic_rev(tp) == ASIC_REV_5785 || 16684 tg3_asic_rev(tp) == ASIC_REV_57780) 16685 tg3_flag_set(tp, USE_PHYLIB); 16686 16687 err = tg3_mdio_init(tp); 16688 if (err) 16689 return err; 16690 16691 /* Initialize data/descriptor byte/word swapping. 
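 * On 5720/5762 a handful of host-to-BD swap bits must be
 * preserved from the current GRC_MODE value; everything else
 * is rewritten from tp->grc_mode.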
*/ 16692 val = tr32(GRC_MODE); 16693 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 16694 tg3_asic_rev(tp) == ASIC_REV_5762) 16695 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA | 16696 GRC_MODE_WORD_SWAP_B2HRX_DATA | 16697 GRC_MODE_B2HRX_ENABLE | 16698 GRC_MODE_HTX2B_ENABLE | 16699 GRC_MODE_HOST_STACKUP); 16700 else 16701 val &= GRC_MODE_HOST_STACKUP; 16702 16703 tw32(GRC_MODE, val | tp->grc_mode); 16704 16705 tg3_switch_clocks(tp); 16706 16707 /* Clear this out for sanity. */ 16708 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); 16709 16710 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */ 16711 tw32(TG3PCI_REG_BASE_ADDR, 0); 16712 16713 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, 16714 &pci_state_reg); 16715 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 && 16716 !tg3_flag(tp, PCIX_TARGET_HWBUG)) { 16717 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || 16718 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 || 16719 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 || 16720 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) { 16721 void __iomem *sram_base; 16722 16723 /* Write some dummy words into the SRAM status block 16724 * area, see if it reads back correctly. If the return 16725 * value is bad, force enable the PCIX workaround. 16726 */ 16727 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK; 16728 16729 writel(0x00000000, sram_base); 16730 writel(0x00000000, sram_base + 4); 16731 writel(0xffffffff, sram_base + 4); 16732 if (readl(sram_base) != 0x00000000) 16733 tg3_flag_set(tp, PCIX_TARGET_HWBUG); 16734 } 16735 } 16736 16737 udelay(50); 16738 tg3_nvram_init(tp); 16739 16740 /* If the device has an NVRAM, no need to load patch firmware */ 16741 if (tg3_asic_rev(tp) == ASIC_REV_57766 && 16742 !tg3_flag(tp, NO_NVRAM)) 16743 tp->fw_needed = NULL; 16744 16745 grc_misc_cfg = tr32(GRC_MISC_CFG); 16746 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK; 16747 16748 if (tg3_asic_rev(tp) == ASIC_REV_5705 && 16749 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 || 16750 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M)) 16751 tg3_flag_set(tp, IS_5788); 16752 16753 if (!tg3_flag(tp, IS_5788) && 16754 tg3_asic_rev(tp) != ASIC_REV_5700) 16755 tg3_flag_set(tp, TAGGED_STATUS); 16756 if (tg3_flag(tp, TAGGED_STATUS)) { 16757 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD | 16758 HOSTCC_MODE_CLRTICK_TXBD); 16759 16760 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS; 16761 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, 16762 tp->misc_host_ctrl); 16763 } 16764 16765 /* Preserve the APE MAC_MODE bits */ 16766 if (tg3_flag(tp, ENABLE_APE)) 16767 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; 16768 else 16769 tp->mac_mode = 0; 16770 16771 if (tg3_10_100_only_device(tp, ent)) 16772 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY; 16773 16774 err = tg3_phy_probe(tp); 16775 if (err) { 16776 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err); 16777 /* ... but do not return immediately ... */ 16778 tg3_mdio_fini(tp); 16779 } 16780 16781 tg3_read_vpd(tp); 16782 tg3_read_fw_ver(tp); 16783 16784 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 16785 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT; 16786 } else { 16787 if (tg3_asic_rev(tp) == ASIC_REV_5700) 16788 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT; 16789 else 16790 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT; 16791 } 16792 16793 /* 5700 {AX,BX} chips have a broken status block link 16794 * change bit implementation, so we must use the 16795 * status register in those cases. 
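 * (tracked via the USE_LINKCHG_REG flag set just below).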
16796 */
16797 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16798 tg3_flag_set(tp, USE_LINKCHG_REG);
16799 else
16800 tg3_flag_clear(tp, USE_LINKCHG_REG);
16801
16802 /* The led_ctrl is set during tg3_phy_probe; here we might
16803 * have to force the link status polling mechanism based
16804 * upon subsystem IDs.
16805 */
16806 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16807 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16808 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16809 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16810 tg3_flag_set(tp, USE_LINKCHG_REG);
16811 }
16812
16813 /* For all SERDES we poll the MAC status register. */
16814 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16815 tg3_flag_set(tp, POLL_SERDES);
16816 else
16817 tg3_flag_clear(tp, POLL_SERDES);
16818
16819 if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16820 tg3_flag_set(tp, POLL_CPMU_LINK);
16821
16822 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16823 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16824 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16825 tg3_flag(tp, PCIX_MODE)) {
16826 tp->rx_offset = NET_SKB_PAD;
16827 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16828 tp->rx_copy_thresh = ~(u16)0;
16829 #endif
16830 }
16831
16832 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16833 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16834 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16835
16836 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16837
16838 /* Increment the rx prod index on the rx std ring by at most
16839 * 8 for these chips to work around hw errata.
16840 */
16841 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16842 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16843 tg3_asic_rev(tp) == ASIC_REV_5755)
16844 tp->rx_std_max_post = 8;
16845
16846 if (tg3_flag(tp, ASPM_WORKAROUND))
16847 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16848 PCIE_PWR_MGMT_L1_THRESH_MSK;
16849
16850 return err;
16851 }
16852
16853 #ifdef CONFIG_SPARC
16854 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16855 {
16856 struct net_device *dev = tp->dev;
16857 struct pci_dev *pdev = tp->pdev;
16858 struct device_node *dp = pci_device_to_OF_node(pdev);
16859 const unsigned char *addr;
16860 int len;
16861
16862 addr = of_get_property(dp, "local-mac-address", &len);
16863 if (addr && len == ETH_ALEN) {
16864 memcpy(dev->dev_addr, addr, ETH_ALEN);
16865 return 0;
16866 }
16867 return -ENODEV;
16868 }
16869
16870 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16871 {
16872 struct net_device *dev = tp->dev;
16873
16874 memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
16875 return 0;
16876 }
16877 #endif
16878
16879 static int tg3_get_device_address(struct tg3 *tp)
16880 {
16881 struct net_device *dev = tp->dev;
16882 u32 hi, lo, mac_offset;
16883 int addr_ok = 0;
16884 int err;
16885
16886 #ifdef CONFIG_SPARC
16887 if (!tg3_get_macaddr_sparc(tp))
16888 return 0;
16889 #endif
16890
16891 if (tg3_flag(tp, IS_SSB_CORE)) {
16892 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16893 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16894 return 0;
16895 }
16896
16897 mac_offset = 0x7c;
16898 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16899 tg3_flag(tp, 5780_CLASS)) {
16900 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16901 mac_offset = 0xcc;
16902 if (tg3_nvram_lock(tp))
16903 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16904 else
16905 tg3_nvram_unlock(tp);
16906 } else if (tg3_flag(tp, 5717_PLUS)) {
16907 if (tp->pci_fn & 1)
16908 mac_offset = 0xcc;
16909 if (tp->pci_fn > 1)
16910 mac_offset += 0x18c;
16911 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) 16912 mac_offset = 0x10; 16913 16914 /* First try to get it from MAC address mailbox. */ 16915 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi); 16916 if ((hi >> 16) == 0x484b) { 16917 dev->dev_addr[0] = (hi >> 8) & 0xff; 16918 dev->dev_addr[1] = (hi >> 0) & 0xff; 16919 16920 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo); 16921 dev->dev_addr[2] = (lo >> 24) & 0xff; 16922 dev->dev_addr[3] = (lo >> 16) & 0xff; 16923 dev->dev_addr[4] = (lo >> 8) & 0xff; 16924 dev->dev_addr[5] = (lo >> 0) & 0xff; 16925 16926 /* Some old bootcode may report a 0 MAC address in SRAM */ 16927 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]); 16928 } 16929 if (!addr_ok) { 16930 /* Next, try NVRAM. */ 16931 if (!tg3_flag(tp, NO_NVRAM) && 16932 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) && 16933 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) { 16934 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2); 16935 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo)); 16936 } 16937 /* Finally just fetch it out of the MAC control regs. */ 16938 else { 16939 hi = tr32(MAC_ADDR_0_HIGH); 16940 lo = tr32(MAC_ADDR_0_LOW); 16941 16942 dev->dev_addr[5] = lo & 0xff; 16943 dev->dev_addr[4] = (lo >> 8) & 0xff; 16944 dev->dev_addr[3] = (lo >> 16) & 0xff; 16945 dev->dev_addr[2] = (lo >> 24) & 0xff; 16946 dev->dev_addr[1] = hi & 0xff; 16947 dev->dev_addr[0] = (hi >> 8) & 0xff; 16948 } 16949 } 16950 16951 if (!is_valid_ether_addr(&dev->dev_addr[0])) { 16952 #ifdef CONFIG_SPARC 16953 if (!tg3_get_default_macaddr_sparc(tp)) 16954 return 0; 16955 #endif 16956 return -EINVAL; 16957 } 16958 return 0; 16959 } 16960 16961 #define BOUNDARY_SINGLE_CACHELINE 1 16962 #define BOUNDARY_MULTI_CACHELINE 2 16963 16964 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val) 16965 { 16966 int cacheline_size; 16967 u8 byte; 16968 int goal; 16969 16970 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte); 16971 if (byte == 0) 16972 cacheline_size = 1024; 16973 else 16974 cacheline_size = (int) byte * 4; 16975 16976 /* On 5703 and later chips, the boundary bits have no 16977 * effect. 16978 */ 16979 if (tg3_asic_rev(tp) != ASIC_REV_5700 && 16980 tg3_asic_rev(tp) != ASIC_REV_5701 && 16981 !tg3_flag(tp, PCI_EXPRESS)) 16982 goto out; 16983 16984 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC) 16985 goal = BOUNDARY_MULTI_CACHELINE; 16986 #else 16987 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA) 16988 goal = BOUNDARY_SINGLE_CACHELINE; 16989 #else 16990 goal = 0; 16991 #endif 16992 #endif 16993 16994 if (tg3_flag(tp, 57765_PLUS)) { 16995 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT; 16996 goto out; 16997 } 16998 16999 if (!goal) 17000 goto out; 17001 17002 /* PCI controllers on most RISC systems tend to disconnect 17003 * when a device tries to burst across a cache-line boundary. 17004 * Therefore, letting tg3 do so just wastes PCI bandwidth. 17005 * 17006 * Unfortunately, for PCI-E there are only limited 17007 * write-side controls for this, and thus for reads 17008 * we will still get the disconnects. We'll also waste 17009 * these PCI cycles for both read and write for chips 17010 * other than 5700 and 5701 which do not implement the 17011 * boundary bits. 
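 *
 * The switch below therefore just picks read/write boundary
 * bits for DMA_RWCTRL based on the bus type and the cache
 * line size.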
17012 */
17013 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17014 switch (cacheline_size) {
17015 case 16:
17016 case 32:
17017 case 64:
17018 case 128:
17019 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17020 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17021 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17022 } else {
17023 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17024 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17025 }
17026 break;
17027
17028 case 256:
17029 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17030 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17031 break;
17032
17033 default:
17034 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17035 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17036 break;
17037 }
17038 } else if (tg3_flag(tp, PCI_EXPRESS)) {
17039 switch (cacheline_size) {
17040 case 16:
17041 case 32:
17042 case 64:
17043 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17044 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17045 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17046 break;
17047 }
17048 /* fallthrough */
17049 case 128:
17050 default:
17051 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17052 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17053 break;
17054 }
17055 } else {
17056 switch (cacheline_size) {
17057 case 16:
17058 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17059 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17060 DMA_RWCTRL_WRITE_BNDRY_16);
17061 break;
17062 }
17063 /* fallthrough */
17064 case 32:
17065 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17066 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17067 DMA_RWCTRL_WRITE_BNDRY_32);
17068 break;
17069 }
17070 /* fallthrough */
17071 case 64:
17072 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17073 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17074 DMA_RWCTRL_WRITE_BNDRY_64);
17075 break;
17076 }
17077 /* fallthrough */
17078 case 128:
17079 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17080 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17081 DMA_RWCTRL_WRITE_BNDRY_128);
17082 break;
17083 }
17084 /* fallthrough */
17085 case 256:
17086 val |= (DMA_RWCTRL_READ_BNDRY_256 |
17087 DMA_RWCTRL_WRITE_BNDRY_256);
17088 break;
17089 case 512:
17090 val |= (DMA_RWCTRL_READ_BNDRY_512 |
17091 DMA_RWCTRL_WRITE_BNDRY_512);
17092 break;
17093 case 1024:
17094 default:
17095 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17096 DMA_RWCTRL_WRITE_BNDRY_1024);
17097 break;
17098 }
17099 }
17100
17101 out:
17102 return val;
17103 }
17104
17105 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17106 int size, bool to_device)
17107 {
17108 struct tg3_internal_buffer_desc test_desc;
17109 u32 sram_dma_descs;
17110 int i, ret;
17111
17112 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17113
17114 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17115 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17116 tw32(RDMAC_STATUS, 0);
17117 tw32(WDMAC_STATUS, 0);
17118
17119 tw32(BUFMGR_MODE, 0);
17120 tw32(FTQ_RESET, 0);
17121
17122 test_desc.addr_hi = ((u64) buf_dma) >> 32;
17123 test_desc.addr_lo = buf_dma & 0xffffffff;
17124 test_desc.nic_mbuf = 0x00002100;
17125 test_desc.len = size;
17126
17127 /*
17128 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
17129 * the *second* time the tg3 driver was getting loaded after an
17130 * initial scan.
17131 *
17132 * Broadcom tells me:
17133 * ...the DMA engine is connected to the GRC block and a DMA
17134 * reset may affect the GRC block in some unpredictable way...
17135 * The behavior of resets to individual blocks has not been tested.
17136 *
17137 * Broadcom noted the GRC reset will also reset all sub-components.
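 *
 * To stay safe, this function only quiesces the completion
 * FIFOs and DMA status registers above and enables RDMAC/WDMAC
 * per transfer below, rather than resetting any block.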
17138 */ 17139 if (to_device) { 17140 test_desc.cqid_sqid = (13 << 8) | 2; 17141 17142 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE); 17143 udelay(40); 17144 } else { 17145 test_desc.cqid_sqid = (16 << 8) | 7; 17146 17147 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE); 17148 udelay(40); 17149 } 17150 test_desc.flags = 0x00000005; 17151 17152 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) { 17153 u32 val; 17154 17155 val = *(((u32 *)&test_desc) + i); 17156 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 17157 sram_dma_descs + (i * sizeof(u32))); 17158 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); 17159 } 17160 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); 17161 17162 if (to_device) 17163 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs); 17164 else 17165 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs); 17166 17167 ret = -ENODEV; 17168 for (i = 0; i < 40; i++) { 17169 u32 val; 17170 17171 if (to_device) 17172 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ); 17173 else 17174 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ); 17175 if ((val & 0xffff) == sram_dma_descs) { 17176 ret = 0; 17177 break; 17178 } 17179 17180 udelay(100); 17181 } 17182 17183 return ret; 17184 } 17185 17186 #define TEST_BUFFER_SIZE 0x2000 17187 17188 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = { 17189 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) }, 17190 { }, 17191 }; 17192 17193 static int tg3_test_dma(struct tg3 *tp) 17194 { 17195 dma_addr_t buf_dma; 17196 u32 *buf, saved_dma_rwctrl; 17197 int ret = 0; 17198 17199 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, 17200 &buf_dma, GFP_KERNEL); 17201 if (!buf) { 17202 ret = -ENOMEM; 17203 goto out_nofree; 17204 } 17205 17206 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) | 17207 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT)); 17208 17209 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl); 17210 17211 if (tg3_flag(tp, 57765_PLUS)) 17212 goto out; 17213 17214 if (tg3_flag(tp, PCI_EXPRESS)) { 17215 /* DMA read watermark not used on PCIE */ 17216 tp->dma_rwctrl |= 0x00180000; 17217 } else if (!tg3_flag(tp, PCIX_MODE)) { 17218 if (tg3_asic_rev(tp) == ASIC_REV_5705 || 17219 tg3_asic_rev(tp) == ASIC_REV_5750) 17220 tp->dma_rwctrl |= 0x003f0000; 17221 else 17222 tp->dma_rwctrl |= 0x003f000f; 17223 } else { 17224 if (tg3_asic_rev(tp) == ASIC_REV_5703 || 17225 tg3_asic_rev(tp) == ASIC_REV_5704) { 17226 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f); 17227 u32 read_water = 0x7; 17228 17229 /* If the 5704 is behind the EPB bridge, we can 17230 * do the less restrictive ONE_DMA workaround for 17231 * better performance. 
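 * ONE_DMA limits the engine to a single outstanding
 * transaction, which costs throughput, so it is only applied
 * for the clock configurations that need it.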
17232 */ 17233 if (tg3_flag(tp, 40BIT_DMA_BUG) && 17234 tg3_asic_rev(tp) == ASIC_REV_5704) 17235 tp->dma_rwctrl |= 0x8000; 17236 else if (ccval == 0x6 || ccval == 0x7) 17237 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA; 17238 17239 if (tg3_asic_rev(tp) == ASIC_REV_5703) 17240 read_water = 4; 17241 /* Set bit 23 to enable PCIX hw bug fix */ 17242 tp->dma_rwctrl |= 17243 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) | 17244 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) | 17245 (1 << 23); 17246 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) { 17247 /* 5780 always in PCIX mode */ 17248 tp->dma_rwctrl |= 0x00144000; 17249 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) { 17250 /* 5714 always in PCIX mode */ 17251 tp->dma_rwctrl |= 0x00148000; 17252 } else { 17253 tp->dma_rwctrl |= 0x001b000f; 17254 } 17255 } 17256 if (tg3_flag(tp, ONE_DMA_AT_ONCE)) 17257 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA; 17258 17259 if (tg3_asic_rev(tp) == ASIC_REV_5703 || 17260 tg3_asic_rev(tp) == ASIC_REV_5704) 17261 tp->dma_rwctrl &= 0xfffffff0; 17262 17263 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 17264 tg3_asic_rev(tp) == ASIC_REV_5701) { 17265 /* Remove this if it causes problems for some boards. */ 17266 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT; 17267 17268 /* On 5700/5701 chips, we need to set this bit. 17269 * Otherwise the chip will issue cacheline transactions 17270 * to streamable DMA memory with not all the byte 17271 * enables turned on. This is an error on several 17272 * RISC PCI controllers, in particular sparc64. 17273 * 17274 * On 5703/5704 chips, this bit has been reassigned 17275 * a different meaning. In particular, it is used 17276 * on those chips to enable a PCI-X workaround. 17277 */ 17278 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE; 17279 } 17280 17281 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 17282 17283 17284 if (tg3_asic_rev(tp) != ASIC_REV_5700 && 17285 tg3_asic_rev(tp) != ASIC_REV_5701) 17286 goto out; 17287 17288 /* It is best to perform DMA test with maximum write burst size 17289 * to expose the 5700/5701 write DMA bug. 17290 */ 17291 saved_dma_rwctrl = tp->dma_rwctrl; 17292 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; 17293 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 17294 17295 while (1) { 17296 u32 *p = buf, i; 17297 17298 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) 17299 p[i] = i; 17300 17301 /* Send the buffer to the chip. */ 17302 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true); 17303 if (ret) { 17304 dev_err(&tp->pdev->dev, 17305 "%s: Buffer write failed. err = %d\n", 17306 __func__, ret); 17307 break; 17308 } 17309 17310 /* Now read it back. */ 17311 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false); 17312 if (ret) { 17313 dev_err(&tp->pdev->dev, "%s: Buffer read failed. " 17314 "err = %d\n", __func__, ret); 17315 break; 17316 } 17317 17318 /* Verify it. */ 17319 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) { 17320 if (p[i] == i) 17321 continue; 17322 17323 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != 17324 DMA_RWCTRL_WRITE_BNDRY_16) { 17325 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; 17326 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; 17327 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 17328 break; 17329 } else { 17330 dev_err(&tp->pdev->dev, 17331 "%s: Buffer corrupted on read back! " 17332 "(%d != %d)\n", __func__, p[i], i); 17333 ret = -ENODEV; 17334 goto out; 17335 } 17336 } 17337 17338 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) { 17339 /* Success. 
*/ 17340 ret = 0; 17341 break; 17342 } 17343 } 17344 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != 17345 DMA_RWCTRL_WRITE_BNDRY_16) { 17346 /* DMA test passed without adjusting DMA boundary, 17347 * now look for chipsets that are known to expose the 17348 * DMA bug without failing the test. 17349 */ 17350 if (pci_dev_present(tg3_dma_wait_state_chipsets)) { 17351 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; 17352 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; 17353 } else { 17354 /* Safe to use the calculated DMA boundary. */ 17355 tp->dma_rwctrl = saved_dma_rwctrl; 17356 } 17357 17358 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 17359 } 17360 17361 out: 17362 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma); 17363 out_nofree: 17364 return ret; 17365 } 17366 17367 static void tg3_init_bufmgr_config(struct tg3 *tp) 17368 { 17369 if (tg3_flag(tp, 57765_PLUS)) { 17370 tp->bufmgr_config.mbuf_read_dma_low_water = 17371 DEFAULT_MB_RDMA_LOW_WATER_5705; 17372 tp->bufmgr_config.mbuf_mac_rx_low_water = 17373 DEFAULT_MB_MACRX_LOW_WATER_57765; 17374 tp->bufmgr_config.mbuf_high_water = 17375 DEFAULT_MB_HIGH_WATER_57765; 17376 17377 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = 17378 DEFAULT_MB_RDMA_LOW_WATER_5705; 17379 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = 17380 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765; 17381 tp->bufmgr_config.mbuf_high_water_jumbo = 17382 DEFAULT_MB_HIGH_WATER_JUMBO_57765; 17383 } else if (tg3_flag(tp, 5705_PLUS)) { 17384 tp->bufmgr_config.mbuf_read_dma_low_water = 17385 DEFAULT_MB_RDMA_LOW_WATER_5705; 17386 tp->bufmgr_config.mbuf_mac_rx_low_water = 17387 DEFAULT_MB_MACRX_LOW_WATER_5705; 17388 tp->bufmgr_config.mbuf_high_water = 17389 DEFAULT_MB_HIGH_WATER_5705; 17390 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 17391 tp->bufmgr_config.mbuf_mac_rx_low_water = 17392 DEFAULT_MB_MACRX_LOW_WATER_5906; 17393 tp->bufmgr_config.mbuf_high_water = 17394 DEFAULT_MB_HIGH_WATER_5906; 17395 } 17396 17397 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = 17398 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780; 17399 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = 17400 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780; 17401 tp->bufmgr_config.mbuf_high_water_jumbo = 17402 DEFAULT_MB_HIGH_WATER_JUMBO_5780; 17403 } else { 17404 tp->bufmgr_config.mbuf_read_dma_low_water = 17405 DEFAULT_MB_RDMA_LOW_WATER; 17406 tp->bufmgr_config.mbuf_mac_rx_low_water = 17407 DEFAULT_MB_MACRX_LOW_WATER; 17408 tp->bufmgr_config.mbuf_high_water = 17409 DEFAULT_MB_HIGH_WATER; 17410 17411 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = 17412 DEFAULT_MB_RDMA_LOW_WATER_JUMBO; 17413 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = 17414 DEFAULT_MB_MACRX_LOW_WATER_JUMBO; 17415 tp->bufmgr_config.mbuf_high_water_jumbo = 17416 DEFAULT_MB_HIGH_WATER_JUMBO; 17417 } 17418 17419 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER; 17420 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER; 17421 } 17422 17423 static char *tg3_phy_string(struct tg3 *tp) 17424 { 17425 switch (tp->phy_id & TG3_PHY_ID_MASK) { 17426 case TG3_PHY_ID_BCM5400: return "5400"; 17427 case TG3_PHY_ID_BCM5401: return "5401"; 17428 case TG3_PHY_ID_BCM5411: return "5411"; 17429 case TG3_PHY_ID_BCM5701: return "5701"; 17430 case TG3_PHY_ID_BCM5703: return "5703"; 17431 case TG3_PHY_ID_BCM5704: return "5704"; 17432 case TG3_PHY_ID_BCM5705: return "5705"; 17433 case TG3_PHY_ID_BCM5750: return "5750"; 17434 case TG3_PHY_ID_BCM5752: return "5752"; 17435 case TG3_PHY_ID_BCM5714: return "5714"; 17436 case TG3_PHY_ID_BCM5780: return "5780"; 17437 case 
TG3_PHY_ID_BCM5755: return "5755"; 17438 case TG3_PHY_ID_BCM5787: return "5787"; 17439 case TG3_PHY_ID_BCM5784: return "5784"; 17440 case TG3_PHY_ID_BCM5756: return "5722/5756"; 17441 case TG3_PHY_ID_BCM5906: return "5906"; 17442 case TG3_PHY_ID_BCM5761: return "5761"; 17443 case TG3_PHY_ID_BCM5718C: return "5718C"; 17444 case TG3_PHY_ID_BCM5718S: return "5718S"; 17445 case TG3_PHY_ID_BCM57765: return "57765"; 17446 case TG3_PHY_ID_BCM5719C: return "5719C"; 17447 case TG3_PHY_ID_BCM5720C: return "5720C"; 17448 case TG3_PHY_ID_BCM5762: return "5762C"; 17449 case TG3_PHY_ID_BCM8002: return "8002/serdes"; 17450 case 0: return "serdes"; 17451 default: return "unknown"; 17452 } 17453 } 17454 17455 static char *tg3_bus_string(struct tg3 *tp, char *str) 17456 { 17457 if (tg3_flag(tp, PCI_EXPRESS)) { 17458 strcpy(str, "PCI Express"); 17459 return str; 17460 } else if (tg3_flag(tp, PCIX_MODE)) { 17461 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f; 17462 17463 strcpy(str, "PCIX:"); 17464 17465 if ((clock_ctrl == 7) || 17466 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) == 17467 GRC_MISC_CFG_BOARD_ID_5704CIOBE)) 17468 strcat(str, "133MHz"); 17469 else if (clock_ctrl == 0) 17470 strcat(str, "33MHz"); 17471 else if (clock_ctrl == 2) 17472 strcat(str, "50MHz"); 17473 else if (clock_ctrl == 4) 17474 strcat(str, "66MHz"); 17475 else if (clock_ctrl == 6) 17476 strcat(str, "100MHz"); 17477 } else { 17478 strcpy(str, "PCI:"); 17479 if (tg3_flag(tp, PCI_HIGH_SPEED)) 17480 strcat(str, "66MHz"); 17481 else 17482 strcat(str, "33MHz"); 17483 } 17484 if (tg3_flag(tp, PCI_32BIT)) 17485 strcat(str, ":32-bit"); 17486 else 17487 strcat(str, ":64-bit"); 17488 return str; 17489 } 17490 17491 static void tg3_init_coal(struct tg3 *tp) 17492 { 17493 struct ethtool_coalesce *ec = &tp->coal; 17494 17495 memset(ec, 0, sizeof(*ec)); 17496 ec->cmd = ETHTOOL_GCOALESCE; 17497 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS; 17498 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS; 17499 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES; 17500 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES; 17501 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT; 17502 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT; 17503 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT; 17504 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT; 17505 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS; 17506 17507 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD | 17508 HOSTCC_MODE_CLRTICK_TXBD)) { 17509 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS; 17510 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS; 17511 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS; 17512 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS; 17513 } 17514 17515 if (tg3_flag(tp, 5705_PLUS)) { 17516 ec->rx_coalesce_usecs_irq = 0; 17517 ec->tx_coalesce_usecs_irq = 0; 17518 ec->stats_block_coalesce_usecs = 0; 17519 } 17520 } 17521 17522 static int tg3_init_one(struct pci_dev *pdev, 17523 const struct pci_device_id *ent) 17524 { 17525 struct net_device *dev; 17526 struct tg3 *tp; 17527 int i, err; 17528 u32 sndmbx, rcvmbx, intmbx; 17529 char str[40]; 17530 u64 dma_mask, persist_dma_mask; 17531 netdev_features_t features = 0; 17532 17533 printk_once(KERN_INFO "%s\n", version); 17534 17535 err = pci_enable_device(pdev); 17536 if (err) { 17537 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 17538 return err; 17539 } 17540 17541 err = pci_request_regions(pdev, DRV_MODULE_NAME); 17542 if (err) { 17543 dev_err(&pdev->dev, "Cannot obtain PCI resources, 
aborting\n"); 17544 goto err_out_disable_pdev; 17545 } 17546 17547 pci_set_master(pdev); 17548 17549 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS); 17550 if (!dev) { 17551 err = -ENOMEM; 17552 goto err_out_free_res; 17553 } 17554 17555 SET_NETDEV_DEV(dev, &pdev->dev); 17556 17557 tp = netdev_priv(dev); 17558 tp->pdev = pdev; 17559 tp->dev = dev; 17560 tp->rx_mode = TG3_DEF_RX_MODE; 17561 tp->tx_mode = TG3_DEF_TX_MODE; 17562 tp->irq_sync = 1; 17563 17564 if (tg3_debug > 0) 17565 tp->msg_enable = tg3_debug; 17566 else 17567 tp->msg_enable = TG3_DEF_MSG_ENABLE; 17568 17569 if (pdev_is_ssb_gige_core(pdev)) { 17570 tg3_flag_set(tp, IS_SSB_CORE); 17571 if (ssb_gige_must_flush_posted_writes(pdev)) 17572 tg3_flag_set(tp, FLUSH_POSTED_WRITES); 17573 if (ssb_gige_one_dma_at_once(pdev)) 17574 tg3_flag_set(tp, ONE_DMA_AT_ONCE); 17575 if (ssb_gige_have_roboswitch(pdev)) { 17576 tg3_flag_set(tp, USE_PHYLIB); 17577 tg3_flag_set(tp, ROBOSWITCH); 17578 } 17579 if (ssb_gige_is_rgmii(pdev)) 17580 tg3_flag_set(tp, RGMII_MODE); 17581 } 17582 17583 /* The word/byte swap controls here control register access byte 17584 * swapping. DMA data byte swapping is controlled in the GRC_MODE 17585 * setting below. 17586 */ 17587 tp->misc_host_ctrl = 17588 MISC_HOST_CTRL_MASK_PCI_INT | 17589 MISC_HOST_CTRL_WORD_SWAP | 17590 MISC_HOST_CTRL_INDIR_ACCESS | 17591 MISC_HOST_CTRL_PCISTATE_RW; 17592 17593 /* The NONFRM (non-frame) byte/word swap controls take effect 17594 * on descriptor entries, anything which isn't packet data. 17595 * 17596 * The StrongARM chips on the board (one for tx, one for rx) 17597 * are running in big-endian mode. 17598 */ 17599 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA | 17600 GRC_MODE_WSWAP_NONFRM_DATA); 17601 #ifdef __BIG_ENDIAN 17602 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA; 17603 #endif 17604 spin_lock_init(&tp->lock); 17605 spin_lock_init(&tp->indirect_lock); 17606 INIT_WORK(&tp->reset_task, tg3_reset_task); 17607 17608 tp->regs = pci_ioremap_bar(pdev, BAR_0); 17609 if (!tp->regs) { 17610 dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); 17611 err = -ENOMEM; 17612 goto err_out_free_dev; 17613 } 17614 17615 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || 17616 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E || 17617 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S || 17618 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE || 17619 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || 17620 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C || 17621 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || 17622 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || 17623 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 || 17624 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 || 17625 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 || 17626 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 || 17627 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 || 17628 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 || 17629 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) { 17630 tg3_flag_set(tp, ENABLE_APE); 17631 tp->aperegs = pci_ioremap_bar(pdev, BAR_2); 17632 if (!tp->aperegs) { 17633 dev_err(&pdev->dev, 17634 "Cannot map APE registers, aborting\n"); 17635 err = -ENOMEM; 17636 goto err_out_iounmap; 17637 } 17638 } 17639 17640 tp->rx_pending = TG3_DEF_RX_RING_PENDING; 17641 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING; 17642 17643 dev->ethtool_ops = &tg3_ethtool_ops; 17644 dev->watchdog_timeo = TG3_TX_TIMEOUT; 17645 dev->netdev_ops = &tg3_netdev_ops; 17646 dev->irq = pdev->irq; 17647 17648 err = 
tg3_get_invariants(tp, ent);
17649 if (err) {
17650 dev_err(&pdev->dev,
17651 "Problem fetching invariants of chip, aborting\n");
17652 goto err_out_apeunmap;
17653 }
17654
17655 /* The EPB bridge inside 5714, 5715, and 5780 and any
17656 * device behind the EPB cannot support DMA addresses > 40-bit.
17657 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17658 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17659 * do DMA address check in tg3_start_xmit().
17660 */
17661 if (tg3_flag(tp, IS_5788))
17662 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17663 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17664 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17665 #ifdef CONFIG_HIGHMEM
17666 dma_mask = DMA_BIT_MASK(64);
17667 #endif
17668 } else
17669 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17670
17671 /* Configure DMA attributes. */
17672 if (dma_mask > DMA_BIT_MASK(32)) {
17673 err = pci_set_dma_mask(pdev, dma_mask);
17674 if (!err) {
17675 features |= NETIF_F_HIGHDMA;
17676 err = pci_set_consistent_dma_mask(pdev,
17677 persist_dma_mask);
17678 if (err < 0) {
17679 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17680 "DMA for consistent allocations\n");
17681 goto err_out_apeunmap;
17682 }
17683 }
17684 }
17685 if (err || dma_mask == DMA_BIT_MASK(32)) {
17686 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17687 if (err) {
17688 dev_err(&pdev->dev,
17689 "No usable DMA configuration, aborting\n");
17690 goto err_out_apeunmap;
17691 }
17692 }
17693
17694 tg3_init_bufmgr_config(tp);
17695
17696 /* 5700 B0 chips do not support checksumming correctly due
17697 * to hardware bugs.
17698 */
17699 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17700 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17701
17702 if (tg3_flag(tp, 5755_PLUS))
17703 features |= NETIF_F_IPV6_CSUM;
17704 }
17705
17706 /* TSO is on by default on chips that support hardware TSO.
17707 * Firmware TSO on older chips gives lower performance, so it
17708 * is off by default, but can be enabled using ethtool.
17709 */
17710 if ((tg3_flag(tp, HW_TSO_1) ||
17711 tg3_flag(tp, HW_TSO_2) ||
17712 tg3_flag(tp, HW_TSO_3)) &&
17713 (features & NETIF_F_IP_CSUM))
17714 features |= NETIF_F_TSO;
17715 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17716 if (features & NETIF_F_IPV6_CSUM)
17717 features |= NETIF_F_TSO6;
17718 if (tg3_flag(tp, HW_TSO_3) ||
17719 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17720 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17721 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17722 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17723 tg3_asic_rev(tp) == ASIC_REV_57780)
17724 features |= NETIF_F_TSO_ECN;
17725 }
17726
17727 dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17728 NETIF_F_HW_VLAN_CTAG_RX;
17729 dev->vlan_features |= features;
17730
17731 /*
17732 * Add loopback capability only for a subset of devices that support
17733 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17734 * loopback for the remaining devices.
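 * For now NETIF_F_LOOPBACK is only advertised when the device
 * is neither a 5780 nor equipped with a CPMU.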
	dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
			 NETIF_F_HW_VLAN_CTAG_RX;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow
	 * INT-PHY loopback for the remaining devices.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;
	dev->priv_flags |= IFF_UNICAST_FLT;

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	/*
	 * Reset the chip in case the UNDI or EFI driver did not shut it
	 * down.  Otherwise the DMA self test will enable the WDMAC and
	 * we'll see (spurious) pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
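	/* Set up one tg3_napi context per supported interrupt vector.
	 * Each context is given its own interrupt, RX-consumer, and
	 * TX-producer mailbox register offset, derived incrementally
	 * from the base offsets above.
	 */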
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSI-X, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we set
		 * up above are still useful for single-vector mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, PTP_CAPABLE);

	tg3_timer_init(tp);

	tg3_carrier_off(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tg3_chip_rev_id(tp),
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
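	/* Save the PCI config space now so that tg3_io_slot_reset() below
	 * can pci_restore_state() after an error-recovery bus reset.
	 */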
	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	return err;
}

static void tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
	}
}

#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, true);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

unlock:
	rtnl_unlock();
	return err;
}

static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp,
			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

unlock:
	rtnl_unlock();
	return err;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);

static void tg3_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
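	/* Detach and close the interface on shutdown or reboot; only on a
	 * true power-off is the chip transitioned to its low-power state.
	 */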
	rtnl_lock();
	netif_device_detach(dev);

	if (netif_running(dev))
		dev_close(dev);

	if (system_state == SYSTEM_POWER_OFF)
		tg3_power_down(tp);

	rtnl_unlock();
}

/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	/* Log via the PCI device; the netdev may not exist yet. */
	dev_info(&pdev->dev, "PCI I/O error detected\n");

	rtnl_lock();

	/* We probably don't have netdev yet */
	if (!netdev || !netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure) {
		if (netdev) {
			tg3_napi_enable(tp);
			dev_close(netdev);
		}
		err = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_disable_device(pdev);
	}

	rtnl_unlock();

	return err;
}

/**
 * tg3_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netdev || !netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
		tg3_napi_enable(tp);
		dev_close(netdev);
	}
	rtnl_unlock();

	return rc;
}
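/* The PCI error recovery core invokes these handlers in order:
 * .error_detected quiesces the driver, .slot_reset re-initializes the
 * device after the bus reset, and .resume restarts traffic.
 */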
/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	tg3_phy_start(tp);

done:
	rtnl_unlock();
}

static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= &tg3_pm_ops,
	.shutdown	= tg3_shutdown,
};

module_pci_driver(tg3_driver);