/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2014 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)		\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)		\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
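/* Illustrative usage note (not in the original source): the accessors
 * above let feature bits be tested and updated by name, e.g.
 *
 *	if (tg3_flag(tp, ENABLE_APE))
 *		tg3_flag_set(tp, APE_HAS_NCSI);
 *
 * which expands to test_bit()/set_bit() on tp->tg3_flags.
 */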
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		137
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"May 11, 2014"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
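/* Example (illustrative): because TG3_TX_RING_SIZE is a power of two,
 * NEXT_TX(TG3_TX_RING_SIZE - 1) == 0, so the ring wrap-around compiles
 * down to a single bitwise AND rather than a hardware modulo.
 */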
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode. The driver
 * works around this bug by double copying the packet. This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient. For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path. Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
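/* Illustrative: the debug mask can be set at module load time, e.g.
 *
 *	modprobe tg3 tg3_debug=0x3	# NETIF_MSG_DRV | NETIF_MSG_PROBE
 *
 * Leaving it at the default of -1 selects TG3_DEF_MSG_ENABLE.
 */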
#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
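/* MODULE_DEVICE_TABLE() exports the ID list above in the module's alias
 * table, so udev/modprobe can autoload tg3 when a matching PCI device
 * is discovered.
 */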
"tx_xoff_sent" }, 396 { "tx_flow_control" }, 397 { "tx_mac_errors" }, 398 { "tx_single_collisions" }, 399 { "tx_mult_collisions" }, 400 { "tx_deferred" }, 401 { "tx_excessive_collisions" }, 402 { "tx_late_collisions" }, 403 { "tx_collide_2times" }, 404 { "tx_collide_3times" }, 405 { "tx_collide_4times" }, 406 { "tx_collide_5times" }, 407 { "tx_collide_6times" }, 408 { "tx_collide_7times" }, 409 { "tx_collide_8times" }, 410 { "tx_collide_9times" }, 411 { "tx_collide_10times" }, 412 { "tx_collide_11times" }, 413 { "tx_collide_12times" }, 414 { "tx_collide_13times" }, 415 { "tx_collide_14times" }, 416 { "tx_collide_15times" }, 417 { "tx_ucast_packets" }, 418 { "tx_mcast_packets" }, 419 { "tx_bcast_packets" }, 420 { "tx_carrier_sense_errors" }, 421 { "tx_discards" }, 422 { "tx_errors" }, 423 424 { "dma_writeq_full" }, 425 { "dma_write_prioq_full" }, 426 { "rxbds_empty" }, 427 { "rx_discards" }, 428 { "rx_errors" }, 429 { "rx_threshold_hit" }, 430 431 { "dma_readq_full" }, 432 { "dma_read_prioq_full" }, 433 { "tx_comp_queue_full" }, 434 435 { "ring_set_send_prod_index" }, 436 { "ring_status_update" }, 437 { "nic_irqs" }, 438 { "nic_avoided_irqs" }, 439 { "nic_tx_threshold_hit" }, 440 441 { "mbuf_lwm_thresh_hit" }, 442 }; 443 444 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys) 445 #define TG3_NVRAM_TEST 0 446 #define TG3_LINK_TEST 1 447 #define TG3_REGISTER_TEST 2 448 #define TG3_MEMORY_TEST 3 449 #define TG3_MAC_LOOPB_TEST 4 450 #define TG3_PHY_LOOPB_TEST 5 451 #define TG3_EXT_LOOPB_TEST 6 452 #define TG3_INTERRUPT_TEST 7 453 454 455 static const struct { 456 const char string[ETH_GSTRING_LEN]; 457 } ethtool_test_keys[] = { 458 [TG3_NVRAM_TEST] = { "nvram test (online) " }, 459 [TG3_LINK_TEST] = { "link test (online) " }, 460 [TG3_REGISTER_TEST] = { "register test (offline)" }, 461 [TG3_MEMORY_TEST] = { "memory test (offline)" }, 462 [TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" }, 463 [TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" }, 464 [TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" }, 465 [TG3_INTERRUPT_TEST] = { "interrupt test (offline)" }, 466 }; 467 468 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys) 469 470 471 static void tg3_write32(struct tg3 *tp, u32 off, u32 val) 472 { 473 writel(val, tp->regs + off); 474 } 475 476 static u32 tg3_read32(struct tg3 *tp, u32 off) 477 { 478 return readl(tp->regs + off); 479 } 480 481 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val) 482 { 483 writel(val, tp->aperegs + off); 484 } 485 486 static u32 tg3_ape_read32(struct tg3 *tp, u32 off) 487 { 488 return readl(tp->aperegs + off); 489 } 490 491 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val) 492 { 493 unsigned long flags; 494 495 spin_lock_irqsave(&tp->indirect_lock, flags); 496 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); 497 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val); 498 spin_unlock_irqrestore(&tp->indirect_lock, flags); 499 } 500 501 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val) 502 { 503 writel(val, tp->regs + off); 504 readl(tp->regs + off); 505 } 506 507 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off) 508 { 509 unsigned long flags; 510 u32 val; 511 512 spin_lock_irqsave(&tp->indirect_lock, flags); 513 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); 514 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val); 515 spin_unlock_irqrestore(&tp->indirect_lock, flags); 516 return val; 517 } 518 519 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
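/* Note: NIC SRAM is reached through a movable window --
 * TG3PCI_MEM_WIN_BASE_ADDR aims the window at 'off' and
 * TG3PCI_MEM_WIN_DATA moves the data -- and the base is parked back at
 * zero afterwards so other code always finds the window in a known
 * state.
 */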
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}

}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
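/* Illustrative usage (mirrors callers later in this file):
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return -EBUSY;
 *	... access APE shared memory ...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 */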
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
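/* The scratchpad handshake above: post the base offset and length into
 * the APE message buffer, ring APE_EVENT_1, wait up to 30 ms for the
 * firmware to respond, then copy the reply out of shared memory one
 * 32-bit word at a time.
 */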
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
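/* Note (illustrative): writing 1 to a vector's interrupt mailbox masks
 * that vector, while writing last_tag << 24 re-arms it and tells the
 * chip which status-block updates the driver has already consumed.
 */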
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}
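/* Note: an MI management frame is assembled from the PHY address,
 * register number, opcode and MI_COM_START, written to MAC_MI_COM, and
 * then polled (up to PHY_BUSY_LOOPS iterations, 10 us apart) until
 * MI_COM_BUSY clears.
 */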
static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}
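/* Note: tg3_phy_cl45_{write,read} tunnel Clause 45 MMD accesses through
 * the Clause 22 registers MII_TG3_MMD_CTRL/MII_TG3_MMD_ADDRESS: select
 * the MMD device, latch the address, switch to no-increment data mode,
 * then transfer the data word.
 */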
static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
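/* The poll above gives BMCR_RESET roughly 5000 * 10 us = 50 ms to
 * self-clear before the reset is reported as failed with -EBUSY.
 */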
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
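/* Note: clearing MAC_MI_MODE_AUTO_POLL (followed by the 80 us settling
 * delay used throughout this file) hands the MI port back to software
 * before any direct MDIO traffic is attempted.
 */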
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}
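/* Illustrative flow for a driver-to-firmware command (this is what
 * tg3_ump_link_report() below does):
 *
 *	tg3_wait_for_event_ack(tp);
 *	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
 *	... fill the length and data mailboxes ...
 *	tg3_generate_fw_event(tp);
 */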
/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}

/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}
}
/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			if (pci_channel_offline(tp->pdev))
				return -ENODEV;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		if (pci_channel_offline(tp->pdev)) {
			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
				tg3_flag_set(tp, NO_FWARE_REPORTED);
				netdev_info(tp->dev, "No firmware running\n");
			}

			break;
		}

		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
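/* Timing note: tg3_poll_fw() budgets 200 * 100 us = 20 ms for the 5906
 * VCPU init-done path and 100000 * 10 us = 1 s for the firmware mailbox
 * handshake before concluding that no firmware is running.
 */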
"enabled" : "disabled"); 1892 1893 tg3_ump_link_report(tp); 1894 } 1895 1896 tp->link_up = netif_carrier_ok(tp->dev); 1897 } 1898 1899 static u32 tg3_decode_flowctrl_1000T(u32 adv) 1900 { 1901 u32 flowctrl = 0; 1902 1903 if (adv & ADVERTISE_PAUSE_CAP) { 1904 flowctrl |= FLOW_CTRL_RX; 1905 if (!(adv & ADVERTISE_PAUSE_ASYM)) 1906 flowctrl |= FLOW_CTRL_TX; 1907 } else if (adv & ADVERTISE_PAUSE_ASYM) 1908 flowctrl |= FLOW_CTRL_TX; 1909 1910 return flowctrl; 1911 } 1912 1913 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl) 1914 { 1915 u16 miireg; 1916 1917 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX)) 1918 miireg = ADVERTISE_1000XPAUSE; 1919 else if (flow_ctrl & FLOW_CTRL_TX) 1920 miireg = ADVERTISE_1000XPSE_ASYM; 1921 else if (flow_ctrl & FLOW_CTRL_RX) 1922 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM; 1923 else 1924 miireg = 0; 1925 1926 return miireg; 1927 } 1928 1929 static u32 tg3_decode_flowctrl_1000X(u32 adv) 1930 { 1931 u32 flowctrl = 0; 1932 1933 if (adv & ADVERTISE_1000XPAUSE) { 1934 flowctrl |= FLOW_CTRL_RX; 1935 if (!(adv & ADVERTISE_1000XPSE_ASYM)) 1936 flowctrl |= FLOW_CTRL_TX; 1937 } else if (adv & ADVERTISE_1000XPSE_ASYM) 1938 flowctrl |= FLOW_CTRL_TX; 1939 1940 return flowctrl; 1941 } 1942 1943 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv) 1944 { 1945 u8 cap = 0; 1946 1947 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) { 1948 cap = FLOW_CTRL_TX | FLOW_CTRL_RX; 1949 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) { 1950 if (lcladv & ADVERTISE_1000XPAUSE) 1951 cap = FLOW_CTRL_RX; 1952 if (rmtadv & ADVERTISE_1000XPAUSE) 1953 cap = FLOW_CTRL_TX; 1954 } 1955 1956 return cap; 1957 } 1958 1959 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv) 1960 { 1961 u8 autoneg; 1962 u8 flowctrl = 0; 1963 u32 old_rx_mode = tp->rx_mode; 1964 u32 old_tx_mode = tp->tx_mode; 1965 1966 if (tg3_flag(tp, USE_PHYLIB)) 1967 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg; 1968 else 1969 autoneg = tp->link_config.autoneg; 1970 1971 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) { 1972 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) 1973 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv); 1974 else 1975 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv); 1976 } else 1977 flowctrl = tp->link_config.flowctrl; 1978 1979 tp->link_config.active_flowctrl = flowctrl; 1980 1981 if (flowctrl & FLOW_CTRL_RX) 1982 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE; 1983 else 1984 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE; 1985 1986 if (old_rx_mode != tp->rx_mode) 1987 tw32_f(MAC_RX_MODE, tp->rx_mode); 1988 1989 if (flowctrl & FLOW_CTRL_TX) 1990 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE; 1991 else 1992 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE; 1993 1994 if (old_tx_mode != tp->tx_mode) 1995 tw32_f(MAC_TX_MODE, tp->tx_mode); 1996 } 1997 1998 static void tg3_adjust_link(struct net_device *dev) 1999 { 2000 u8 oldflowctrl, linkmesg = 0; 2001 u32 mac_mode, lcl_adv, rmt_adv; 2002 struct tg3 *tp = netdev_priv(dev); 2003 struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 2004 2005 spin_lock_bh(&tp->lock); 2006 2007 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK | 2008 MAC_MODE_HALF_DUPLEX); 2009 2010 oldflowctrl = tp->link_config.active_flowctrl; 2011 2012 if (phydev->link) { 2013 lcl_adv = 0; 2014 rmt_adv = 0; 2015 2016 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10) 2017 mac_mode |= MAC_MODE_PORT_MODE_MII; 2018 else if (phydev->speed == SPEED_1000 || 2019 tg3_asic_rev(tp) != ASIC_REV_5785) 2020 
mac_mode |= MAC_MODE_PORT_MODE_GMII; 2021 else 2022 mac_mode |= MAC_MODE_PORT_MODE_MII; 2023 2024 if (phydev->duplex == DUPLEX_HALF) 2025 mac_mode |= MAC_MODE_HALF_DUPLEX; 2026 else { 2027 lcl_adv = mii_advertise_flowctrl( 2028 tp->link_config.flowctrl); 2029 2030 if (phydev->pause) 2031 rmt_adv = LPA_PAUSE_CAP; 2032 if (phydev->asym_pause) 2033 rmt_adv |= LPA_PAUSE_ASYM; 2034 } 2035 2036 tg3_setup_flow_control(tp, lcl_adv, rmt_adv); 2037 } else 2038 mac_mode |= MAC_MODE_PORT_MODE_GMII; 2039 2040 if (mac_mode != tp->mac_mode) { 2041 tp->mac_mode = mac_mode; 2042 tw32_f(MAC_MODE, tp->mac_mode); 2043 udelay(40); 2044 } 2045 2046 if (tg3_asic_rev(tp) == ASIC_REV_5785) { 2047 if (phydev->speed == SPEED_10) 2048 tw32(MAC_MI_STAT, 2049 MAC_MI_STAT_10MBPS_MODE | 2050 MAC_MI_STAT_LNKSTAT_ATTN_ENAB); 2051 else 2052 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); 2053 } 2054 2055 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF) 2056 tw32(MAC_TX_LENGTHS, 2057 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | 2058 (6 << TX_LENGTHS_IPG_SHIFT) | 2059 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT))); 2060 else 2061 tw32(MAC_TX_LENGTHS, 2062 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | 2063 (6 << TX_LENGTHS_IPG_SHIFT) | 2064 (32 << TX_LENGTHS_SLOT_TIME_SHIFT))); 2065 2066 if (phydev->link != tp->old_link || 2067 phydev->speed != tp->link_config.active_speed || 2068 phydev->duplex != tp->link_config.active_duplex || 2069 oldflowctrl != tp->link_config.active_flowctrl) 2070 linkmesg = 1; 2071 2072 tp->old_link = phydev->link; 2073 tp->link_config.active_speed = phydev->speed; 2074 tp->link_config.active_duplex = phydev->duplex; 2075 2076 spin_unlock_bh(&tp->lock); 2077 2078 if (linkmesg) 2079 tg3_link_report(tp); 2080 } 2081 2082 static int tg3_phy_init(struct tg3 *tp) 2083 { 2084 struct phy_device *phydev; 2085 2086 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) 2087 return 0; 2088 2089 /* Bring the PHY back to a known state. */ 2090 tg3_bmcr_reset(tp); 2091 2092 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 2093 2094 /* Attach the MAC to the PHY. */ 2095 phydev = phy_connect(tp->dev, phydev_name(phydev), 2096 tg3_adjust_link, phydev->interface); 2097 if (IS_ERR(phydev)) { 2098 dev_err(&tp->pdev->dev, "Could not attach to PHY\n"); 2099 return PTR_ERR(phydev); 2100 } 2101 2102 /* Mask with MAC supported features. 
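 */

/* Editor's sketch (illustration only): for a gigabit-capable PHY the
 * switch below reduces phydev->supported to
 *
 *	PHY_GBIT_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause
 *
 * while a TG3_PHYFLG_10_100_ONLY device falls through to the
 * PHY_BASIC_FEATURES mask used for plain MII.
 */

/* Mask with MAC supported features.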
*/ 2103 switch (phydev->interface) { 2104 case PHY_INTERFACE_MODE_GMII: 2105 case PHY_INTERFACE_MODE_RGMII: 2106 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 2107 phydev->supported &= (PHY_GBIT_FEATURES | 2108 SUPPORTED_Pause | 2109 SUPPORTED_Asym_Pause); 2110 break; 2111 } 2112 /* fallthru */ 2113 case PHY_INTERFACE_MODE_MII: 2114 phydev->supported &= (PHY_BASIC_FEATURES | 2115 SUPPORTED_Pause | 2116 SUPPORTED_Asym_Pause); 2117 break; 2118 default: 2119 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); 2120 return -EINVAL; 2121 } 2122 2123 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED; 2124 2125 phydev->advertising = phydev->supported; 2126 2127 phy_attached_info(phydev); 2128 2129 return 0; 2130 } 2131 2132 static void tg3_phy_start(struct tg3 *tp) 2133 { 2134 struct phy_device *phydev; 2135 2136 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 2137 return; 2138 2139 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 2140 2141 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { 2142 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; 2143 phydev->speed = tp->link_config.speed; 2144 phydev->duplex = tp->link_config.duplex; 2145 phydev->autoneg = tp->link_config.autoneg; 2146 phydev->advertising = tp->link_config.advertising; 2147 } 2148 2149 phy_start(phydev); 2150 2151 phy_start_aneg(phydev); 2152 } 2153 2154 static void tg3_phy_stop(struct tg3 *tp) 2155 { 2156 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 2157 return; 2158 2159 phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); 2160 } 2161 2162 static void tg3_phy_fini(struct tg3 *tp) 2163 { 2164 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { 2165 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); 2166 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED; 2167 } 2168 } 2169 2170 static int tg3_phy_set_extloopbk(struct tg3 *tp) 2171 { 2172 int err; 2173 u32 val; 2174 2175 if (tp->phy_flags & TG3_PHYFLG_IS_FET) 2176 return 0; 2177 2178 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { 2179 /* Cannot do read-modify-write on 5401 */ 2180 err = tg3_phy_auxctl_write(tp, 2181 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 2182 MII_TG3_AUXCTL_ACTL_EXTLOOPBK | 2183 0x4c20); 2184 goto done; 2185 } 2186 2187 err = tg3_phy_auxctl_read(tp, 2188 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val); 2189 if (err) 2190 return err; 2191 2192 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK; 2193 err = tg3_phy_auxctl_write(tp, 2194 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val); 2195 2196 done: 2197 return err; 2198 } 2199 2200 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable) 2201 { 2202 u32 phytest; 2203 2204 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) { 2205 u32 phy; 2206 2207 tg3_writephy(tp, MII_TG3_FET_TEST, 2208 phytest | MII_TG3_FET_SHADOW_EN); 2209 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) { 2210 if (enable) 2211 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD; 2212 else 2213 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD; 2214 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy); 2215 } 2216 tg3_writephy(tp, MII_TG3_FET_TEST, phytest); 2217 } 2218 } 2219 2220 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable) 2221 { 2222 u32 reg; 2223 2224 if (!tg3_flag(tp, 5705_PLUS) || 2225 (tg3_flag(tp, 5717_PLUS) && 2226 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))) 2227 return; 2228 2229 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 2230 tg3_phy_fet_toggle_apd(tp, enable); 2231 return; 2232 } 2233 2234 reg = MII_TG3_MISC_SHDW_SCR5_LPED | 2235 MII_TG3_MISC_SHDW_SCR5_DLPTLM | 2236 MII_TG3_MISC_SHDW_SCR5_SDTL | 2237 MII_TG3_MISC_SHDW_SCR5_C125OE; 2238 if (tg3_asic_rev(tp) != ASIC_REV_5784 || 
!enable) 2239 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD; 2240 2241 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg); 2242 2243 2244 reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS; 2245 if (enable) 2246 reg |= MII_TG3_MISC_SHDW_APD_ENABLE; 2247 2248 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg); 2249 } 2250 2251 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable) 2252 { 2253 u32 phy; 2254 2255 if (!tg3_flag(tp, 5705_PLUS) || 2256 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 2257 return; 2258 2259 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 2260 u32 ephy; 2261 2262 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) { 2263 u32 reg = MII_TG3_FET_SHDW_MISCCTRL; 2264 2265 tg3_writephy(tp, MII_TG3_FET_TEST, 2266 ephy | MII_TG3_FET_SHADOW_EN); 2267 if (!tg3_readphy(tp, reg, &phy)) { 2268 if (enable) 2269 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX; 2270 else 2271 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX; 2272 tg3_writephy(tp, reg, phy); 2273 } 2274 tg3_writephy(tp, MII_TG3_FET_TEST, ephy); 2275 } 2276 } else { 2277 int ret; 2278 2279 ret = tg3_phy_auxctl_read(tp, 2280 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy); 2281 if (!ret) { 2282 if (enable) 2283 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX; 2284 else 2285 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX; 2286 tg3_phy_auxctl_write(tp, 2287 MII_TG3_AUXCTL_SHDWSEL_MISC, phy); 2288 } 2289 } 2290 } 2291 2292 static void tg3_phy_set_wirespeed(struct tg3 *tp) 2293 { 2294 int ret; 2295 u32 val; 2296 2297 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) 2298 return; 2299 2300 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val); 2301 if (!ret) 2302 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, 2303 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN); 2304 } 2305 2306 static void tg3_phy_apply_otp(struct tg3 *tp) 2307 { 2308 u32 otp, phy; 2309 2310 if (!tp->phy_otp) 2311 return; 2312 2313 otp = tp->phy_otp; 2314 2315 if (tg3_phy_toggle_auxctl_smdsp(tp, true)) 2316 return; 2317 2318 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT); 2319 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT; 2320 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy); 2321 2322 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) | 2323 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT); 2324 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy); 2325 2326 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT); 2327 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ; 2328 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy); 2329 2330 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT); 2331 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy); 2332 2333 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT); 2334 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy); 2335 2336 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) | 2337 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT); 2338 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy); 2339 2340 tg3_phy_toggle_auxctl_smdsp(tp, false); 2341 } 2342 2343 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee) 2344 { 2345 u32 val; 2346 struct ethtool_eee *dest = &tp->eee; 2347 2348 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) 2349 return; 2350 2351 if (eee) 2352 dest = eee; 2353 2354 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val)) 2355 return; 2356 2357 /* Pull eee_active */ 2358 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T || 2359 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) { 2360 dest->eee_active = 1; 2361 } else 2362 dest->eee_active = 0; 2363 2364 /* Pull lp advertised settings */ 2365 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, 
MDIO_AN_EEE_LPABLE, &val)) 2366 return; 2367 dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val); 2368 2369 /* Pull advertised and eee_enabled settings */ 2370 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val)) 2371 return; 2372 dest->eee_enabled = !!val; 2373 dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val); 2374 2375 /* Pull tx_lpi_enabled */ 2376 val = tr32(TG3_CPMU_EEE_MODE); 2377 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX); 2378 2379 /* Pull lpi timer value */ 2380 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff; 2381 } 2382 2383 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up) 2384 { 2385 u32 val; 2386 2387 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) 2388 return; 2389 2390 tp->setlpicnt = 0; 2391 2392 if (tp->link_config.autoneg == AUTONEG_ENABLE && 2393 current_link_up && 2394 tp->link_config.active_duplex == DUPLEX_FULL && 2395 (tp->link_config.active_speed == SPEED_100 || 2396 tp->link_config.active_speed == SPEED_1000)) { 2397 u32 eeectl; 2398 2399 if (tp->link_config.active_speed == SPEED_1000) 2400 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US; 2401 else 2402 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US; 2403 2404 tw32(TG3_CPMU_EEE_CTRL, eeectl); 2405 2406 tg3_eee_pull_config(tp, NULL); 2407 if (tp->eee.eee_active) 2408 tp->setlpicnt = 2; 2409 } 2410 2411 if (!tp->setlpicnt) { 2412 if (current_link_up && 2413 !tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2414 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000); 2415 tg3_phy_toggle_auxctl_smdsp(tp, false); 2416 } 2417 2418 val = tr32(TG3_CPMU_EEE_MODE); 2419 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE); 2420 } 2421 } 2422 2423 static void tg3_phy_eee_enable(struct tg3 *tp) 2424 { 2425 u32 val; 2426 2427 if (tp->link_config.active_speed == SPEED_1000 && 2428 (tg3_asic_rev(tp) == ASIC_REV_5717 || 2429 tg3_asic_rev(tp) == ASIC_REV_5719 || 2430 tg3_flag(tp, 57765_CLASS)) && 2431 !tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2432 val = MII_TG3_DSP_TAP26_ALNOKO | 2433 MII_TG3_DSP_TAP26_RMRXSTO; 2434 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); 2435 tg3_phy_toggle_auxctl_smdsp(tp, false); 2436 } 2437 2438 val = tr32(TG3_CPMU_EEE_MODE); 2439 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE); 2440 } 2441 2442 static int tg3_wait_macro_done(struct tg3 *tp) 2443 { 2444 int limit = 100; 2445 2446 while (limit--) { 2447 u32 tmp32; 2448 2449 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) { 2450 if ((tmp32 & 0x1000) == 0) 2451 break; 2452 } 2453 } 2454 if (limit < 0) 2455 return -EBUSY; 2456 2457 return 0; 2458 } 2459 2460 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp) 2461 { 2462 static const u32 test_pat[4][6] = { 2463 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 }, 2464 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 }, 2465 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 }, 2466 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 } 2467 }; 2468 int chan; 2469 2470 for (chan = 0; chan < 4; chan++) { 2471 int i; 2472 2473 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 2474 (chan * 0x2000) | 0x0200); 2475 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002); 2476 2477 for (i = 0; i < 6; i++) 2478 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 2479 test_pat[chan][i]); 2480 2481 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202); 2482 if (tg3_wait_macro_done(tp)) { 2483 *resetp = 1; 2484 return -EBUSY; 2485 } 2486 2487 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 2488 (chan * 0x2000) | 0x0200); 2489 
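/* Editor's note: the 0x0082 and 0x0802 control writes below appear to
 * re-arm the DSP macro so the test pattern just written can be read
 * back and verified; the constants are undocumented vendor magic.
 */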
tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082); 2490 if (tg3_wait_macro_done(tp)) { 2491 *resetp = 1; 2492 return -EBUSY; 2493 } 2494 2495 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802); 2496 if (tg3_wait_macro_done(tp)) { 2497 *resetp = 1; 2498 return -EBUSY; 2499 } 2500 2501 for (i = 0; i < 6; i += 2) { 2502 u32 low, high; 2503 2504 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) || 2505 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) || 2506 tg3_wait_macro_done(tp)) { 2507 *resetp = 1; 2508 return -EBUSY; 2509 } 2510 low &= 0x7fff; 2511 high &= 0x000f; 2512 if (low != test_pat[chan][i] || 2513 high != test_pat[chan][i+1]) { 2514 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b); 2515 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001); 2516 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005); 2517 2518 return -EBUSY; 2519 } 2520 } 2521 } 2522 2523 return 0; 2524 } 2525 2526 static int tg3_phy_reset_chanpat(struct tg3 *tp) 2527 { 2528 int chan; 2529 2530 for (chan = 0; chan < 4; chan++) { 2531 int i; 2532 2533 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 2534 (chan * 0x2000) | 0x0200); 2535 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002); 2536 for (i = 0; i < 6; i++) 2537 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000); 2538 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202); 2539 if (tg3_wait_macro_done(tp)) 2540 return -EBUSY; 2541 } 2542 2543 return 0; 2544 } 2545 2546 static int tg3_phy_reset_5703_4_5(struct tg3 *tp) 2547 { 2548 u32 reg32, phy9_orig; 2549 int retries, do_phy_reset, err; 2550 2551 retries = 10; 2552 do_phy_reset = 1; 2553 do { 2554 if (do_phy_reset) { 2555 err = tg3_bmcr_reset(tp); 2556 if (err) 2557 return err; 2558 do_phy_reset = 0; 2559 } 2560 2561 /* Disable transmitter and interrupt. */ 2562 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) 2563 continue; 2564 2565 reg32 |= 0x3000; 2566 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32); 2567 2568 /* Set full-duplex, 1000 Mbps. */ 2569 tg3_writephy(tp, MII_BMCR, 2570 BMCR_FULLDPLX | BMCR_SPEED1000); 2571 2572 /* Set to master mode. */ 2573 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig)) 2574 continue; 2575 2576 tg3_writephy(tp, MII_CTRL1000, 2577 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER); 2578 2579 err = tg3_phy_toggle_auxctl_smdsp(tp, true); 2580 if (err) 2581 return err; 2582 2583 /* Block the PHY control access. */ 2584 tg3_phydsp_write(tp, 0x8005, 0x0800); 2585 2586 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset); 2587 if (!err) 2588 break; 2589 } while (--retries); 2590 2591 err = tg3_phy_reset_chanpat(tp); 2592 if (err) 2593 return err; 2594 2595 tg3_phydsp_write(tp, 0x8005, 0x0000); 2596 2597 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200); 2598 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000); 2599 2600 tg3_phy_toggle_auxctl_smdsp(tp, false); 2601 2602 tg3_writephy(tp, MII_CTRL1000, phy9_orig); 2603 2604 err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32); 2605 if (err) 2606 return err; 2607 2608 reg32 &= ~0x3000; 2609 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32); 2610 2611 return 0; 2612 } 2613 2614 static void tg3_carrier_off(struct tg3 *tp) 2615 { 2616 netif_carrier_off(tp->dev); 2617 tp->link_up = false; 2618 } 2619 2620 static void tg3_warn_mgmt_link_flap(struct tg3 *tp) 2621 { 2622 if (tg3_flag(tp, ENABLE_ASF)) 2623 netdev_warn(tp->dev, 2624 "Management side-band traffic will be interrupted during phy settings change\n"); 2625 } 2626 2627 /* This will reset the tigon3 PHY; callers typically use it when there 2628 * is no valid link.
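 *
 * Editor's note (sketch, not normative): the typical flow below is
 *	tg3_bmcr_reset(tp)           (or the 5703/4/5 workaround),
 *	tg3_phy_apply_otp(tp)        (per-chip DSP tuning),
 *	errata writes keyed off tp->phy_flags,
 *	tg3_phy_toggle_automdix(tp, true) and
 *	tg3_phy_set_wirespeed(tp).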
2629 */ 2630 static int tg3_phy_reset(struct tg3 *tp) 2631 { 2632 u32 val, cpmuctrl; 2633 int err; 2634 2635 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 2636 val = tr32(GRC_MISC_CFG); 2637 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ); 2638 udelay(40); 2639 } 2640 err = tg3_readphy(tp, MII_BMSR, &val); 2641 err |= tg3_readphy(tp, MII_BMSR, &val); 2642 if (err != 0) 2643 return -EBUSY; 2644 2645 if (netif_running(tp->dev) && tp->link_up) { 2646 netif_carrier_off(tp->dev); 2647 tg3_link_report(tp); 2648 } 2649 2650 if (tg3_asic_rev(tp) == ASIC_REV_5703 || 2651 tg3_asic_rev(tp) == ASIC_REV_5704 || 2652 tg3_asic_rev(tp) == ASIC_REV_5705) { 2653 err = tg3_phy_reset_5703_4_5(tp); 2654 if (err) 2655 return err; 2656 goto out; 2657 } 2658 2659 cpmuctrl = 0; 2660 if (tg3_asic_rev(tp) == ASIC_REV_5784 && 2661 tg3_chip_rev(tp) != CHIPREV_5784_AX) { 2662 cpmuctrl = tr32(TG3_CPMU_CTRL); 2663 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) 2664 tw32(TG3_CPMU_CTRL, 2665 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY); 2666 } 2667 2668 err = tg3_bmcr_reset(tp); 2669 if (err) 2670 return err; 2671 2672 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) { 2673 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz; 2674 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val); 2675 2676 tw32(TG3_CPMU_CTRL, cpmuctrl); 2677 } 2678 2679 if (tg3_chip_rev(tp) == CHIPREV_5784_AX || 2680 tg3_chip_rev(tp) == CHIPREV_5761_AX) { 2681 val = tr32(TG3_CPMU_LSPD_1000MB_CLK); 2682 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) == 2683 CPMU_LSPD_1000MB_MACCLK_12_5) { 2684 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK; 2685 udelay(40); 2686 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val); 2687 } 2688 } 2689 2690 if (tg3_flag(tp, 5717_PLUS) && 2691 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) 2692 return 0; 2693 2694 tg3_phy_apply_otp(tp); 2695 2696 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) 2697 tg3_phy_toggle_apd(tp, true); 2698 else 2699 tg3_phy_toggle_apd(tp, false); 2700 2701 out: 2702 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) && 2703 !tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2704 tg3_phydsp_write(tp, 0x201f, 0x2aaa); 2705 tg3_phydsp_write(tp, 0x000a, 0x0323); 2706 tg3_phy_toggle_auxctl_smdsp(tp, false); 2707 } 2708 2709 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) { 2710 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); 2711 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); 2712 } 2713 2714 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) { 2715 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2716 tg3_phydsp_write(tp, 0x000a, 0x310b); 2717 tg3_phydsp_write(tp, 0x201f, 0x9506); 2718 tg3_phydsp_write(tp, 0x401f, 0x14e2); 2719 tg3_phy_toggle_auxctl_smdsp(tp, false); 2720 } 2721 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) { 2722 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2723 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); 2724 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) { 2725 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b); 2726 tg3_writephy(tp, MII_TG3_TEST1, 2727 MII_TG3_TEST1_TRIM_EN | 0x4); 2728 } else 2729 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b); 2730 2731 tg3_phy_toggle_auxctl_smdsp(tp, false); 2732 } 2733 } 2734 2735 /* Set Extended packet length bit (bit 14) on all chips that */ 2736 /* support jumbo frames */ 2737 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { 2738 /* Cannot do read-modify-write on 5401 */ 2739 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20); 2740 } else if (tg3_flag(tp, JUMBO_CAPABLE)) { 2741 /* Set bit 14 with read-modify-write to preserve other bits */ 2742 err = tg3_phy_auxctl_read(tp, 2743 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val); 2744 
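/* Editor's note: this is the read-modify-write pattern used for most
 * auxctl updates in this file; the 5401 branch above must write a
 * fixed value instead because that PHY cannot do RMW.
 */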
if (!err) 2745 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 2746 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN); 2747 } 2748 2749 /* Set phy register 0x10 bit 0 to high fifo elasticity to support 2750 * jumbo frames transmission. 2751 */ 2752 if (tg3_flag(tp, JUMBO_CAPABLE)) { 2753 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val)) 2754 tg3_writephy(tp, MII_TG3_EXT_CTRL, 2755 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC); 2756 } 2757 2758 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 2759 /* adjust output voltage */ 2760 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12); 2761 } 2762 2763 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0) 2764 tg3_phydsp_write(tp, 0xffb, 0x4000); 2765 2766 tg3_phy_toggle_automdix(tp, true); 2767 tg3_phy_set_wirespeed(tp); 2768 return 0; 2769 } 2770 2771 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001 2772 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002 2773 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \ 2774 TG3_GPIO_MSG_NEED_VAUX) 2775 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \ 2776 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \ 2777 (TG3_GPIO_MSG_DRVR_PRES << 4) | \ 2778 (TG3_GPIO_MSG_DRVR_PRES << 8) | \ 2779 (TG3_GPIO_MSG_DRVR_PRES << 12)) 2780 2781 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \ 2782 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \ 2783 (TG3_GPIO_MSG_NEED_VAUX << 4) | \ 2784 (TG3_GPIO_MSG_NEED_VAUX << 8) | \ 2785 (TG3_GPIO_MSG_NEED_VAUX << 12)) 2786 2787 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat) 2788 { 2789 u32 status, shift; 2790 2791 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 2792 tg3_asic_rev(tp) == ASIC_REV_5719) 2793 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG); 2794 else 2795 status = tr32(TG3_CPMU_DRV_STATUS); 2796 2797 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn; 2798 status &= ~(TG3_GPIO_MSG_MASK << shift); 2799 status |= (newstat << shift); 2800 2801 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 2802 tg3_asic_rev(tp) == ASIC_REV_5719) 2803 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status); 2804 else 2805 tw32(TG3_CPMU_DRV_STATUS, status); 2806 2807 return status >> TG3_APE_GPIO_MSG_SHIFT; 2808 } 2809 2810 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp) 2811 { 2812 if (!tg3_flag(tp, IS_NIC)) 2813 return 0; 2814 2815 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 2816 tg3_asic_rev(tp) == ASIC_REV_5719 || 2817 tg3_asic_rev(tp) == ASIC_REV_5720) { 2818 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO)) 2819 return -EIO; 2820 2821 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES); 2822 2823 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 2824 TG3_GRC_LCLCTL_PWRSW_DELAY); 2825 2826 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO); 2827 } else { 2828 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 2829 TG3_GRC_LCLCTL_PWRSW_DELAY); 2830 } 2831 2832 return 0; 2833 } 2834 2835 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp) 2836 { 2837 u32 grc_local_ctrl; 2838 2839 if (!tg3_flag(tp, IS_NIC) || 2840 tg3_asic_rev(tp) == ASIC_REV_5700 || 2841 tg3_asic_rev(tp) == ASIC_REV_5701) 2842 return; 2843 2844 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1; 2845 2846 tw32_wait_f(GRC_LOCAL_CTRL, 2847 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1, 2848 TG3_GRC_LCLCTL_PWRSW_DELAY); 2849 2850 tw32_wait_f(GRC_LOCAL_CTRL, 2851 grc_local_ctrl, 2852 TG3_GRC_LCLCTL_PWRSW_DELAY); 2853 2854 tw32_wait_f(GRC_LOCAL_CTRL, 2855 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1, 2856 TG3_GRC_LCLCTL_PWRSW_DELAY); 2857 } 2858 2859 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp) 2860 { 2861 if (!tg3_flag(tp, IS_NIC)) 2862 return; 2863 2864 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 2865 tg3_asic_rev(tp) == 
ASIC_REV_5701) { 2866 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | 2867 (GRC_LCLCTRL_GPIO_OE0 | 2868 GRC_LCLCTRL_GPIO_OE1 | 2869 GRC_LCLCTRL_GPIO_OE2 | 2870 GRC_LCLCTRL_GPIO_OUTPUT0 | 2871 GRC_LCLCTRL_GPIO_OUTPUT1), 2872 TG3_GRC_LCLCTL_PWRSW_DELAY); 2873 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || 2874 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { 2875 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */ 2876 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 | 2877 GRC_LCLCTRL_GPIO_OE1 | 2878 GRC_LCLCTRL_GPIO_OE2 | 2879 GRC_LCLCTRL_GPIO_OUTPUT0 | 2880 GRC_LCLCTRL_GPIO_OUTPUT1 | 2881 tp->grc_local_ctrl; 2882 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 2883 TG3_GRC_LCLCTL_PWRSW_DELAY); 2884 2885 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2; 2886 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 2887 TG3_GRC_LCLCTL_PWRSW_DELAY); 2888 2889 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0; 2890 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 2891 TG3_GRC_LCLCTL_PWRSW_DELAY); 2892 } else { 2893 u32 no_gpio2; 2894 u32 grc_local_ctrl = 0; 2895 2896 /* Workaround to prevent overdrawing Amps. */ 2897 if (tg3_asic_rev(tp) == ASIC_REV_5714) { 2898 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; 2899 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | 2900 grc_local_ctrl, 2901 TG3_GRC_LCLCTL_PWRSW_DELAY); 2902 } 2903 2904 /* On 5753 and variants, GPIO2 cannot be used. */ 2905 no_gpio2 = tp->nic_sram_data_cfg & 2906 NIC_SRAM_DATA_CFG_NO_GPIO2; 2907 2908 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | 2909 GRC_LCLCTRL_GPIO_OE1 | 2910 GRC_LCLCTRL_GPIO_OE2 | 2911 GRC_LCLCTRL_GPIO_OUTPUT1 | 2912 GRC_LCLCTRL_GPIO_OUTPUT2; 2913 if (no_gpio2) { 2914 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 | 2915 GRC_LCLCTRL_GPIO_OUTPUT2); 2916 } 2917 tw32_wait_f(GRC_LOCAL_CTRL, 2918 tp->grc_local_ctrl | grc_local_ctrl, 2919 TG3_GRC_LCLCTL_PWRSW_DELAY); 2920 2921 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0; 2922 2923 tw32_wait_f(GRC_LOCAL_CTRL, 2924 tp->grc_local_ctrl | grc_local_ctrl, 2925 TG3_GRC_LCLCTL_PWRSW_DELAY); 2926 2927 if (!no_gpio2) { 2928 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2; 2929 tw32_wait_f(GRC_LOCAL_CTRL, 2930 tp->grc_local_ctrl | grc_local_ctrl, 2931 TG3_GRC_LCLCTL_PWRSW_DELAY); 2932 } 2933 } 2934 } 2935 2936 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable) 2937 { 2938 u32 msg = 0; 2939 2940 /* Serialize power state transitions */ 2941 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO)) 2942 return; 2943 2944 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable) 2945 msg = TG3_GPIO_MSG_NEED_VAUX; 2946 2947 msg = tg3_set_function_status(tp, msg); 2948 2949 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK) 2950 goto done; 2951 2952 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK) 2953 tg3_pwrsrc_switch_to_vaux(tp); 2954 else 2955 tg3_pwrsrc_die_with_vmain(tp); 2956 2957 done: 2958 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO); 2959 } 2960 2961 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol) 2962 { 2963 bool need_vaux = false; 2964 2965 /* The GPIOs do something completely different on 57765. */ 2966 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS)) 2967 return; 2968 2969 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 2970 tg3_asic_rev(tp) == ASIC_REV_5719 || 2971 tg3_asic_rev(tp) == ASIC_REV_5720) { 2972 tg3_frob_aux_power_5717(tp, include_wol ? 
2973 tg3_flag(tp, WOL_ENABLE) != 0 : 0); 2974 return; 2975 } 2976 2977 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) { 2978 struct net_device *dev_peer; 2979 2980 dev_peer = pci_get_drvdata(tp->pdev_peer); 2981 2982 /* remove_one() may have been run on the peer. */ 2983 if (dev_peer) { 2984 struct tg3 *tp_peer = netdev_priv(dev_peer); 2985 2986 if (tg3_flag(tp_peer, INIT_COMPLETE)) 2987 return; 2988 2989 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) || 2990 tg3_flag(tp_peer, ENABLE_ASF)) 2991 need_vaux = true; 2992 } 2993 } 2994 2995 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) || 2996 tg3_flag(tp, ENABLE_ASF)) 2997 need_vaux = true; 2998 2999 if (need_vaux) 3000 tg3_pwrsrc_switch_to_vaux(tp); 3001 else 3002 tg3_pwrsrc_die_with_vmain(tp); 3003 } 3004 3005 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed) 3006 { 3007 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2) 3008 return 1; 3009 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) { 3010 if (speed != SPEED_10) 3011 return 1; 3012 } else if (speed == SPEED_10) 3013 return 1; 3014 3015 return 0; 3016 } 3017 3018 static bool tg3_phy_power_bug(struct tg3 *tp) 3019 { 3020 switch (tg3_asic_rev(tp)) { 3021 case ASIC_REV_5700: 3022 case ASIC_REV_5704: 3023 return true; 3024 case ASIC_REV_5780: 3025 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 3026 return true; 3027 return false; 3028 case ASIC_REV_5717: 3029 if (!tp->pci_fn) 3030 return true; 3031 return false; 3032 case ASIC_REV_5719: 3033 case ASIC_REV_5720: 3034 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 3035 !tp->pci_fn) 3036 return true; 3037 return false; 3038 } 3039 3040 return false; 3041 } 3042 3043 static bool tg3_phy_led_bug(struct tg3 *tp) 3044 { 3045 switch (tg3_asic_rev(tp)) { 3046 case ASIC_REV_5719: 3047 case ASIC_REV_5720: 3048 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && 3049 !tp->pci_fn) 3050 return true; 3051 return false; 3052 } 3053 3054 return false; 3055 } 3056 3057 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) 3058 { 3059 u32 val; 3060 3061 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) 3062 return; 3063 3064 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 3065 if (tg3_asic_rev(tp) == ASIC_REV_5704) { 3066 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL); 3067 u32 serdes_cfg = tr32(MAC_SERDES_CFG); 3068 3069 sg_dig_ctrl |= 3070 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET; 3071 tw32(SG_DIG_CTRL, sg_dig_ctrl); 3072 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15)); 3073 } 3074 return; 3075 } 3076 3077 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 3078 tg3_bmcr_reset(tp); 3079 val = tr32(GRC_MISC_CFG); 3080 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ); 3081 udelay(40); 3082 return; 3083 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 3084 u32 phytest; 3085 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) { 3086 u32 phy; 3087 3088 tg3_writephy(tp, MII_ADVERTISE, 0); 3089 tg3_writephy(tp, MII_BMCR, 3090 BMCR_ANENABLE | BMCR_ANRESTART); 3091 3092 tg3_writephy(tp, MII_TG3_FET_TEST, 3093 phytest | MII_TG3_FET_SHADOW_EN); 3094 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) { 3095 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD; 3096 tg3_writephy(tp, 3097 MII_TG3_FET_SHDW_AUXMODE4, 3098 phy); 3099 } 3100 tg3_writephy(tp, MII_TG3_FET_TEST, phytest); 3101 } 3102 return; 3103 } else if (do_low_power) { 3104 if (!tg3_phy_led_bug(tp)) 3105 tg3_writephy(tp, MII_TG3_EXT_CTRL, 3106 MII_TG3_EXT_CTRL_FORCE_LED_OFF); 3107 3108 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR | 3109 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE | 3110 MII_TG3_AUXCTL_PCTL_VREG_11V; 3111 
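/* Editor's note: the power-control bits assembled above (100TX low
 * power, isolate, VREG select) go into the AUXCTL power-control
 * shadow register; the PHY itself is powered down via BMCR_PDOWN at
 * the end of this function, unless a chip-specific power bug forces
 * an early return first.
 */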
tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val); 3112 } 3113 3114 /* The PHY should not be powered down on some chips because 3115 * of bugs. 3116 */ 3117 if (tg3_phy_power_bug(tp)) 3118 return; 3119 3120 if (tg3_chip_rev(tp) == CHIPREV_5784_AX || 3121 tg3_chip_rev(tp) == CHIPREV_5761_AX) { 3122 val = tr32(TG3_CPMU_LSPD_1000MB_CLK); 3123 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK; 3124 val |= CPMU_LSPD_1000MB_MACCLK_12_5; 3125 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val); 3126 } 3127 3128 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN); 3129 } 3130 3131 /* tp->lock is held. */ 3132 static int tg3_nvram_lock(struct tg3 *tp) 3133 { 3134 if (tg3_flag(tp, NVRAM)) { 3135 int i; 3136 3137 if (tp->nvram_lock_cnt == 0) { 3138 tw32(NVRAM_SWARB, SWARB_REQ_SET1); 3139 for (i = 0; i < 8000; i++) { 3140 if (tr32(NVRAM_SWARB) & SWARB_GNT1) 3141 break; 3142 udelay(20); 3143 } 3144 if (i == 8000) { 3145 tw32(NVRAM_SWARB, SWARB_REQ_CLR1); 3146 return -ENODEV; 3147 } 3148 } 3149 tp->nvram_lock_cnt++; 3150 } 3151 return 0; 3152 } 3153 3154 /* tp->lock is held. */ 3155 static void tg3_nvram_unlock(struct tg3 *tp) 3156 { 3157 if (tg3_flag(tp, NVRAM)) { 3158 if (tp->nvram_lock_cnt > 0) 3159 tp->nvram_lock_cnt--; 3160 if (tp->nvram_lock_cnt == 0) 3161 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1); 3162 } 3163 } 3164 3165 /* tp->lock is held. */ 3166 static void tg3_enable_nvram_access(struct tg3 *tp) 3167 { 3168 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) { 3169 u32 nvaccess = tr32(NVRAM_ACCESS); 3170 3171 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE); 3172 } 3173 } 3174 3175 /* tp->lock is held. */ 3176 static void tg3_disable_nvram_access(struct tg3 *tp) 3177 { 3178 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) { 3179 u32 nvaccess = tr32(NVRAM_ACCESS); 3180 3181 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE); 3182 } 3183 } 3184 3185 static int tg3_nvram_read_using_eeprom(struct tg3 *tp, 3186 u32 offset, u32 *val) 3187 { 3188 u32 tmp; 3189 int i; 3190 3191 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0) 3192 return -EINVAL; 3193 3194 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK | 3195 EEPROM_ADDR_DEVID_MASK | 3196 EEPROM_ADDR_READ); 3197 tw32(GRC_EEPROM_ADDR, 3198 tmp | 3199 (0 << EEPROM_ADDR_DEVID_SHIFT) | 3200 ((offset << EEPROM_ADDR_ADDR_SHIFT) & 3201 EEPROM_ADDR_ADDR_MASK) | 3202 EEPROM_ADDR_READ | EEPROM_ADDR_START); 3203 3204 for (i = 0; i < 1000; i++) { 3205 tmp = tr32(GRC_EEPROM_ADDR); 3206 3207 if (tmp & EEPROM_ADDR_COMPLETE) 3208 break; 3209 msleep(1); 3210 } 3211 if (!(tmp & EEPROM_ADDR_COMPLETE)) 3212 return -EBUSY; 3213 3214 tmp = tr32(GRC_EEPROM_DATA); 3215 3216 /* 3217 * The data will always be opposite the native endian 3218 * format. Perform a blind byteswap to compensate. 
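 *
 * Editor's example: swab32() reverses the four bytes, so a tmp of
 * 0x78563412 becomes 0x12345678 in *val below.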
3219 */ 3220 *val = swab32(tmp); 3221 3222 return 0; 3223 } 3224 3225 #define NVRAM_CMD_TIMEOUT 5000 3226 3227 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd) 3228 { 3229 int i; 3230 3231 tw32(NVRAM_CMD, nvram_cmd); 3232 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) { 3233 usleep_range(10, 40); 3234 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) { 3235 udelay(10); 3236 break; 3237 } 3238 } 3239 3240 if (i == NVRAM_CMD_TIMEOUT) 3241 return -EBUSY; 3242 3243 return 0; 3244 } 3245 3246 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr) 3247 { 3248 if (tg3_flag(tp, NVRAM) && 3249 tg3_flag(tp, NVRAM_BUFFERED) && 3250 tg3_flag(tp, FLASH) && 3251 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) && 3252 (tp->nvram_jedecnum == JEDEC_ATMEL)) 3253 3254 addr = ((addr / tp->nvram_pagesize) << 3255 ATMEL_AT45DB0X1B_PAGE_POS) + 3256 (addr % tp->nvram_pagesize); 3257 3258 return addr; 3259 } 3260 3261 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr) 3262 { 3263 if (tg3_flag(tp, NVRAM) && 3264 tg3_flag(tp, NVRAM_BUFFERED) && 3265 tg3_flag(tp, FLASH) && 3266 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) && 3267 (tp->nvram_jedecnum == JEDEC_ATMEL)) 3268 3269 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) * 3270 tp->nvram_pagesize) + 3271 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1)); 3272 3273 return addr; 3274 } 3275 3276 /* NOTE: Data read in from NVRAM is byteswapped according to 3277 * the byteswapping settings for all other register accesses. 3278 * tg3 devices are BE devices, so on a BE machine, the data 3279 * returned will be exactly as it is seen in NVRAM. On a LE 3280 * machine, the 32-bit value will be byteswapped. 3281 */ 3282 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val) 3283 { 3284 int ret; 3285 3286 if (!tg3_flag(tp, NVRAM)) 3287 return tg3_nvram_read_using_eeprom(tp, offset, val); 3288 3289 offset = tg3_nvram_phys_addr(tp, offset); 3290 3291 if (offset > NVRAM_ADDR_MSK) 3292 return -EINVAL; 3293 3294 ret = tg3_nvram_lock(tp); 3295 if (ret) 3296 return ret; 3297 3298 tg3_enable_nvram_access(tp); 3299 3300 tw32(NVRAM_ADDR, offset); 3301 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO | 3302 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE); 3303 3304 if (ret == 0) 3305 *val = tr32(NVRAM_RDDATA); 3306 3307 tg3_disable_nvram_access(tp); 3308 3309 tg3_nvram_unlock(tp); 3310 3311 return ret; 3312 } 3313 3314 /* Ensures NVRAM data is in bytestream format. */ 3315 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val) 3316 { 3317 u32 v; 3318 int res = tg3_nvram_read(tp, offset, &v); 3319 if (!res) 3320 *val = cpu_to_be32(v); 3321 return res; 3322 } 3323 3324 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp, 3325 u32 offset, u32 len, u8 *buf) 3326 { 3327 int i, j, rc = 0; 3328 u32 val; 3329 3330 for (i = 0; i < len; i += 4) { 3331 u32 addr; 3332 __be32 data; 3333 3334 addr = offset + i; 3335 3336 memcpy(&data, buf + i, 4); 3337 3338 /* 3339 * The SEEPROM interface expects the data to always be opposite 3340 * the native endian format. We accomplish this by reversing 3341 * all the operations that would have been performed on the 3342 * data from a call to tg3_nvram_read_be32(). 
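 *
 * Editor's note: the swab32(be32_to_cpu(data)) below is the exact
 * inverse of the swab32() + cpu_to_be32() pair in the read path, so
 * a word written here reads back unchanged via tg3_nvram_read_be32().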
3343 */ 3344 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data))); 3345 3346 val = tr32(GRC_EEPROM_ADDR); 3347 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE); 3348 3349 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK | 3350 EEPROM_ADDR_READ); 3351 tw32(GRC_EEPROM_ADDR, val | 3352 (0 << EEPROM_ADDR_DEVID_SHIFT) | 3353 (addr & EEPROM_ADDR_ADDR_MASK) | 3354 EEPROM_ADDR_START | 3355 EEPROM_ADDR_WRITE); 3356 3357 for (j = 0; j < 1000; j++) { 3358 val = tr32(GRC_EEPROM_ADDR); 3359 3360 if (val & EEPROM_ADDR_COMPLETE) 3361 break; 3362 msleep(1); 3363 } 3364 if (!(val & EEPROM_ADDR_COMPLETE)) { 3365 rc = -EBUSY; 3366 break; 3367 } 3368 } 3369 3370 return rc; 3371 } 3372 3373 /* offset and length are dword aligned */ 3374 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len, 3375 u8 *buf) 3376 { 3377 int ret = 0; 3378 u32 pagesize = tp->nvram_pagesize; 3379 u32 pagemask = pagesize - 1; 3380 u32 nvram_cmd; 3381 u8 *tmp; 3382 3383 tmp = kmalloc(pagesize, GFP_KERNEL); 3384 if (tmp == NULL) 3385 return -ENOMEM; 3386 3387 while (len) { 3388 int j; 3389 u32 phy_addr, page_off, size; 3390 3391 phy_addr = offset & ~pagemask; 3392 3393 for (j = 0; j < pagesize; j += 4) { 3394 ret = tg3_nvram_read_be32(tp, phy_addr + j, 3395 (__be32 *) (tmp + j)); 3396 if (ret) 3397 break; 3398 } 3399 if (ret) 3400 break; 3401 3402 page_off = offset & pagemask; 3403 size = pagesize; 3404 if (len < size) 3405 size = len; 3406 3407 len -= size; 3408 3409 memcpy(tmp + page_off, buf, size); 3410 3411 offset = offset + (pagesize - page_off); 3412 3413 tg3_enable_nvram_access(tp); 3414 3415 /* 3416 * Before we can erase the flash page, we need 3417 * to issue a special "write enable" command. 3418 */ 3419 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; 3420 3421 if (tg3_nvram_exec_cmd(tp, nvram_cmd)) 3422 break; 3423 3424 /* Erase the target page */ 3425 tw32(NVRAM_ADDR, phy_addr); 3426 3427 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR | 3428 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE; 3429 3430 if (tg3_nvram_exec_cmd(tp, nvram_cmd)) 3431 break; 3432 3433 /* Issue another write enable to start the write. 
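 *
 * (Editor's note: serial flash parts drop the write-enable latch
 * after each erase or program operation, so a fresh WREN is needed
 * here even though one was already issued before the page erase
 * above.)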
*/ 3434 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; 3435 3436 if (tg3_nvram_exec_cmd(tp, nvram_cmd)) 3437 break; 3438 3439 for (j = 0; j < pagesize; j += 4) { 3440 __be32 data; 3441 3442 data = *((__be32 *) (tmp + j)); 3443 3444 tw32(NVRAM_WRDATA, be32_to_cpu(data)); 3445 3446 tw32(NVRAM_ADDR, phy_addr + j); 3447 3448 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | 3449 NVRAM_CMD_WR; 3450 3451 if (j == 0) 3452 nvram_cmd |= NVRAM_CMD_FIRST; 3453 else if (j == (pagesize - 4)) 3454 nvram_cmd |= NVRAM_CMD_LAST; 3455 3456 ret = tg3_nvram_exec_cmd(tp, nvram_cmd); 3457 if (ret) 3458 break; 3459 } 3460 if (ret) 3461 break; 3462 } 3463 3464 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE; 3465 tg3_nvram_exec_cmd(tp, nvram_cmd); 3466 3467 kfree(tmp); 3468 3469 return ret; 3470 } 3471 3472 /* offset and length are dword aligned */ 3473 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len, 3474 u8 *buf) 3475 { 3476 int i, ret = 0; 3477 3478 for (i = 0; i < len; i += 4, offset += 4) { 3479 u32 page_off, phy_addr, nvram_cmd; 3480 __be32 data; 3481 3482 memcpy(&data, buf + i, 4); 3483 tw32(NVRAM_WRDATA, be32_to_cpu(data)); 3484 3485 page_off = offset % tp->nvram_pagesize; 3486 3487 phy_addr = tg3_nvram_phys_addr(tp, offset); 3488 3489 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR; 3490 3491 if (page_off == 0 || i == 0) 3492 nvram_cmd |= NVRAM_CMD_FIRST; 3493 if (page_off == (tp->nvram_pagesize - 4)) 3494 nvram_cmd |= NVRAM_CMD_LAST; 3495 3496 if (i == (len - 4)) 3497 nvram_cmd |= NVRAM_CMD_LAST; 3498 3499 if ((nvram_cmd & NVRAM_CMD_FIRST) || 3500 !tg3_flag(tp, FLASH) || 3501 !tg3_flag(tp, 57765_PLUS)) 3502 tw32(NVRAM_ADDR, phy_addr); 3503 3504 if (tg3_asic_rev(tp) != ASIC_REV_5752 && 3505 !tg3_flag(tp, 5755_PLUS) && 3506 (tp->nvram_jedecnum == JEDEC_ST) && 3507 (nvram_cmd & NVRAM_CMD_FIRST)) { 3508 u32 cmd; 3509 3510 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; 3511 ret = tg3_nvram_exec_cmd(tp, cmd); 3512 if (ret) 3513 break; 3514 } 3515 if (!tg3_flag(tp, FLASH)) { 3516 /* We always do complete word writes to eeprom. 
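 *
 * (Editor's note: i.e. for SEEPROM parts every 32-bit word is issued
 * as its own FIRST|LAST transaction.)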
*/ 3517 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST); 3518 } 3519 3520 ret = tg3_nvram_exec_cmd(tp, nvram_cmd); 3521 if (ret) 3522 break; 3523 } 3524 return ret; 3525 } 3526 3527 /* offset and length are dword aligned */ 3528 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf) 3529 { 3530 int ret; 3531 3532 if (tg3_flag(tp, EEPROM_WRITE_PROT)) { 3533 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl & 3534 ~GRC_LCLCTRL_GPIO_OUTPUT1); 3535 udelay(40); 3536 } 3537 3538 if (!tg3_flag(tp, NVRAM)) { 3539 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf); 3540 } else { 3541 u32 grc_mode; 3542 3543 ret = tg3_nvram_lock(tp); 3544 if (ret) 3545 return ret; 3546 3547 tg3_enable_nvram_access(tp); 3548 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) 3549 tw32(NVRAM_WRITE1, 0x406); 3550 3551 grc_mode = tr32(GRC_MODE); 3552 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE); 3553 3554 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) { 3555 ret = tg3_nvram_write_block_buffered(tp, offset, len, 3556 buf); 3557 } else { 3558 ret = tg3_nvram_write_block_unbuffered(tp, offset, len, 3559 buf); 3560 } 3561 3562 grc_mode = tr32(GRC_MODE); 3563 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE); 3564 3565 tg3_disable_nvram_access(tp); 3566 tg3_nvram_unlock(tp); 3567 } 3568 3569 if (tg3_flag(tp, EEPROM_WRITE_PROT)) { 3570 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 3571 udelay(40); 3572 } 3573 3574 return ret; 3575 } 3576 3577 #define RX_CPU_SCRATCH_BASE 0x30000 3578 #define RX_CPU_SCRATCH_SIZE 0x04000 3579 #define TX_CPU_SCRATCH_BASE 0x34000 3580 #define TX_CPU_SCRATCH_SIZE 0x04000 3581 3582 /* tp->lock is held. */ 3583 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base) 3584 { 3585 int i; 3586 const int iters = 10000; 3587 3588 for (i = 0; i < iters; i++) { 3589 tw32(cpu_base + CPU_STATE, 0xffffffff); 3590 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); 3591 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT) 3592 break; 3593 if (pci_channel_offline(tp->pdev)) 3594 return -EBUSY; 3595 } 3596 3597 return (i == iters) ? -EBUSY : 0; 3598 } 3599 3600 /* tp->lock is held. */ 3601 static int tg3_rxcpu_pause(struct tg3 *tp) 3602 { 3603 int rc = tg3_pause_cpu(tp, RX_CPU_BASE); 3604 3605 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); 3606 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT); 3607 udelay(10); 3608 3609 return rc; 3610 } 3611 3612 /* tp->lock is held. */ 3613 static int tg3_txcpu_pause(struct tg3 *tp) 3614 { 3615 return tg3_pause_cpu(tp, TX_CPU_BASE); 3616 } 3617 3618 /* tp->lock is held. */ 3619 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base) 3620 { 3621 tw32(cpu_base + CPU_STATE, 0xffffffff); 3622 tw32_f(cpu_base + CPU_MODE, 0x00000000); 3623 } 3624 3625 /* tp->lock is held. */ 3626 static void tg3_rxcpu_resume(struct tg3 *tp) 3627 { 3628 tg3_resume_cpu(tp, RX_CPU_BASE); 3629 } 3630 3631 /* tp->lock is held. */ 3632 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base) 3633 { 3634 int rc; 3635 3636 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)); 3637 3638 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 3639 u32 val = tr32(GRC_VCPU_EXT_CTRL); 3640 3641 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU); 3642 return 0; 3643 } 3644 if (cpu_base == RX_CPU_BASE) { 3645 rc = tg3_rxcpu_pause(tp); 3646 } else { 3647 /* 3648 * There is only an Rx CPU for the 5750 derivative in the 3649 * BCM4785. 
3650 */ 3651 if (tg3_flag(tp, IS_SSB_CORE)) 3652 return 0; 3653 3654 rc = tg3_txcpu_pause(tp); 3655 } 3656 3657 if (rc) { 3658 netdev_err(tp->dev, "%s timed out, %s CPU\n", 3659 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX"); 3660 return -ENODEV; 3661 } 3662 3663 /* Clear firmware's nvram arbitration. */ 3664 if (tg3_flag(tp, NVRAM)) 3665 tw32(NVRAM_SWARB, SWARB_REQ_CLR0); 3666 return 0; 3667 } 3668 3669 static int tg3_fw_data_len(struct tg3 *tp, 3670 const struct tg3_firmware_hdr *fw_hdr) 3671 { 3672 int fw_len; 3673 3674 /* Non fragmented firmware have one firmware header followed by a 3675 * contiguous chunk of data to be written. The length field in that 3676 * header is not the length of data to be written but the complete 3677 * length of the bss. The data length is determined based on 3678 * tp->fw->size minus headers. 3679 * 3680 * Fragmented firmware have a main header followed by multiple 3681 * fragments. Each fragment is identical to non fragmented firmware 3682 * with a firmware header followed by a contiguous chunk of data. In 3683 * the main header, the length field is unused and set to 0xffffffff. 3684 * In each fragment header the length is the entire size of that 3685 * fragment i.e. fragment data + header length. Data length is 3686 * therefore length field in the header minus TG3_FW_HDR_LEN. 3687 */ 3688 if (tp->fw_len == 0xffffffff) 3689 fw_len = be32_to_cpu(fw_hdr->len); 3690 else 3691 fw_len = tp->fw->size; 3692 3693 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32); 3694 } 3695 3696 /* tp->lock is held. */ 3697 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, 3698 u32 cpu_scratch_base, int cpu_scratch_size, 3699 const struct tg3_firmware_hdr *fw_hdr) 3700 { 3701 int err, i; 3702 void (*write_op)(struct tg3 *, u32, u32); 3703 int total_len = tp->fw->size; 3704 3705 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) { 3706 netdev_err(tp->dev, 3707 "%s: Trying to load TX cpu firmware which is 5705\n", 3708 __func__); 3709 return -EINVAL; 3710 } 3711 3712 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766) 3713 write_op = tg3_write_mem; 3714 else 3715 write_op = tg3_write_indirect_reg32; 3716 3717 if (tg3_asic_rev(tp) != ASIC_REV_57766) { 3718 /* It is possible that bootcode is still loading at this point. 3719 * Get the nvram lock first before halting the cpu. 3720 */ 3721 int lock_err = tg3_nvram_lock(tp); 3722 err = tg3_halt_cpu(tp, cpu_base); 3723 if (!lock_err) 3724 tg3_nvram_unlock(tp); 3725 if (err) 3726 goto out; 3727 3728 for (i = 0; i < cpu_scratch_size; i += sizeof(u32)) 3729 write_op(tp, cpu_scratch_base + i, 0); 3730 tw32(cpu_base + CPU_STATE, 0xffffffff); 3731 tw32(cpu_base + CPU_MODE, 3732 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT); 3733 } else { 3734 /* Subtract additional main header for fragmented firmware and 3735 * advance to the first fragment 3736 */ 3737 total_len -= TG3_FW_HDR_LEN; 3738 fw_hdr++; 3739 } 3740 3741 do { 3742 u32 *fw_data = (u32 *)(fw_hdr + 1); 3743 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++) 3744 write_op(tp, cpu_scratch_base + 3745 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) + 3746 (i * sizeof(u32)), 3747 be32_to_cpu(fw_data[i])); 3748 3749 total_len -= be32_to_cpu(fw_hdr->len); 3750 3751 /* Advance to next fragment */ 3752 fw_hdr = (struct tg3_firmware_hdr *) 3753 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len)); 3754 } while (total_len > 0); 3755 3756 err = 0; 3757 3758 out: 3759 return err; 3760 } 3761 3762 /* tp->lock is held. 
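 */

/* Editor's sketch (illustration only): the firmware download paths
 * further down follow the outline
 *
 *	tg3_load_firmware_cpu(tp, cpu_base, scratch_base, size, fw_hdr);
 *	tg3_pause_cpu_and_set_pc(tp, cpu_base,
 *				 be32_to_cpu(fw_hdr->base_addr));
 *	tg3_resume_cpu(tp, cpu_base);
 *
 * as in tg3_load_5701_a0_firmware_fix() and tg3_load_tso_firmware().
 */

/* tp->lock is held.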
*/ 3763 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc) 3764 { 3765 int i; 3766 const int iters = 5; 3767 3768 tw32(cpu_base + CPU_STATE, 0xffffffff); 3769 tw32_f(cpu_base + CPU_PC, pc); 3770 3771 for (i = 0; i < iters; i++) { 3772 if (tr32(cpu_base + CPU_PC) == pc) 3773 break; 3774 tw32(cpu_base + CPU_STATE, 0xffffffff); 3775 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); 3776 tw32_f(cpu_base + CPU_PC, pc); 3777 udelay(1000); 3778 } 3779 3780 return (i == iters) ? -EBUSY : 0; 3781 } 3782 3783 /* tp->lock is held. */ 3784 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp) 3785 { 3786 const struct tg3_firmware_hdr *fw_hdr; 3787 int err; 3788 3789 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; 3790 3791 /* Firmware blob starts with version numbers, followed by 3792 start address and length. We are setting complete length. 3793 length = end_address_of_bss - start_address_of_text. 3794 Remainder is the blob to be loaded contiguously 3795 from start address. */ 3796 3797 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE, 3798 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE, 3799 fw_hdr); 3800 if (err) 3801 return err; 3802 3803 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE, 3804 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE, 3805 fw_hdr); 3806 if (err) 3807 return err; 3808 3809 /* Now startup only the RX cpu. */ 3810 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE, 3811 be32_to_cpu(fw_hdr->base_addr)); 3812 if (err) { 3813 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x " 3814 "should be %08x\n", __func__, 3815 tr32(RX_CPU_BASE + CPU_PC), 3816 be32_to_cpu(fw_hdr->base_addr)); 3817 return -ENODEV; 3818 } 3819 3820 tg3_rxcpu_resume(tp); 3821 3822 return 0; 3823 } 3824 3825 static int tg3_validate_rxcpu_state(struct tg3 *tp) 3826 { 3827 const int iters = 1000; 3828 int i; 3829 u32 val; 3830 3831 /* Wait for boot code to complete initialization and enter service 3832 * loop. It is then safe to download service patches 3833 */ 3834 for (i = 0; i < iters; i++) { 3835 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP) 3836 break; 3837 3838 udelay(10); 3839 } 3840 3841 if (i == iters) { 3842 netdev_err(tp->dev, "Boot code not ready for service patches\n"); 3843 return -EBUSY; 3844 } 3845 3846 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE); 3847 if (val & 0xff) { 3848 netdev_warn(tp->dev, 3849 "Other patches exist. Not downloading EEE patch\n"); 3850 return -EEXIST; 3851 } 3852 3853 return 0; 3854 } 3855 3856 /* tp->lock is held. */ 3857 static void tg3_load_57766_firmware(struct tg3 *tp) 3858 { 3859 struct tg3_firmware_hdr *fw_hdr; 3860 3861 if (!tg3_flag(tp, NO_NVRAM)) 3862 return; 3863 3864 if (tg3_validate_rxcpu_state(tp)) 3865 return; 3866 3867 if (!tp->fw) 3868 return; 3869 3870 /* This firmware blob has a different format than older firmware 3871 * releases as given below. The main difference is we have fragmented 3872 * data to be written to non-contiguous locations. 3873 * 3874 * In the beginning we have a firmware header identical to other 3875 * firmware which consists of version, base addr and length. The length 3876 * here is unused and set to 0xffffffff. 3877 * 3878 * This is followed by a series of firmware fragments which are 3879 * individually identical to previous firmware. i.e. they have the 3880 * firmware header and followed by data for that fragment. The version 3881 * field of the individual fragment header is unused. 
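 *
 * Editor's illustration of the resulting layout:
 *
 *	[main hdr:  version | base_addr | len = 0xffffffff]
 *	[frag hdr: (unused) | base_addr | len][fragment data ...]
 *	[frag hdr: (unused) | base_addr | len][fragment data ...]
 *	...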
3882 */ 3883 3884 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; 3885 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR) 3886 return; 3887 3888 if (tg3_rxcpu_pause(tp)) 3889 return; 3890 3891 /* tg3_load_firmware_cpu() will always succeed for the 57766 */ 3892 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr); 3893 3894 tg3_rxcpu_resume(tp); 3895 } 3896 3897 /* tp->lock is held. */ 3898 static int tg3_load_tso_firmware(struct tg3 *tp) 3899 { 3900 const struct tg3_firmware_hdr *fw_hdr; 3901 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size; 3902 int err; 3903 3904 if (!tg3_flag(tp, FW_TSO)) 3905 return 0; 3906 3907 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; 3908 3909 /* Firmware blob starts with version numbers, followed by 3910 start address and length. We are setting complete length. 3911 length = end_address_of_bss - start_address_of_text. 3912 Remainder is the blob to be loaded contiguously 3913 from start address. */ 3914 3915 cpu_scratch_size = tp->fw_len; 3916 3917 if (tg3_asic_rev(tp) == ASIC_REV_5705) { 3918 cpu_base = RX_CPU_BASE; 3919 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705; 3920 } else { 3921 cpu_base = TX_CPU_BASE; 3922 cpu_scratch_base = TX_CPU_SCRATCH_BASE; 3923 cpu_scratch_size = TX_CPU_SCRATCH_SIZE; 3924 } 3925 3926 err = tg3_load_firmware_cpu(tp, cpu_base, 3927 cpu_scratch_base, cpu_scratch_size, 3928 fw_hdr); 3929 if (err) 3930 return err; 3931 3932 /* Now startup the cpu. */ 3933 err = tg3_pause_cpu_and_set_pc(tp, cpu_base, 3934 be32_to_cpu(fw_hdr->base_addr)); 3935 if (err) { 3936 netdev_err(tp->dev, 3937 "%s fails to set CPU PC, is %08x should be %08x\n", 3938 __func__, tr32(cpu_base + CPU_PC), 3939 be32_to_cpu(fw_hdr->base_addr)); 3940 return -ENODEV; 3941 } 3942 3943 tg3_resume_cpu(tp, cpu_base); 3944 return 0; 3945 } 3946 3947 /* tp->lock is held. */ 3948 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index) 3949 { 3950 u32 addr_high, addr_low; 3951 3952 addr_high = ((mac_addr[0] << 8) | mac_addr[1]); 3953 addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) | 3954 (mac_addr[4] << 8) | mac_addr[5]); 3955 3956 if (index < 4) { 3957 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high); 3958 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low); 3959 } else { 3960 index -= 4; 3961 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high); 3962 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low); 3963 } 3964 } 3965 3966 /* tp->lock is held. */ 3967 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1) 3968 { 3969 u32 addr_high; 3970 int i; 3971 3972 for (i = 0; i < 4; i++) { 3973 if (i == 1 && skip_mac_1) 3974 continue; 3975 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i); 3976 } 3977 3978 if (tg3_asic_rev(tp) == ASIC_REV_5703 || 3979 tg3_asic_rev(tp) == ASIC_REV_5704) { 3980 for (i = 4; i < 16; i++) 3981 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i); 3982 } 3983 3984 addr_high = (tp->dev->dev_addr[0] + 3985 tp->dev->dev_addr[1] + 3986 tp->dev->dev_addr[2] + 3987 tp->dev->dev_addr[3] + 3988 tp->dev->dev_addr[4] + 3989 tp->dev->dev_addr[5]) & 3990 TX_BACKOFF_SEED_MASK; 3991 tw32(MAC_TX_BACKOFF_SEED, addr_high); 3992 } 3993 3994 static void tg3_enable_register_access(struct tg3 *tp) 3995 { 3996 /* 3997 * Make sure register accesses (indirect or otherwise) will function 3998 * correctly. 
3999 */ 4000 pci_write_config_dword(tp->pdev, 4001 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl); 4002 } 4003 4004 static int tg3_power_up(struct tg3 *tp) 4005 { 4006 int err; 4007 4008 tg3_enable_register_access(tp); 4009 4010 err = pci_set_power_state(tp->pdev, PCI_D0); 4011 if (!err) { 4012 /* Switch out of Vaux if it is a NIC */ 4013 tg3_pwrsrc_switch_to_vmain(tp); 4014 } else { 4015 netdev_err(tp->dev, "Transition to D0 failed\n"); 4016 } 4017 4018 return err; 4019 } 4020 4021 static int tg3_setup_phy(struct tg3 *, bool); 4022 4023 static int tg3_power_down_prepare(struct tg3 *tp) 4024 { 4025 u32 misc_host_ctrl; 4026 bool device_should_wake, do_low_power; 4027 4028 tg3_enable_register_access(tp); 4029 4030 /* Restore the CLKREQ setting. */ 4031 if (tg3_flag(tp, CLKREQ_BUG)) 4032 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL, 4033 PCI_EXP_LNKCTL_CLKREQ_EN); 4034 4035 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); 4036 tw32(TG3PCI_MISC_HOST_CTRL, 4037 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT); 4038 4039 device_should_wake = device_may_wakeup(&tp->pdev->dev) && 4040 tg3_flag(tp, WOL_ENABLE); 4041 4042 if (tg3_flag(tp, USE_PHYLIB)) { 4043 do_low_power = false; 4044 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) && 4045 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { 4046 struct phy_device *phydev; 4047 u32 phyid, advertising; 4048 4049 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 4050 4051 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; 4052 4053 tp->link_config.speed = phydev->speed; 4054 tp->link_config.duplex = phydev->duplex; 4055 tp->link_config.autoneg = phydev->autoneg; 4056 tp->link_config.advertising = phydev->advertising; 4057 4058 advertising = ADVERTISED_TP | 4059 ADVERTISED_Pause | 4060 ADVERTISED_Autoneg | 4061 ADVERTISED_10baseT_Half; 4062 4063 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) { 4064 if (tg3_flag(tp, WOL_SPEED_100MB)) 4065 advertising |= 4066 ADVERTISED_100baseT_Half | 4067 ADVERTISED_100baseT_Full | 4068 ADVERTISED_10baseT_Full; 4069 else 4070 advertising |= ADVERTISED_10baseT_Full; 4071 } 4072 4073 phydev->advertising = advertising; 4074 4075 phy_start_aneg(phydev); 4076 4077 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask; 4078 if (phyid != PHY_ID_BCMAC131) { 4079 phyid &= PHY_BCM_OUI_MASK; 4080 if (phyid == PHY_BCM_OUI_1 || 4081 phyid == PHY_BCM_OUI_2 || 4082 phyid == PHY_BCM_OUI_3) 4083 do_low_power = true; 4084 } 4085 } 4086 } else { 4087 do_low_power = true; 4088 4089 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) 4090 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; 4091 4092 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 4093 tg3_setup_phy(tp, false); 4094 } 4095 4096 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 4097 u32 val; 4098 4099 val = tr32(GRC_VCPU_EXT_CTRL); 4100 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL); 4101 } else if (!tg3_flag(tp, ENABLE_ASF)) { 4102 int i; 4103 u32 val; 4104 4105 for (i = 0; i < 200; i++) { 4106 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val); 4107 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) 4108 break; 4109 msleep(1); 4110 } 4111 } 4112 if (tg3_flag(tp, WOL_CAP)) 4113 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE | 4114 WOL_DRV_STATE_SHUTDOWN | 4115 WOL_DRV_WOL | 4116 WOL_SET_MAGIC_PKT); 4117 4118 if (device_should_wake) { 4119 u32 mac_mode; 4120 4121 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { 4122 if (do_low_power && 4123 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) { 4124 tg3_phy_auxctl_write(tp, 4125 MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 4126 MII_TG3_AUXCTL_PCTL_WOL_EN | 4127 
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else if (tp->phy_flags &
				 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
				if (tp->link_config.active_speed == SPEED_1000)
					mac_mode = MAC_MODE_PORT_MODE_GMII;
				else
					mac_mode = MAC_MODE_PORT_MODE_MII;
			} else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (tg3_asic_rev(tp) == ASIC_REV_5700) {
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					    SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
		    tg3_asic_rev(tp) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp, true);

	/* Workaround for unstable PLL clock */
	if ((!tg3_flag(tp, IS_SSB_CORE)) &&
	    ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
	     (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);

	return 0;
}

static void tg3_power_down(struct tg3 *tp)
{
	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}

static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val,
					 u16 *speed, u8 *duplex)
{
	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
	case MII_TG3_AUX_STAT_10HALF:
		*speed = SPEED_10;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_10FULL:
		*speed = SPEED_10;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_100HALF:
		*speed = SPEED_100;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_100FULL:
		*speed = SPEED_100;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_1000HALF:
		*speed = SPEED_1000;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_1000FULL:
		*speed = SPEED_1000;
		*duplex = DUPLEX_FULL;
		break;

	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
				 SPEED_10;
			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
				  DUPLEX_HALF;
			break;
		}
		*speed = SPEED_UNKNOWN;
		*duplex = DUPLEX_UNKNOWN;
		break;
	}
}
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	new_adv = ADVERTISE_CSMA;
	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
	new_adv |= mii_advertise_flowctrl(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);

		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
		if (err)
			goto done;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
	if (!err) {
		u32 err2;

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;

		if (!tp->eee.eee_enabled) {
			val = 0;
			tp->eee.advertised = 0;
		} else {
			tp->eee.advertised = advertise &
					     (ADVERTISED_100baseT_Full |
					      ADVERTISED_1000baseT_Full);
		}

		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
		if (err)
			val = 0;

		switch (tg3_asic_rev(tp)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
		case ASIC_REV_57766:
		case ASIC_REV_5719:
			/* If we advertised any EEE advertisements above... */
			if (val)
				val = MII_TG3_DSP_TAP26_ALNOKO |
				      MII_TG3_DSP_TAP26_RMRXSTO |
				      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
			/* Fall through */
		case ASIC_REV_5720:
		case ASIC_REV_5762:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
		}

		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
		if (!err)
			err = err2;
	}

done:
	return err;
}
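/*
 * Worked example (hypothetical values, for illustration only): advertising
 * 10/100 half+full with symmetric pause resolves above to
 *
 *   MII_ADVERTISE = ADVERTISE_CSMA | ADVERTISE_ALL | ADVERTISE_PAUSE_CAP
 *                 = 0x0001 | 0x01e0 | 0x0400 = 0x05e1
 *
 * since mii_advertise_flowctrl(FLOW_CTRL_TX | FLOW_CTRL_RX) maps to
 * ADVERTISE_PAUSE_CAP alone.
 */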
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		u32 adv, fc;

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			adv = ADVERTISED_10baseT_Half |
			      ADVERTISED_10baseT_Full;
			if (tg3_flag(tp, WOL_SPEED_100MB))
				adv |= ADVERTISED_100baseT_Half |
				       ADVERTISED_100baseT_Full;
			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
				if (!(tp->phy_flags &
				      TG3_PHYFLG_DISABLE_1G_HD_ADV))
					adv |= ADVERTISED_1000baseT_Half;
				adv |= ADVERTISED_1000baseT_Full;
			}

			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
		} else {
			adv = tp->link_config.advertising;
			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
				adv &= ~(ADVERTISED_1000baseT_Half |
					 ADVERTISED_1000baseT_Full);

			fc = tp->link_config.flowctrl;
		}

		tg3_phy_autoneg_cfg(tp, adv, fc);

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			/* Normally during power down we want to autonegotiate
			 * the lowest possible speed for WOL. However, to avoid
			 * link flap, we leave it untouched.
			 */
			return;
		}

		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		int i;
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* With autoneg disabled, 5715 only links up when the
			 * advertisement register has the configured speed
			 * enabled.
			 */
			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
		}

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	}
}

static int tg3_phy_pull_config(struct tg3 *tp)
{
	int err;
	u32 val;

	err = tg3_readphy(tp, MII_BMCR, &val);
	if (err)
		goto done;

	if (!(val & BMCR_ANENABLE)) {
		tp->link_config.autoneg = AUTONEG_DISABLE;
		tp->link_config.advertising = 0;
		tg3_flag_clear(tp, PAUSE_AUTONEG);

		err = -EIO;

		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
		case 0:
			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
				goto done;

			tp->link_config.speed = SPEED_10;
			break;
		case BMCR_SPEED100:
			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
				goto done;

			tp->link_config.speed = SPEED_100;
			break;
		case BMCR_SPEED1000:
			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
				tp->link_config.speed = SPEED_1000;
				break;
			}
			/* Fall through */
		default:
			goto done;
		}

		if (val & BMCR_FULLDPLX)
			tp->link_config.duplex = DUPLEX_FULL;
		else
			tp->link_config.duplex = DUPLEX_HALF;

		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

		err = 0;
		goto done;
	}

	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.advertising = ADVERTISED_Autoneg;
	tg3_flag_set(tp, PAUSE_AUTONEG);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		u32 adv;

		err = tg3_readphy(tp, MII_ADVERTISE, &val);
		if (err)
			goto done;

		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
		tp->link_config.advertising |= adv | ADVERTISED_TP;

		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
	} else {
		tp->link_config.advertising |= ADVERTISED_FIBRE;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 adv;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			err = tg3_readphy(tp, MII_CTRL1000, &val);
			if (err)
				goto done;

			adv = mii_ctrl1000_to_ethtool_adv_t(val);
		} else {
			err = tg3_readphy(tp, MII_ADVERTISE, &val);
			if (err)
				goto done;

			adv = tg3_decode_flowctrl_1000X(val);
			tp->link_config.flowctrl = adv;

			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
			adv = mii_adv_to_ethtool_adv_x(val);
		}

		tp->link_config.advertising |= adv;
	}

done:
	return err;
}
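/*
 * The BCM5401 DSP writes below are opaque, vendor-supplied fixup values
 * (presumably carried over from Broadcom's proprietary driver); they are
 * applied verbatim and have no register-level documentation here.
 */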
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);

	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);

	udelay(40);

	return err;
}

static bool tg3_phy_eee_config_ok(struct tg3 *tp)
{
	struct ethtool_eee eee;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return true;

	tg3_eee_pull_config(tp, &eee);

	if (tp->eee.eee_enabled) {
		if (tp->eee.advertised != eee.advertised ||
		    tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
		    tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
			return false;
	} else {
		/* EEE is disabled but we're advertising */
		if (eee.advertised)
			return false;
	}

	return true;
}

static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
{
	u32 advmsk, tgtadv, advertising;

	advertising = tp->link_config.advertising;
	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;

	advmsk = ADVERTISE_ALL;
	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	}

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return false;

	if ((*lcladv & advmsk) != tgtadv)
		return false;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 tg3_ctrl;

		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);

		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
			return false;

		if (tgtadv &&
		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
		} else {
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
		}

		if (tg3_ctrl != tgtadv)
			return false;
	}

	return true;
}

static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
{
	u32 lpeth = 0;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 val;

		if (tg3_readphy(tp, MII_STAT1000, &val))
			return false;

		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
	}

	if (tg3_readphy(tp, MII_LPA, rmtadv))
		return false;

	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
	tp->link_config.rmt_adv = lpeth;

	return true;
}

static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
{
	if (curr_link_up != tp->link_up) {
		if (curr_link_up) {
			netif_carrier_on(tp->dev);
		} else {
			netif_carrier_off(tp->dev);
			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}

		tg3_link_report(tp);
		return true;
	}

	return false;
}

static void tg3_clear_mac_status(struct tg3 *tp)
{
	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	       MAC_STATUS_SYNC_CHANGED |
	       MAC_STATUS_CFG_CHANGED |
	       MAC_STATUS_MI_COMPLETION |
	       MAC_STATUS_LNKSTATE_CHANGED);
	udelay(40);
}
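/*
 * tg3_setup_eee() below programs the CPMU's EEE machinery: the link-idle
 * detection sources, LPI entry behaviour for the TX and RX paths, and the
 * two debounce timers. The field encodings come verbatim from the register
 * definitions used in the function.
 */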
static void tg3_setup_eee(struct tg3 *tp)
{
	u32 val;

	val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
	      TG3_CPMU_EEE_LNKIDL_UART_IDL;
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
		val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;

	tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);

	tw32_f(TG3_CPMU_EEE_CTRL,
	       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);

	val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
	      (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
	      TG3_CPMU_EEEMD_LPI_IN_RX |
	      TG3_CPMU_EEEMD_EEE_ENABLE;

	if (tg3_asic_rev(tp) != ASIC_REV_5717)
		val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;

	if (tg3_flag(tp, ENABLE_APE))
		val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;

	tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);

	tw32_f(TG3_CPMU_EEE_DBTMR1,
	       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
	       (tp->eee.tx_lpi_timer & 0xffff));

	tw32_f(TG3_CPMU_EEE_DBTMR2,
	       TG3_CPMU_DBTMR2_APE_TX_2047US |
	       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
}
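/*
 * High-level shape of tg3_setup_copper_phy() below (a reading aid inferred
 * from the code itself): quiesce MAC status and auto-polling, optionally
 * reset the PHY, wait for BMSR to report link, decode speed/duplex from the
 * aux status register, verify the negotiated config still matches what was
 * requested, then program MAC_MODE, LEDs and flow control to match.
 */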
static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
{
	bool current_link_up;
	u32 bmsr, val;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tg3_clear_mac_status(tp);

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
	     tg3_asic_rev(tp) == ASIC_REV_5704 ||
	     tg3_asic_rev(tp) == ASIC_REV_5705) &&
	    tp->link_up) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = true;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !tg3_flag(tp, INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
			    TG3_PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &val);
	tg3_readphy(tp, MII_TG3_ISTAT, &val);

	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = false;
	current_speed = SPEED_UNKNOWN;
	current_duplex = DUPLEX_UNKNOWN;
	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
	tp->link_config.rmt_adv = 0;

	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					  &val);
		if (!err && !(val & (1 << 10))) {
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					     val | (1 << 10));
			goto relink;
		}
	}

	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			bool eee_config_ok = tg3_phy_eee_config_ok(tp);

			if ((bmcr & BMCR_ANENABLE) &&
			    eee_config_ok &&
			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
				current_link_up = true;

			/* EEE settings changes take effect only after a phy
			 * reset. If we have skipped a reset due to Link Flap
			 * Avoidance being enabled, do it now.
			 */
			if (!eee_config_ok &&
			    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
			    !force_reset) {
				tg3_setup_eee(tp);
				tg3_phy_reset(tp);
			}
		} else {
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = true;
			}
		}

		if (current_link_up &&
		    tp->link_config.active_duplex == DUPLEX_FULL) {
			u32 reg, bit;

			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
				reg = MII_TG3_FET_GEN_STAT;
				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
			} else {
				reg = MII_TG3_EXT_STAT;
				bit = MII_TG3_EXT_STAT_MDIX;
			}

			if (!tg3_readphy(tp, reg, &val) && (val & bit))
				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;

			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
		}
	}

relink:
	if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		tg3_phy_copper_begin(tp);

		if (tg3_flag(tp, ROBOSWITCH)) {
			current_link_up = true;
			/* FIXME: when BCM5325 switch is used use 100 MBit/s */
			current_speed = SPEED_1000;
			current_duplex = DUPLEX_FULL;
			tp->link_config.active_speed = current_speed;
			tp->link_config.active_duplex = current_duplex;
		}

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			current_link_up = true;
	}

	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	/* In order for the 5750 core in the BCM4785 chip to work properly
	 * in RGMII mode, the LED Control Register must be set up.
	 */
	if (tg3_flag(tp, RGMII_MODE)) {
		u32 led_ctrl = tr32(MAC_LED_CTRL);
		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);

		if (tp->link_config.active_speed == SPEED_10)
			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
		else if (tp->link_config.active_speed == SPEED_100)
			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
				     LED_CTRL_100MBPS_ON);
		else if (tp->link_config.active_speed == SPEED_1000)
			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
				     LED_CTRL_1000MBPS_ON);

		tw32(MAC_LED_CTRL, led_ctrl);
		udelay(40);
	}

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		if (current_link_up &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_phy_eee_adjust(tp, current_link_up);

	if (tg3_flag(tp, USE_LINKCHG_REG)) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
	    current_link_up &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		       (MAC_STATUS_SYNC_CHANGED |
			MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_CLKREQ_EN);
		else
			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
						 PCI_EXP_LNKCTL_CLKREQ_EN);
	}

	tg3_test_and_report_link_chg(tp, current_link_up);

	return 0;
}

struct tg3_fiber_aneginfo {
	int state;
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	unsigned long link_time, cur_time;

	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06

};

#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

#define ANEG_STATE_SETTLE_TIME	10000
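/*
 * Rough state flow of the software autoneg machine below (inferred from the
 * case arms, not from a spec document):
 *
 *   AN_ENABLE -> RESTART_INIT -> RESTART -> ABILITY_DETECT_INIT ->
 *   ABILITY_DETECT -> ACK_DETECT_INIT -> ACK_DETECT ->
 *   COMPLETE_ACK_INIT -> COMPLETE_ACK -> IDLE_DETECT_INIT ->
 *   IDLE_DETECT -> LINK_OK
 *
 * with fallbacks to AN_ENABLE whenever the partner's config code words
 * disappear mid-sequence.
 */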
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}

static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
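/*
 * Note: fiber_autoneg() above busy-polls the state machine with udelay(1)
 * for up to 195000 ticks, i.e. roughly 195 ms worst case, which is in the
 * same ballpark as the 802.3z link timer.
 */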
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting for the first time or when we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}

static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	bool current_link_up;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = false;

	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = true;
		}
		goto out;
	}

	/* Want auto-negotiation. */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = true;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = true;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;
					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up only
				 * if we have PCS_SYNC and are not receiving
				 * config code words.
				 */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = true;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
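/*
 * tg3_setup_fiber_hw_autoneg() above lets the SG DIG block negotiate in
 * hardware; tg3_setup_fiber_by_hand() below instead drives the software
 * state machine (fiber_autoneg()) and is used when the MAC lacks, or is
 * configured without, hardware autoneg support.
 */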
static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	bool current_link_up = false;

	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = true;
		}
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		mac_status = tr32(MAC_STATUS);
		if (!current_link_up &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = true;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = true;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}

static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	bool current_link_up;
	int i;

	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	if (!tg3_flag(tp, HW_AUTONEG) &&
	    tp->link_up &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling. */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = false;
	tp->link_config.rmt_adv = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = false;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_UNKNOWN;
		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}

static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
{
	int err = 0;
	u32 bmsr, bmcr;
	u16 current_speed = SPEED_UNKNOWN;
	u8 current_duplex = DUPLEX_UNKNOWN;
	bool current_link_up = false;
	u32 local_adv, remote_adv, sgsr;

	if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720) &&
	    !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
	    (sgsr & SERDES_TG3_SGMII_MODE)) {

		if (force_reset)
			tg3_phy_reset(tp);

		tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;

		if (!(sgsr & SERDES_TG3_LINK_UP)) {
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
		} else {
			current_link_up = true;
			if (sgsr & SERDES_TG3_SPEED_1000) {
				current_speed = SPEED_1000;
				tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
			} else if (sgsr & SERDES_TG3_SPEED_100) {
				current_speed = SPEED_100;
				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
			} else {
				current_speed = SPEED_10;
				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
			}

			if (sgsr & SERDES_TG3_FULL_DUPLEX)
				current_duplex = DUPLEX_FULL;
			else
				current_duplex = DUPLEX_HALF;
		}

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		tg3_clear_mac_status(tp);

		goto fiber_setup_done;
	}

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_clear_mac_status(tp);

	if (force_reset)
		tg3_phy_reset(tp);

	tp->link_config.rmt_adv = 0;

	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (tg3_asic_rev(tp) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, newadv;

		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				 ADVERTISE_1000XPAUSE |
				 ADVERTISE_1000XPSE_ASYM |
				 ADVERTISE_SLCT);

		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);

		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, newadv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (tp->link_up) {
				u32 adv;

				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				tg3_carrier_off(tp);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (tg3_asic_rev(tp) == ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = true;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = false;
			}
		}
	}

fiber_setup_done:
	if (current_link_up && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	tg3_test_and_report_link_chg(tp, current_link_up);
	return err;
}
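/*
 * tg3_serdes_parallel_detect() below runs from the driver's periodic timer
 * path: if autoneg is on but no link appears, and we see signal detect
 * without incoming config code words, it forces 1000FD up by parallel
 * detection; if config code words later reappear, it re-enables autoneg.
 */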
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!tp->link_up &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				     MII_TG3_DSP_EXP1_INT_STAT);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (tp->link_up &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}
}

static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		u32 scale;

		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		if (tp->link_up) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!tp->link_up)
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}

/* tp->lock must be held */
static u64 tg3_refclk_read(struct tg3 *tp)
{
	u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
	return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
}

/* tp->lock must be held */
static void tg3_refclk_write(struct tg3 *tp, u64 newval)
{
	u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);

	tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
	tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
}
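/*
 * The stop/resume dance in tg3_refclk_write() above avoids a torn update:
 * the free-running EAV reference clock is halted, both 32-bit halves are
 * loaded, and only then is counting resumed from the new value.
 */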
*tp = netdev_priv(dev); 6146 6147 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | 6148 SOF_TIMESTAMPING_RX_SOFTWARE | 6149 SOF_TIMESTAMPING_SOFTWARE; 6150 6151 if (tg3_flag(tp, PTP_CAPABLE)) { 6152 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE | 6153 SOF_TIMESTAMPING_RX_HARDWARE | 6154 SOF_TIMESTAMPING_RAW_HARDWARE; 6155 } 6156 6157 if (tp->ptp_clock) 6158 info->phc_index = ptp_clock_index(tp->ptp_clock); 6159 else 6160 info->phc_index = -1; 6161 6162 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); 6163 6164 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | 6165 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | 6166 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | 6167 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT); 6168 return 0; 6169 } 6170 6171 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) 6172 { 6173 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); 6174 bool neg_adj = false; 6175 u32 correction = 0; 6176 6177 if (ppb < 0) { 6178 neg_adj = true; 6179 ppb = -ppb; 6180 } 6181 6182 /* Frequency adjustment is performed using hardware with a 24 bit 6183 * accumulator and a programmable correction value. On each clk, the 6184 * correction value gets added to the accumulator and when it 6185 * overflows, the time counter is incremented/decremented. 6186 * 6187 * So conversion from ppb to correction value is 6188 * ppb * (1 << 24) / 1000000000 6189 */ 6190 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) & 6191 TG3_EAV_REF_CLK_CORRECT_MASK; 6192 6193 tg3_full_lock(tp, 0); 6194 6195 if (correction) 6196 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 6197 TG3_EAV_REF_CLK_CORRECT_EN | 6198 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction); 6199 else 6200 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0); 6201 6202 tg3_full_unlock(tp); 6203 6204 return 0; 6205 } 6206 6207 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) 6208 { 6209 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); 6210 6211 tg3_full_lock(tp, 0); 6212 tp->ptp_adjust += delta; 6213 tg3_full_unlock(tp); 6214 6215 return 0; 6216 } 6217 6218 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) 6219 { 6220 u64 ns; 6221 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); 6222 6223 tg3_full_lock(tp, 0); 6224 ns = tg3_refclk_read(tp); 6225 ns += tp->ptp_adjust; 6226 tg3_full_unlock(tp); 6227 6228 *ts = ns_to_timespec64(ns); 6229 6230 return 0; 6231 } 6232 6233 static int tg3_ptp_settime(struct ptp_clock_info *ptp, 6234 const struct timespec64 *ts) 6235 { 6236 u64 ns; 6237 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); 6238 6239 ns = timespec64_to_ns(ts); 6240 6241 tg3_full_lock(tp, 0); 6242 tg3_refclk_write(tp, ns); 6243 tp->ptp_adjust = 0; 6244 tg3_full_unlock(tp); 6245 6246 return 0; 6247 } 6248 6249 static int tg3_ptp_enable(struct ptp_clock_info *ptp, 6250 struct ptp_clock_request *rq, int on) 6251 { 6252 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); 6253 u32 clock_ctl; 6254 int rval = 0; 6255 6256 switch (rq->type) { 6257 case PTP_CLK_REQ_PEROUT: 6258 if (rq->perout.index != 0) 6259 return -EINVAL; 6260 6261 tg3_full_lock(tp, 0); 6262 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL); 6263 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK; 6264 6265 if (on) { 6266 u64 nsec; 6267 6268 nsec = rq->perout.start.sec * 1000000000ULL + 6269 rq->perout.start.nsec; 6270 6271 if (rq->perout.period.sec || rq->perout.period.nsec) { 6272 netdev_warn(tp->dev, 6273 "Device supports only a one-shot timesync output, period must be 0\n"); 6274 rval = -EINVAL; 6275 goto 
err_out; 6276 } 6277 6278 if (nsec & (1ULL << 63)) { 6279 netdev_warn(tp->dev, 6280 "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n"); 6281 rval = -EINVAL; 6282 goto err_out; 6283 } 6284 6285 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff)); 6286 tw32(TG3_EAV_WATCHDOG0_MSB, 6287 TG3_EAV_WATCHDOG0_EN | 6288 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK)); 6289 6290 tw32(TG3_EAV_REF_CLCK_CTL, 6291 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0); 6292 } else { 6293 tw32(TG3_EAV_WATCHDOG0_MSB, 0); 6294 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl); 6295 } 6296 6297 err_out: 6298 tg3_full_unlock(tp); 6299 return rval; 6300 6301 default: 6302 break; 6303 } 6304 6305 return -EOPNOTSUPP; 6306 } 6307 6308 static const struct ptp_clock_info tg3_ptp_caps = { 6309 .owner = THIS_MODULE, 6310 .name = "tg3 clock", 6311 .max_adj = 250000000, 6312 .n_alarm = 0, 6313 .n_ext_ts = 0, 6314 .n_per_out = 1, 6315 .n_pins = 0, 6316 .pps = 0, 6317 .adjfreq = tg3_ptp_adjfreq, 6318 .adjtime = tg3_ptp_adjtime, 6319 .gettime64 = tg3_ptp_gettime, 6320 .settime64 = tg3_ptp_settime, 6321 .enable = tg3_ptp_enable, 6322 }; 6323 6324 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock, 6325 struct skb_shared_hwtstamps *timestamp) 6326 { 6327 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps)); 6328 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) + 6329 tp->ptp_adjust); 6330 } 6331 6332 /* tp->lock must be held */ 6333 static void tg3_ptp_init(struct tg3 *tp) 6334 { 6335 if (!tg3_flag(tp, PTP_CAPABLE)) 6336 return; 6337 6338 /* Initialize the hardware clock to the system time. */ 6339 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real())); 6340 tp->ptp_adjust = 0; 6341 tp->ptp_info = tg3_ptp_caps; 6342 } 6343 6344 /* tp->lock must be held */ 6345 static void tg3_ptp_resume(struct tg3 *tp) 6346 { 6347 if (!tg3_flag(tp, PTP_CAPABLE)) 6348 return; 6349 6350 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust); 6351 tp->ptp_adjust = 0; 6352 } 6353 6354 static void tg3_ptp_fini(struct tg3 *tp) 6355 { 6356 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock) 6357 return; 6358 6359 ptp_clock_unregister(tp->ptp_clock); 6360 tp->ptp_clock = NULL; 6361 tp->ptp_adjust = 0; 6362 } 6363 6364 static inline int tg3_irq_sync(struct tg3 *tp) 6365 { 6366 return tp->irq_sync; 6367 } 6368 6369 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len) 6370 { 6371 int i; 6372 6373 dst = (u32 *)((u8 *)dst + off); 6374 for (i = 0; i < len; i += sizeof(u32)) 6375 *dst++ = tr32(off + i); 6376 } 6377 6378 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs) 6379 { 6380 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0); 6381 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200); 6382 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0); 6383 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0); 6384 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04); 6385 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80); 6386 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48); 6387 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04); 6388 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20); 6389 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c); 6390 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c); 6391 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c); 6392 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44); 6393 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04); 6394 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20); 6395 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14); 6396 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08); 6397 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08); 6398 
tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100); 6399 6400 if (tg3_flag(tp, SUPPORT_MSIX)) 6401 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180); 6402 6403 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10); 6404 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58); 6405 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08); 6406 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08); 6407 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04); 6408 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04); 6409 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04); 6410 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04); 6411 6412 if (!tg3_flag(tp, 5705_PLUS)) { 6413 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04); 6414 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04); 6415 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04); 6416 } 6417 6418 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110); 6419 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120); 6420 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c); 6421 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04); 6422 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c); 6423 6424 if (tg3_flag(tp, NVRAM)) 6425 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24); 6426 } 6427 6428 static void tg3_dump_state(struct tg3 *tp) 6429 { 6430 int i; 6431 u32 *regs; 6432 6433 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC); 6434 if (!regs) 6435 return; 6436 6437 if (tg3_flag(tp, PCI_EXPRESS)) { 6438 /* Read up to but not including private PCI registers */ 6439 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32)) 6440 regs[i / sizeof(u32)] = tr32(i); 6441 } else 6442 tg3_dump_legacy_regs(tp, regs); 6443 6444 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) { 6445 if (!regs[i + 0] && !regs[i + 1] && 6446 !regs[i + 2] && !regs[i + 3]) 6447 continue; 6448 6449 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n", 6450 i * 4, 6451 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]); 6452 } 6453 6454 kfree(regs); 6455 6456 for (i = 0; i < tp->irq_cnt; i++) { 6457 struct tg3_napi *tnapi = &tp->napi[i]; 6458 6459 /* SW status block */ 6460 netdev_err(tp->dev, 6461 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n", 6462 i, 6463 tnapi->hw_status->status, 6464 tnapi->hw_status->status_tag, 6465 tnapi->hw_status->rx_jumbo_consumer, 6466 tnapi->hw_status->rx_consumer, 6467 tnapi->hw_status->rx_mini_consumer, 6468 tnapi->hw_status->idx[0].rx_producer, 6469 tnapi->hw_status->idx[0].tx_consumer); 6470 6471 netdev_err(tp->dev, 6472 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n", 6473 i, 6474 tnapi->last_tag, tnapi->last_irq_tag, 6475 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending, 6476 tnapi->rx_rcb_ptr, 6477 tnapi->prodring.rx_std_prod_idx, 6478 tnapi->prodring.rx_std_cons_idx, 6479 tnapi->prodring.rx_jmb_prod_idx, 6480 tnapi->prodring.rx_jmb_cons_idx); 6481 } 6482 } 6483 6484 /* This is called whenever we suspect that the system chipset is re- 6485 * ordering the sequence of MMIO to the tx send mailbox. The symptom 6486 * is bogus tx completions. We try to recover by setting the 6487 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later 6488 * in the workqueue. 6489 */ 6490 static void tg3_tx_recover(struct tg3 *tp) 6491 { 6492 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) || 6493 tp->write32_tx_mbox == tg3_write_indirect_mbox); 6494 6495 netdev_warn(tp->dev, 6496 "The system may be re-ordering memory-mapped I/O " 6497 "cycles to the network device, attempting to recover. 
" 6498 "Please report the problem to the driver maintainer " 6499 "and include system chipset information.\n"); 6500 6501 tg3_flag_set(tp, TX_RECOVERY_PENDING); 6502 } 6503 6504 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi) 6505 { 6506 /* Tell compiler to fetch tx indices from memory. */ 6507 barrier(); 6508 return tnapi->tx_pending - 6509 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1)); 6510 } 6511 6512 /* Tigon3 never reports partial packet sends. So we do not 6513 * need special logic to handle SKBs that have not had all 6514 * of their frags sent yet, like SunGEM does. 6515 */ 6516 static void tg3_tx(struct tg3_napi *tnapi) 6517 { 6518 struct tg3 *tp = tnapi->tp; 6519 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer; 6520 u32 sw_idx = tnapi->tx_cons; 6521 struct netdev_queue *txq; 6522 int index = tnapi - tp->napi; 6523 unsigned int pkts_compl = 0, bytes_compl = 0; 6524 6525 if (tg3_flag(tp, ENABLE_TSS)) 6526 index--; 6527 6528 txq = netdev_get_tx_queue(tp->dev, index); 6529 6530 while (sw_idx != hw_idx) { 6531 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx]; 6532 struct sk_buff *skb = ri->skb; 6533 int i, tx_bug = 0; 6534 6535 if (unlikely(skb == NULL)) { 6536 tg3_tx_recover(tp); 6537 return; 6538 } 6539 6540 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) { 6541 struct skb_shared_hwtstamps timestamp; 6542 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB); 6543 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32; 6544 6545 tg3_hwclock_to_timestamp(tp, hwclock, ×tamp); 6546 6547 skb_tstamp_tx(skb, ×tamp); 6548 } 6549 6550 pci_unmap_single(tp->pdev, 6551 dma_unmap_addr(ri, mapping), 6552 skb_headlen(skb), 6553 PCI_DMA_TODEVICE); 6554 6555 ri->skb = NULL; 6556 6557 while (ri->fragmented) { 6558 ri->fragmented = false; 6559 sw_idx = NEXT_TX(sw_idx); 6560 ri = &tnapi->tx_buffers[sw_idx]; 6561 } 6562 6563 sw_idx = NEXT_TX(sw_idx); 6564 6565 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 6566 ri = &tnapi->tx_buffers[sw_idx]; 6567 if (unlikely(ri->skb != NULL || sw_idx == hw_idx)) 6568 tx_bug = 1; 6569 6570 pci_unmap_page(tp->pdev, 6571 dma_unmap_addr(ri, mapping), 6572 skb_frag_size(&skb_shinfo(skb)->frags[i]), 6573 PCI_DMA_TODEVICE); 6574 6575 while (ri->fragmented) { 6576 ri->fragmented = false; 6577 sw_idx = NEXT_TX(sw_idx); 6578 ri = &tnapi->tx_buffers[sw_idx]; 6579 } 6580 6581 sw_idx = NEXT_TX(sw_idx); 6582 } 6583 6584 pkts_compl++; 6585 bytes_compl += skb->len; 6586 6587 dev_kfree_skb_any(skb); 6588 6589 if (unlikely(tx_bug)) { 6590 tg3_tx_recover(tp); 6591 return; 6592 } 6593 } 6594 6595 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl); 6596 6597 tnapi->tx_cons = sw_idx; 6598 6599 /* Need to make the tx_cons update visible to tg3_start_xmit() 6600 * before checking for netif_queue_stopped(). Without the 6601 * memory barrier, there is a small possibility that tg3_start_xmit() 6602 * will miss it and cause the queue to be stopped forever. 
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}

static void tg3_frag_free(bool is_frag, void *data)
{
	if (is_frag)
		skb_free_frag(data);
	else
		kfree(data);
}

static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
{
	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	if (!ri->data)
		return;

	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
			 map_sz, PCI_DMA_FROMDEVICE);
	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
	ri->data = NULL;
}


/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			     u32 opaque_key, u32 dest_idx_unmasked,
			     unsigned int *frag_size)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	if (skb_size <= PAGE_SIZE) {
		data = netdev_alloc_frag(skb_size);
		*frag_size = skb_size;
	} else {
		data = kmalloc(skb_size, GFP_ATOMIC);
		*frag_size = 0;
	}
	if (!data)
		return -ENOMEM;

	mapping = pci_map_single(tp->pdev,
				 data + TG3_RX_OFFSET(tp),
				 data_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
		tg3_frag_free(skb_size <= PAGE_SIZE, data);
		return -EIO;
	}

	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return data_size;
}

/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.
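 * (Recycling transfers the existing DMA mapping and buffer straight
 * to the destination descriptor; no new allocation takes place.)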
 * See notes above tg3_alloc_rx_data for full details.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->data = NULL;
}

/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
 * it is first placed into the on-chip RAM.  When the packet's length
 * is known, the chip walks down the TG3_BDINFO entries to select the
 * ring.  Each TG3_BDINFO specifies a MAXLEN field and the first
 * TG3_BDINFO which is within the range of the new packet's length is
 * chosen.
 *
 * The "separate ring for rx status" scheme may sound odd, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
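	 * The rmb() below keeps the descriptor reads in the loop from
	 * being speculated ahead of the hw_idx load, so we never parse
	 * a return-ring entry the hardware has not finished writing.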
6803 */ 6804 rmb(); 6805 work_mask = 0; 6806 received = 0; 6807 std_prod_idx = tpr->rx_std_prod_idx; 6808 jmb_prod_idx = tpr->rx_jmb_prod_idx; 6809 while (sw_idx != hw_idx && budget > 0) { 6810 struct ring_info *ri; 6811 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx]; 6812 unsigned int len; 6813 struct sk_buff *skb; 6814 dma_addr_t dma_addr; 6815 u32 opaque_key, desc_idx, *post_ptr; 6816 u8 *data; 6817 u64 tstamp = 0; 6818 6819 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; 6820 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; 6821 if (opaque_key == RXD_OPAQUE_RING_STD) { 6822 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx]; 6823 dma_addr = dma_unmap_addr(ri, mapping); 6824 data = ri->data; 6825 post_ptr = &std_prod_idx; 6826 rx_std_posted++; 6827 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { 6828 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx]; 6829 dma_addr = dma_unmap_addr(ri, mapping); 6830 data = ri->data; 6831 post_ptr = &jmb_prod_idx; 6832 } else 6833 goto next_pkt_nopost; 6834 6835 work_mask |= opaque_key; 6836 6837 if (desc->err_vlan & RXD_ERR_MASK) { 6838 drop_it: 6839 tg3_recycle_rx(tnapi, tpr, opaque_key, 6840 desc_idx, *post_ptr); 6841 drop_it_no_recycle: 6842 /* Other statistics kept track of by card. */ 6843 tp->rx_dropped++; 6844 goto next_pkt; 6845 } 6846 6847 prefetch(data + TG3_RX_OFFSET(tp)); 6848 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 6849 ETH_FCS_LEN; 6850 6851 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) == 6852 RXD_FLAG_PTPSTAT_PTPV1 || 6853 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) == 6854 RXD_FLAG_PTPSTAT_PTPV2) { 6855 tstamp = tr32(TG3_RX_TSTAMP_LSB); 6856 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32; 6857 } 6858 6859 if (len > TG3_RX_COPY_THRESH(tp)) { 6860 int skb_size; 6861 unsigned int frag_size; 6862 6863 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key, 6864 *post_ptr, &frag_size); 6865 if (skb_size < 0) 6866 goto drop_it; 6867 6868 pci_unmap_single(tp->pdev, dma_addr, skb_size, 6869 PCI_DMA_FROMDEVICE); 6870 6871 /* Ensure that the update to the data happens 6872 * after the usage of the old DMA mapping. 
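			 * (With RSS, a NULL ->data is also what marks this
			 * slot as free to tg3_rx_prodring_xfer(), so the
			 * store below must not be reordered ahead of the
			 * unmap.)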
6873 */ 6874 smp_wmb(); 6875 6876 ri->data = NULL; 6877 6878 skb = build_skb(data, frag_size); 6879 if (!skb) { 6880 tg3_frag_free(frag_size != 0, data); 6881 goto drop_it_no_recycle; 6882 } 6883 skb_reserve(skb, TG3_RX_OFFSET(tp)); 6884 } else { 6885 tg3_recycle_rx(tnapi, tpr, opaque_key, 6886 desc_idx, *post_ptr); 6887 6888 skb = netdev_alloc_skb(tp->dev, 6889 len + TG3_RAW_IP_ALIGN); 6890 if (skb == NULL) 6891 goto drop_it_no_recycle; 6892 6893 skb_reserve(skb, TG3_RAW_IP_ALIGN); 6894 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); 6895 memcpy(skb->data, 6896 data + TG3_RX_OFFSET(tp), 6897 len); 6898 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); 6899 } 6900 6901 skb_put(skb, len); 6902 if (tstamp) 6903 tg3_hwclock_to_timestamp(tp, tstamp, 6904 skb_hwtstamps(skb)); 6905 6906 if ((tp->dev->features & NETIF_F_RXCSUM) && 6907 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && 6908 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK) 6909 >> RXD_TCPCSUM_SHIFT) == 0xffff)) 6910 skb->ip_summed = CHECKSUM_UNNECESSARY; 6911 else 6912 skb_checksum_none_assert(skb); 6913 6914 skb->protocol = eth_type_trans(skb, tp->dev); 6915 6916 if (len > (tp->dev->mtu + ETH_HLEN) && 6917 skb->protocol != htons(ETH_P_8021Q) && 6918 skb->protocol != htons(ETH_P_8021AD)) { 6919 dev_kfree_skb_any(skb); 6920 goto drop_it_no_recycle; 6921 } 6922 6923 if (desc->type_flags & RXD_FLAG_VLAN && 6924 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) 6925 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 6926 desc->err_vlan & RXD_VLAN_MASK); 6927 6928 napi_gro_receive(&tnapi->napi, skb); 6929 6930 received++; 6931 budget--; 6932 6933 next_pkt: 6934 (*post_ptr)++; 6935 6936 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) { 6937 tpr->rx_std_prod_idx = std_prod_idx & 6938 tp->rx_std_ring_mask; 6939 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, 6940 tpr->rx_std_prod_idx); 6941 work_mask &= ~RXD_OPAQUE_RING_STD; 6942 rx_std_posted = 0; 6943 } 6944 next_pkt_nopost: 6945 sw_idx++; 6946 sw_idx &= tp->rx_ret_ring_mask; 6947 6948 /* Refresh hw_idx to see if there is new work */ 6949 if (sw_idx == hw_idx) { 6950 hw_idx = *(tnapi->rx_rcb_prod_idx); 6951 rmb(); 6952 } 6953 } 6954 6955 /* ACK the status ring. */ 6956 tnapi->rx_rcb_ptr = sw_idx; 6957 tw32_rx_mbox(tnapi->consmbox, sw_idx); 6958 6959 /* Refill RX ring(s). */ 6960 if (!tg3_flag(tp, ENABLE_RSS)) { 6961 /* Sync BD data before updating mailbox */ 6962 wmb(); 6963 6964 if (work_mask & RXD_OPAQUE_RING_STD) { 6965 tpr->rx_std_prod_idx = std_prod_idx & 6966 tp->rx_std_ring_mask; 6967 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, 6968 tpr->rx_std_prod_idx); 6969 } 6970 if (work_mask & RXD_OPAQUE_RING_JUMBO) { 6971 tpr->rx_jmb_prod_idx = jmb_prod_idx & 6972 tp->rx_jmb_ring_mask; 6973 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, 6974 tpr->rx_jmb_prod_idx); 6975 } 6976 mmiowb(); 6977 } else if (work_mask) { 6978 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be 6979 * updated before the producer indices can be updated. 
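		 * The smp_wmb() below pairs with the smp_rmb() that
		 * tg3_rx_prodring_xfer() issues after loading the
		 * producer index; that is the consumer which drains
		 * these per-vector rings from the napi[1] poll.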
6980 */ 6981 smp_wmb(); 6982 6983 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask; 6984 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask; 6985 6986 if (tnapi != &tp->napi[1]) { 6987 tp->rx_refill = true; 6988 napi_schedule(&tp->napi[1].napi); 6989 } 6990 } 6991 6992 return received; 6993 } 6994 6995 static void tg3_poll_link(struct tg3 *tp) 6996 { 6997 /* handle link change and other phy events */ 6998 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) { 6999 struct tg3_hw_status *sblk = tp->napi[0].hw_status; 7000 7001 if (sblk->status & SD_STATUS_LINK_CHG) { 7002 sblk->status = SD_STATUS_UPDATED | 7003 (sblk->status & ~SD_STATUS_LINK_CHG); 7004 spin_lock(&tp->lock); 7005 if (tg3_flag(tp, USE_PHYLIB)) { 7006 tw32_f(MAC_STATUS, 7007 (MAC_STATUS_SYNC_CHANGED | 7008 MAC_STATUS_CFG_CHANGED | 7009 MAC_STATUS_MI_COMPLETION | 7010 MAC_STATUS_LNKSTATE_CHANGED)); 7011 udelay(40); 7012 } else 7013 tg3_setup_phy(tp, false); 7014 spin_unlock(&tp->lock); 7015 } 7016 } 7017 } 7018 7019 static int tg3_rx_prodring_xfer(struct tg3 *tp, 7020 struct tg3_rx_prodring_set *dpr, 7021 struct tg3_rx_prodring_set *spr) 7022 { 7023 u32 si, di, cpycnt, src_prod_idx; 7024 int i, err = 0; 7025 7026 while (1) { 7027 src_prod_idx = spr->rx_std_prod_idx; 7028 7029 /* Make sure updates to the rx_std_buffers[] entries and the 7030 * standard producer index are seen in the correct order. 7031 */ 7032 smp_rmb(); 7033 7034 if (spr->rx_std_cons_idx == src_prod_idx) 7035 break; 7036 7037 if (spr->rx_std_cons_idx < src_prod_idx) 7038 cpycnt = src_prod_idx - spr->rx_std_cons_idx; 7039 else 7040 cpycnt = tp->rx_std_ring_mask + 1 - 7041 spr->rx_std_cons_idx; 7042 7043 cpycnt = min(cpycnt, 7044 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx); 7045 7046 si = spr->rx_std_cons_idx; 7047 di = dpr->rx_std_prod_idx; 7048 7049 for (i = di; i < di + cpycnt; i++) { 7050 if (dpr->rx_std_buffers[i].data) { 7051 cpycnt = i - di; 7052 err = -ENOSPC; 7053 break; 7054 } 7055 } 7056 7057 if (!cpycnt) 7058 break; 7059 7060 /* Ensure that updates to the rx_std_buffers ring and the 7061 * shadowed hardware producer ring from tg3_recycle_skb() are 7062 * ordered correctly WRT the skb check above. 7063 */ 7064 smp_rmb(); 7065 7066 memcpy(&dpr->rx_std_buffers[di], 7067 &spr->rx_std_buffers[si], 7068 cpycnt * sizeof(struct ring_info)); 7069 7070 for (i = 0; i < cpycnt; i++, di++, si++) { 7071 struct tg3_rx_buffer_desc *sbd, *dbd; 7072 sbd = &spr->rx_std[si]; 7073 dbd = &dpr->rx_std[di]; 7074 dbd->addr_hi = sbd->addr_hi; 7075 dbd->addr_lo = sbd->addr_lo; 7076 } 7077 7078 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) & 7079 tp->rx_std_ring_mask; 7080 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) & 7081 tp->rx_std_ring_mask; 7082 } 7083 7084 while (1) { 7085 src_prod_idx = spr->rx_jmb_prod_idx; 7086 7087 /* Make sure updates to the rx_jmb_buffers[] entries and 7088 * the jumbo producer index are seen in the correct order. 
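		 * (Same discipline as the standard-ring loop above:
		 * load the producer index first, then read the entries
		 * it publishes.)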
7089 */ 7090 smp_rmb(); 7091 7092 if (spr->rx_jmb_cons_idx == src_prod_idx) 7093 break; 7094 7095 if (spr->rx_jmb_cons_idx < src_prod_idx) 7096 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx; 7097 else 7098 cpycnt = tp->rx_jmb_ring_mask + 1 - 7099 spr->rx_jmb_cons_idx; 7100 7101 cpycnt = min(cpycnt, 7102 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx); 7103 7104 si = spr->rx_jmb_cons_idx; 7105 di = dpr->rx_jmb_prod_idx; 7106 7107 for (i = di; i < di + cpycnt; i++) { 7108 if (dpr->rx_jmb_buffers[i].data) { 7109 cpycnt = i - di; 7110 err = -ENOSPC; 7111 break; 7112 } 7113 } 7114 7115 if (!cpycnt) 7116 break; 7117 7118 /* Ensure that updates to the rx_jmb_buffers ring and the 7119 * shadowed hardware producer ring from tg3_recycle_skb() are 7120 * ordered correctly WRT the skb check above. 7121 */ 7122 smp_rmb(); 7123 7124 memcpy(&dpr->rx_jmb_buffers[di], 7125 &spr->rx_jmb_buffers[si], 7126 cpycnt * sizeof(struct ring_info)); 7127 7128 for (i = 0; i < cpycnt; i++, di++, si++) { 7129 struct tg3_rx_buffer_desc *sbd, *dbd; 7130 sbd = &spr->rx_jmb[si].std; 7131 dbd = &dpr->rx_jmb[di].std; 7132 dbd->addr_hi = sbd->addr_hi; 7133 dbd->addr_lo = sbd->addr_lo; 7134 } 7135 7136 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) & 7137 tp->rx_jmb_ring_mask; 7138 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) & 7139 tp->rx_jmb_ring_mask; 7140 } 7141 7142 return err; 7143 } 7144 7145 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) 7146 { 7147 struct tg3 *tp = tnapi->tp; 7148 7149 /* run TX completion thread */ 7150 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) { 7151 tg3_tx(tnapi); 7152 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) 7153 return work_done; 7154 } 7155 7156 if (!tnapi->rx_rcb_prod_idx) 7157 return work_done; 7158 7159 /* run RX thread, within the bounds set by NAPI. 
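	 * tg3_rx() is handed budget - work_done, so repeated passes
	 * through the caller's poll loop never exceed the overall NAPI
	 * budget.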
7160 * All RX "locking" is done by ensuring outside 7161 * code synchronizes with tg3->napi.poll() 7162 */ 7163 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) 7164 work_done += tg3_rx(tnapi, budget - work_done); 7165 7166 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) { 7167 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring; 7168 int i, err = 0; 7169 u32 std_prod_idx = dpr->rx_std_prod_idx; 7170 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx; 7171 7172 tp->rx_refill = false; 7173 for (i = 1; i <= tp->rxq_cnt; i++) 7174 err |= tg3_rx_prodring_xfer(tp, dpr, 7175 &tp->napi[i].prodring); 7176 7177 wmb(); 7178 7179 if (std_prod_idx != dpr->rx_std_prod_idx) 7180 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, 7181 dpr->rx_std_prod_idx); 7182 7183 if (jmb_prod_idx != dpr->rx_jmb_prod_idx) 7184 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, 7185 dpr->rx_jmb_prod_idx); 7186 7187 mmiowb(); 7188 7189 if (err) 7190 tw32_f(HOSTCC_MODE, tp->coal_now); 7191 } 7192 7193 return work_done; 7194 } 7195 7196 static inline void tg3_reset_task_schedule(struct tg3 *tp) 7197 { 7198 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags)) 7199 schedule_work(&tp->reset_task); 7200 } 7201 7202 static inline void tg3_reset_task_cancel(struct tg3 *tp) 7203 { 7204 cancel_work_sync(&tp->reset_task); 7205 tg3_flag_clear(tp, RESET_TASK_PENDING); 7206 tg3_flag_clear(tp, TX_RECOVERY_PENDING); 7207 } 7208 7209 static int tg3_poll_msix(struct napi_struct *napi, int budget) 7210 { 7211 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); 7212 struct tg3 *tp = tnapi->tp; 7213 int work_done = 0; 7214 struct tg3_hw_status *sblk = tnapi->hw_status; 7215 7216 while (1) { 7217 work_done = tg3_poll_work(tnapi, work_done, budget); 7218 7219 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) 7220 goto tx_recovery; 7221 7222 if (unlikely(work_done >= budget)) 7223 break; 7224 7225 /* tp->last_tag is used in tg3_int_reenable() below 7226 * to tell the hw how much work has been processed, 7227 * so we must read it before checking for more work. 7228 */ 7229 tnapi->last_tag = sblk->status_tag; 7230 tnapi->last_irq_tag = tnapi->last_tag; 7231 rmb(); 7232 7233 /* check for RX/TX work to do */ 7234 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons && 7235 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) { 7236 7237 /* This test here is not race free, but will reduce 7238 * the number of interrupts by looping again. 7239 */ 7240 if (tnapi == &tp->napi[1] && tp->rx_refill) 7241 continue; 7242 7243 napi_complete_done(napi, work_done); 7244 /* Reenable interrupts. */ 7245 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); 7246 7247 /* This test here is synchronized by napi_schedule() 7248 * and napi_complete() to close the race condition. 7249 */ 7250 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) { 7251 tw32(HOSTCC_MODE, tp->coalesce_mode | 7252 HOSTCC_MODE_ENABLE | 7253 tnapi->coal_now); 7254 } 7255 mmiowb(); 7256 break; 7257 } 7258 } 7259 7260 return work_done; 7261 7262 tx_recovery: 7263 /* work_done is guaranteed to be less than budget. */ 7264 napi_complete(napi); 7265 tg3_reset_task_schedule(tp); 7266 return work_done; 7267 } 7268 7269 static void tg3_process_error(struct tg3 *tp) 7270 { 7271 u32 val; 7272 bool real_error = false; 7273 7274 if (tg3_flag(tp, ERROR_PROCESSED)) 7275 return; 7276 7277 /* Check Flow Attention register */ 7278 val = tr32(HOSTCC_FLOW_ATTN); 7279 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) { 7280 netdev_err(tp->dev, "FLOW Attention error. 
Resetting chip.\n"); 7281 real_error = true; 7282 } 7283 7284 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) { 7285 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n"); 7286 real_error = true; 7287 } 7288 7289 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) { 7290 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n"); 7291 real_error = true; 7292 } 7293 7294 if (!real_error) 7295 return; 7296 7297 tg3_dump_state(tp); 7298 7299 tg3_flag_set(tp, ERROR_PROCESSED); 7300 tg3_reset_task_schedule(tp); 7301 } 7302 7303 static int tg3_poll(struct napi_struct *napi, int budget) 7304 { 7305 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); 7306 struct tg3 *tp = tnapi->tp; 7307 int work_done = 0; 7308 struct tg3_hw_status *sblk = tnapi->hw_status; 7309 7310 while (1) { 7311 if (sblk->status & SD_STATUS_ERROR) 7312 tg3_process_error(tp); 7313 7314 tg3_poll_link(tp); 7315 7316 work_done = tg3_poll_work(tnapi, work_done, budget); 7317 7318 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) 7319 goto tx_recovery; 7320 7321 if (unlikely(work_done >= budget)) 7322 break; 7323 7324 if (tg3_flag(tp, TAGGED_STATUS)) { 7325 /* tp->last_tag is used in tg3_int_reenable() below 7326 * to tell the hw how much work has been processed, 7327 * so we must read it before checking for more work. 7328 */ 7329 tnapi->last_tag = sblk->status_tag; 7330 tnapi->last_irq_tag = tnapi->last_tag; 7331 rmb(); 7332 } else 7333 sblk->status &= ~SD_STATUS_UPDATED; 7334 7335 if (likely(!tg3_has_work(tnapi))) { 7336 napi_complete_done(napi, work_done); 7337 tg3_int_reenable(tnapi); 7338 break; 7339 } 7340 } 7341 7342 return work_done; 7343 7344 tx_recovery: 7345 /* work_done is guaranteed to be less than budget. */ 7346 napi_complete(napi); 7347 tg3_reset_task_schedule(tp); 7348 return work_done; 7349 } 7350 7351 static void tg3_napi_disable(struct tg3 *tp) 7352 { 7353 int i; 7354 7355 for (i = tp->irq_cnt - 1; i >= 0; i--) 7356 napi_disable(&tp->napi[i].napi); 7357 } 7358 7359 static void tg3_napi_enable(struct tg3 *tp) 7360 { 7361 int i; 7362 7363 for (i = 0; i < tp->irq_cnt; i++) 7364 napi_enable(&tp->napi[i].napi); 7365 } 7366 7367 static void tg3_napi_init(struct tg3 *tp) 7368 { 7369 int i; 7370 7371 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64); 7372 for (i = 1; i < tp->irq_cnt; i++) 7373 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64); 7374 } 7375 7376 static void tg3_napi_fini(struct tg3 *tp) 7377 { 7378 int i; 7379 7380 for (i = 0; i < tp->irq_cnt; i++) 7381 netif_napi_del(&tp->napi[i].napi); 7382 } 7383 7384 static inline void tg3_netif_stop(struct tg3 *tp) 7385 { 7386 tp->dev->trans_start = jiffies; /* prevent tx timeout */ 7387 tg3_napi_disable(tp); 7388 netif_carrier_off(tp->dev); 7389 netif_tx_disable(tp->dev); 7390 } 7391 7392 /* tp->lock must be held */ 7393 static inline void tg3_netif_start(struct tg3 *tp) 7394 { 7395 tg3_ptp_resume(tp); 7396 7397 /* NOTE: unconditional netif_tx_wake_all_queues is only 7398 * appropriate so long as all callers are assured to 7399 * have free tx slots (such as after tg3_init_hw) 7400 */ 7401 netif_tx_wake_all_queues(tp->dev); 7402 7403 if (tp->link_up) 7404 netif_carrier_on(tp->dev); 7405 7406 tg3_napi_enable(tp); 7407 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED; 7408 tg3_enable_ints(tp); 7409 } 7410 7411 static void tg3_irq_quiesce(struct tg3 *tp) 7412 __releases(tp->lock) 7413 __acquires(tp->lock) 7414 { 7415 int i; 7416 7417 BUG_ON(tp->irq_sync); 7418 7419 tp->irq_sync = 1; 7420 smp_mb(); 7421 7422 
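	/* irq_sync is now visible to the ISRs, which check it via
	 * tg3_irq_sync() before scheduling NAPI.  The lock is dropped
	 * across synchronize_irq() because that call can block waiting
	 * for in-flight handlers and must not be made under a
	 * BH-disabling spinlock.
	 */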
	spin_unlock_bh(&tp->lock);

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	spin_lock_bh(&tp->lock);
}

/* Fully shut down all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}

static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}

/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so the driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}

/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox.  PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}

static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
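	 * (The _f mailbox variants implement the flush, typically by
	 * reading the register back, which forces the posted write to
	 * reach the device before the handler returns.)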
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  Re-enable
		 * interrupts, and flush that PCI write.
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}

static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
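	 * (Once the tag is recorded, a re-entry with no new status
	 * update fails the status_tag check above and returns
	 * unhandled, letting the IRQ core's spurious-interrupt
	 * accounting eventually shut the line up.)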
7578 */ 7579 tnapi->last_irq_tag = sblk->status_tag; 7580 7581 if (tg3_irq_sync(tp)) 7582 goto out; 7583 7584 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); 7585 7586 napi_schedule(&tnapi->napi); 7587 7588 out: 7589 return IRQ_RETVAL(handled); 7590 } 7591 7592 /* ISR for interrupt test */ 7593 static irqreturn_t tg3_test_isr(int irq, void *dev_id) 7594 { 7595 struct tg3_napi *tnapi = dev_id; 7596 struct tg3 *tp = tnapi->tp; 7597 struct tg3_hw_status *sblk = tnapi->hw_status; 7598 7599 if ((sblk->status & SD_STATUS_UPDATED) || 7600 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { 7601 tg3_disable_ints(tp); 7602 return IRQ_RETVAL(1); 7603 } 7604 return IRQ_RETVAL(0); 7605 } 7606 7607 #ifdef CONFIG_NET_POLL_CONTROLLER 7608 static void tg3_poll_controller(struct net_device *dev) 7609 { 7610 int i; 7611 struct tg3 *tp = netdev_priv(dev); 7612 7613 if (tg3_irq_sync(tp)) 7614 return; 7615 7616 for (i = 0; i < tp->irq_cnt; i++) 7617 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]); 7618 } 7619 #endif 7620 7621 static void tg3_tx_timeout(struct net_device *dev) 7622 { 7623 struct tg3 *tp = netdev_priv(dev); 7624 7625 if (netif_msg_tx_err(tp)) { 7626 netdev_err(dev, "transmit timed out, resetting\n"); 7627 tg3_dump_state(tp); 7628 } 7629 7630 tg3_reset_task_schedule(tp); 7631 } 7632 7633 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */ 7634 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len) 7635 { 7636 u32 base = (u32) mapping & 0xffffffff; 7637 7638 return base + len + 8 < base; 7639 } 7640 7641 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes 7642 * of any 4GB boundaries: 4G, 8G, etc 7643 */ 7644 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping, 7645 u32 len, u32 mss) 7646 { 7647 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) { 7648 u32 base = (u32) mapping & 0xffffffff; 7649 7650 return ((base + len + (mss & 0x3fff)) < base); 7651 } 7652 return 0; 7653 } 7654 7655 /* Test for DMA addresses > 40-bit */ 7656 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping, 7657 int len) 7658 { 7659 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64) 7660 if (tg3_flag(tp, 40BIT_DMA_BUG)) 7661 return ((u64) mapping + len) > DMA_BIT_MASK(40); 7662 return 0; 7663 #else 7664 return 0; 7665 #endif 7666 } 7667 7668 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd, 7669 dma_addr_t mapping, u32 len, u32 flags, 7670 u32 mss, u32 vlan) 7671 { 7672 txbd->addr_hi = ((u64) mapping >> 32); 7673 txbd->addr_lo = ((u64) mapping & 0xffffffff); 7674 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff); 7675 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT); 7676 } 7677 7678 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget, 7679 dma_addr_t map, u32 len, u32 flags, 7680 u32 mss, u32 vlan) 7681 { 7682 struct tg3 *tp = tnapi->tp; 7683 bool hwbug = false; 7684 7685 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8) 7686 hwbug = true; 7687 7688 if (tg3_4g_overflow_test(map, len)) 7689 hwbug = true; 7690 7691 if (tg3_4g_tso_overflow_test(tp, map, len, mss)) 7692 hwbug = true; 7693 7694 if (tg3_40bit_overflow_test(tp, map, len)) 7695 hwbug = true; 7696 7697 if (tp->dma_limit) { 7698 u32 prvidx = *entry; 7699 u32 tmp_flag = flags & ~TXD_FLAG_END; 7700 while (len > tp->dma_limit && *budget) { 7701 u32 frag_len = tp->dma_limit; 7702 len -= tp->dma_limit; 7703 7704 /* Avoid the 8byte DMA problem */ 7705 if (len <= 8) { 7706 len += tp->dma_limit / 2; 7707 
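				/* Pull half a BD's worth of data back so
				 * the remaining length stays above the
				 * 8-byte limit.
				 */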
frag_len = tp->dma_limit / 2; 7708 } 7709 7710 tnapi->tx_buffers[*entry].fragmented = true; 7711 7712 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, 7713 frag_len, tmp_flag, mss, vlan); 7714 *budget -= 1; 7715 prvidx = *entry; 7716 *entry = NEXT_TX(*entry); 7717 7718 map += frag_len; 7719 } 7720 7721 if (len) { 7722 if (*budget) { 7723 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, 7724 len, flags, mss, vlan); 7725 *budget -= 1; 7726 *entry = NEXT_TX(*entry); 7727 } else { 7728 hwbug = true; 7729 tnapi->tx_buffers[prvidx].fragmented = false; 7730 } 7731 } 7732 } else { 7733 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, 7734 len, flags, mss, vlan); 7735 *entry = NEXT_TX(*entry); 7736 } 7737 7738 return hwbug; 7739 } 7740 7741 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last) 7742 { 7743 int i; 7744 struct sk_buff *skb; 7745 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry]; 7746 7747 skb = txb->skb; 7748 txb->skb = NULL; 7749 7750 pci_unmap_single(tnapi->tp->pdev, 7751 dma_unmap_addr(txb, mapping), 7752 skb_headlen(skb), 7753 PCI_DMA_TODEVICE); 7754 7755 while (txb->fragmented) { 7756 txb->fragmented = false; 7757 entry = NEXT_TX(entry); 7758 txb = &tnapi->tx_buffers[entry]; 7759 } 7760 7761 for (i = 0; i <= last; i++) { 7762 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 7763 7764 entry = NEXT_TX(entry); 7765 txb = &tnapi->tx_buffers[entry]; 7766 7767 pci_unmap_page(tnapi->tp->pdev, 7768 dma_unmap_addr(txb, mapping), 7769 skb_frag_size(frag), PCI_DMA_TODEVICE); 7770 7771 while (txb->fragmented) { 7772 txb->fragmented = false; 7773 entry = NEXT_TX(entry); 7774 txb = &tnapi->tx_buffers[entry]; 7775 } 7776 } 7777 } 7778 7779 /* Workaround 4GB and 40-bit hardware DMA bugs. */ 7780 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, 7781 struct sk_buff **pskb, 7782 u32 *entry, u32 *budget, 7783 u32 base_flags, u32 mss, u32 vlan) 7784 { 7785 struct tg3 *tp = tnapi->tp; 7786 struct sk_buff *new_skb, *skb = *pskb; 7787 dma_addr_t new_addr = 0; 7788 int ret = 0; 7789 7790 if (tg3_asic_rev(tp) != ASIC_REV_5701) 7791 new_skb = skb_copy(skb, GFP_ATOMIC); 7792 else { 7793 int more_headroom = 4 - ((unsigned long)skb->data & 3); 7794 7795 new_skb = skb_copy_expand(skb, 7796 skb_headroom(skb) + more_headroom, 7797 skb_tailroom(skb), GFP_ATOMIC); 7798 } 7799 7800 if (!new_skb) { 7801 ret = -1; 7802 } else { 7803 /* New SKB is guaranteed to be linear. 
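		 * Both skb_copy() and skb_copy_expand() pull all
		 * fragments into the new skb's linear data area, so the
		 * single pci_map_single() of new_skb->len below covers
		 * the whole packet.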
		 */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb_any(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb_any(new_skb);
				ret = -1;
			}
		}
	}

	dev_kfree_skb_any(skb);
	*pskb = new_skb;
	return ret;
}

static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);

/* Use GSO to work around all TSO packets that meet HW bug conditions
 * indicated in tg3_tx_frag_set()
 */
static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
		       struct netdev_queue *txq, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_tx_wake_queue(txq);
	}

	segs = skb_gso_segment(skb, tp->dev->features &
				    ~(NETIF_F_TSO | NETIF_F_TSO6));
	if (IS_ERR(segs) || !segs)
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}

/* hard_start_xmit for all devices */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	u32 budget;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;
	struct iphdr *iph = NULL;
	struct tcphdr *tcph = NULL;
	__sum16 tcp_csum = 0, ip_csum = 0;
	__be16 ip_tot_len = 0;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	budget = tg3_tx_avail(tnapi);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! 
Tx Ring full when queue awake!\n"); 7914 } 7915 return NETDEV_TX_BUSY; 7916 } 7917 7918 entry = tnapi->tx_prod; 7919 base_flags = 0; 7920 7921 mss = skb_shinfo(skb)->gso_size; 7922 if (mss) { 7923 u32 tcp_opt_len, hdr_len; 7924 7925 if (skb_cow_head(skb, 0)) 7926 goto drop; 7927 7928 iph = ip_hdr(skb); 7929 tcp_opt_len = tcp_optlen(skb); 7930 7931 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN; 7932 7933 /* HW/FW can not correctly segment packets that have been 7934 * vlan encapsulated. 7935 */ 7936 if (skb->protocol == htons(ETH_P_8021Q) || 7937 skb->protocol == htons(ETH_P_8021AD)) 7938 return tg3_tso_bug(tp, tnapi, txq, skb); 7939 7940 if (!skb_is_gso_v6(skb)) { 7941 if (unlikely((ETH_HLEN + hdr_len) > 80) && 7942 tg3_flag(tp, TSO_BUG)) 7943 return tg3_tso_bug(tp, tnapi, txq, skb); 7944 7945 ip_csum = iph->check; 7946 ip_tot_len = iph->tot_len; 7947 iph->check = 0; 7948 iph->tot_len = htons(mss + hdr_len); 7949 } 7950 7951 base_flags |= (TXD_FLAG_CPU_PRE_DMA | 7952 TXD_FLAG_CPU_POST_DMA); 7953 7954 tcph = tcp_hdr(skb); 7955 tcp_csum = tcph->check; 7956 7957 if (tg3_flag(tp, HW_TSO_1) || 7958 tg3_flag(tp, HW_TSO_2) || 7959 tg3_flag(tp, HW_TSO_3)) { 7960 tcph->check = 0; 7961 base_flags &= ~TXD_FLAG_TCPUDP_CSUM; 7962 } else { 7963 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 7964 0, IPPROTO_TCP, 0); 7965 } 7966 7967 if (tg3_flag(tp, HW_TSO_3)) { 7968 mss |= (hdr_len & 0xc) << 12; 7969 if (hdr_len & 0x10) 7970 base_flags |= 0x00000010; 7971 base_flags |= (hdr_len & 0x3e0) << 5; 7972 } else if (tg3_flag(tp, HW_TSO_2)) 7973 mss |= hdr_len << 9; 7974 else if (tg3_flag(tp, HW_TSO_1) || 7975 tg3_asic_rev(tp) == ASIC_REV_5705) { 7976 if (tcp_opt_len || iph->ihl > 5) { 7977 int tsflags; 7978 7979 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); 7980 mss |= (tsflags << 11); 7981 } 7982 } else { 7983 if (tcp_opt_len || iph->ihl > 5) { 7984 int tsflags; 7985 7986 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); 7987 base_flags |= tsflags << 12; 7988 } 7989 } 7990 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 7991 /* HW/FW can not correctly checksum packets that have been 7992 * vlan encapsulated. 7993 */ 7994 if (skb->protocol == htons(ETH_P_8021Q) || 7995 skb->protocol == htons(ETH_P_8021AD)) { 7996 if (skb_checksum_help(skb)) 7997 goto drop; 7998 } else { 7999 base_flags |= TXD_FLAG_TCPUDP_CSUM; 8000 } 8001 } 8002 8003 if (tg3_flag(tp, USE_JUMBO_BDFLAG) && 8004 !mss && skb->len > VLAN_ETH_FRAME_LEN) 8005 base_flags |= TXD_FLAG_JMB_PKT; 8006 8007 if (skb_vlan_tag_present(skb)) { 8008 base_flags |= TXD_FLAG_VLAN; 8009 vlan = skb_vlan_tag_get(skb); 8010 } 8011 8012 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) && 8013 tg3_flag(tp, TX_TSTAMP_EN)) { 8014 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 8015 base_flags |= TXD_FLAG_HWTSTAMP; 8016 } 8017 8018 len = skb_headlen(skb); 8019 8020 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE); 8021 if (pci_dma_mapping_error(tp->pdev, mapping)) 8022 goto drop; 8023 8024 8025 tnapi->tx_buffers[entry].skb = skb; 8026 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); 8027 8028 would_hit_hwbug = 0; 8029 8030 if (tg3_flag(tp, 5701_DMA_BUG)) 8031 would_hit_hwbug = 1; 8032 8033 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags | 8034 ((skb_shinfo(skb)->nr_frags == 0) ? 
TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		/* Now loop through additional data
		 * fragments, and queue them.
		 */
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		if (mss) {
			/* If it's a TSO packet, do GSO instead of
			 * allocating and copying to a large linear SKB
			 */
			if (ip_tot_len) {
				iph->check = ip_csum;
				iph->tot_len = ip_tot_len;
			}
			tcph->check = tcp_csum;
			return tg3_tso_bug(tp, tnapi, txq, skb);
		}

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);
	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating mailbox */
	wmb();

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	if (!skb->xmit_more || netif_xmit_stopped(txq)) {
		/* Packets are ready, update Tx producer idx on card.
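		 * When skb->xmit_more says more packets are queued, this
		 * MMIO doorbell is deferred and the batch is posted on
		 * the last skb, unless the queue was stopped above.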
*/ 8120 tw32_tx_mbox(tnapi->prodmbox, entry); 8121 mmiowb(); 8122 } 8123 8124 return NETDEV_TX_OK; 8125 8126 dma_error: 8127 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i); 8128 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL; 8129 drop: 8130 dev_kfree_skb_any(skb); 8131 drop_nofree: 8132 tp->tx_dropped++; 8133 return NETDEV_TX_OK; 8134 } 8135 8136 static void tg3_mac_loopback(struct tg3 *tp, bool enable) 8137 { 8138 if (enable) { 8139 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX | 8140 MAC_MODE_PORT_MODE_MASK); 8141 8142 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK; 8143 8144 if (!tg3_flag(tp, 5705_PLUS)) 8145 tp->mac_mode |= MAC_MODE_LINK_POLARITY; 8146 8147 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) 8148 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 8149 else 8150 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 8151 } else { 8152 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK; 8153 8154 if (tg3_flag(tp, 5705_PLUS) || 8155 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) || 8156 tg3_asic_rev(tp) == ASIC_REV_5700) 8157 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; 8158 } 8159 8160 tw32(MAC_MODE, tp->mac_mode); 8161 udelay(40); 8162 } 8163 8164 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk) 8165 { 8166 u32 val, bmcr, mac_mode, ptest = 0; 8167 8168 tg3_phy_toggle_apd(tp, false); 8169 tg3_phy_toggle_automdix(tp, false); 8170 8171 if (extlpbk && tg3_phy_set_extloopbk(tp)) 8172 return -EIO; 8173 8174 bmcr = BMCR_FULLDPLX; 8175 switch (speed) { 8176 case SPEED_10: 8177 break; 8178 case SPEED_100: 8179 bmcr |= BMCR_SPEED100; 8180 break; 8181 case SPEED_1000: 8182 default: 8183 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 8184 speed = SPEED_100; 8185 bmcr |= BMCR_SPEED100; 8186 } else { 8187 speed = SPEED_1000; 8188 bmcr |= BMCR_SPEED1000; 8189 } 8190 } 8191 8192 if (extlpbk) { 8193 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) { 8194 tg3_readphy(tp, MII_CTRL1000, &val); 8195 val |= CTL1000_AS_MASTER | 8196 CTL1000_ENABLE_MASTER; 8197 tg3_writephy(tp, MII_CTRL1000, val); 8198 } else { 8199 ptest = MII_TG3_FET_PTEST_TRIM_SEL | 8200 MII_TG3_FET_PTEST_TRIM_2; 8201 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest); 8202 } 8203 } else 8204 bmcr |= BMCR_LOOPBACK; 8205 8206 tg3_writephy(tp, MII_BMCR, bmcr); 8207 8208 /* The write needs to be flushed for the FETs */ 8209 if (tp->phy_flags & TG3_PHYFLG_IS_FET) 8210 tg3_readphy(tp, MII_BMCR, &bmcr); 8211 8212 udelay(40); 8213 8214 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) && 8215 tg3_asic_rev(tp) == ASIC_REV_5785) { 8216 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest | 8217 MII_TG3_FET_PTEST_FRC_TX_LINK | 8218 MII_TG3_FET_PTEST_FRC_TX_LOCK); 8219 8220 /* The write needs to be flushed for the AC131 */ 8221 tg3_readphy(tp, MII_TG3_FET_PTEST, &val); 8222 } 8223 8224 /* Reset to prevent losing 1st rx packet intermittently */ 8225 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && 8226 tg3_flag(tp, 5780_CLASS)) { 8227 tw32_f(MAC_RX_MODE, RX_MODE_RESET); 8228 udelay(10); 8229 tw32_f(MAC_RX_MODE, tp->rx_mode); 8230 } 8231 8232 mac_mode = tp->mac_mode & 8233 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); 8234 if (speed == SPEED_1000) 8235 mac_mode |= MAC_MODE_PORT_MODE_GMII; 8236 else 8237 mac_mode |= MAC_MODE_PORT_MODE_MII; 8238 8239 if (tg3_asic_rev(tp) == ASIC_REV_5700) { 8240 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK; 8241 8242 if (masked_phy_id == TG3_PHY_ID_BCM5401) 8243 mac_mode &= ~MAC_MODE_LINK_POLARITY; 8244 else if (masked_phy_id == TG3_PHY_ID_BCM5411) 8245 mac_mode |= MAC_MODE_LINK_POLARITY; 8246 8247 tg3_writephy(tp, MII_TG3_EXT_CTRL, 8248 MII_TG3_EXT_CTRL_LNK3_LED_MODE); 8249 } 8250 8251 
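/* Commit the loopback port mode. As elsewhere in this driver, a
 * MAC_MODE write is followed by a 40 usec delay to let the MAC
 * settle before it is touched again.
 */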
tw32(MAC_MODE, mac_mode); 8252 udelay(40); 8253 8254 return 0; 8255 } 8256 8257 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features) 8258 { 8259 struct tg3 *tp = netdev_priv(dev); 8260 8261 if (features & NETIF_F_LOOPBACK) { 8262 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK) 8263 return; 8264 8265 spin_lock_bh(&tp->lock); 8266 tg3_mac_loopback(tp, true); 8267 netif_carrier_on(tp->dev); 8268 spin_unlock_bh(&tp->lock); 8269 netdev_info(dev, "Internal MAC loopback mode enabled.\n"); 8270 } else { 8271 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)) 8272 return; 8273 8274 spin_lock_bh(&tp->lock); 8275 tg3_mac_loopback(tp, false); 8276 /* Force link status check */ 8277 tg3_setup_phy(tp, true); 8278 spin_unlock_bh(&tp->lock); 8279 netdev_info(dev, "Internal MAC loopback mode disabled.\n"); 8280 } 8281 } 8282 8283 static netdev_features_t tg3_fix_features(struct net_device *dev, 8284 netdev_features_t features) 8285 { 8286 struct tg3 *tp = netdev_priv(dev); 8287 8288 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS)) 8289 features &= ~NETIF_F_ALL_TSO; 8290 8291 return features; 8292 } 8293 8294 static int tg3_set_features(struct net_device *dev, netdev_features_t features) 8295 { 8296 netdev_features_t changed = dev->features ^ features; 8297 8298 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev)) 8299 tg3_set_loopback(dev, features); 8300 8301 return 0; 8302 } 8303 8304 static void tg3_rx_prodring_free(struct tg3 *tp, 8305 struct tg3_rx_prodring_set *tpr) 8306 { 8307 int i; 8308 8309 if (tpr != &tp->napi[0].prodring) { 8310 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx; 8311 i = (i + 1) & tp->rx_std_ring_mask) 8312 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i], 8313 tp->rx_pkt_map_sz); 8314 8315 if (tg3_flag(tp, JUMBO_CAPABLE)) { 8316 for (i = tpr->rx_jmb_cons_idx; 8317 i != tpr->rx_jmb_prod_idx; 8318 i = (i + 1) & tp->rx_jmb_ring_mask) { 8319 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i], 8320 TG3_RX_JMB_MAP_SZ); 8321 } 8322 } 8323 8324 return; 8325 } 8326 8327 for (i = 0; i <= tp->rx_std_ring_mask; i++) 8328 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i], 8329 tp->rx_pkt_map_sz); 8330 8331 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) { 8332 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) 8333 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i], 8334 TG3_RX_JMB_MAP_SZ); 8335 } 8336 } 8337 8338 /* Initialize rx rings for packet processing. 8339 * 8340 * The chip has been shut down and the driver detached from 8341 * the networking, so no interrupts or new tx packets will 8342 * end up in the driver. tp->{tx,}lock are held and thus 8343 * we may not sleep. 8344 */ 8345 static int tg3_rx_prodring_alloc(struct tg3 *tp, 8346 struct tg3_rx_prodring_set *tpr) 8347 { 8348 u32 i, rx_pkt_dma_sz; 8349 8350 tpr->rx_std_cons_idx = 0; 8351 tpr->rx_std_prod_idx = 0; 8352 tpr->rx_jmb_cons_idx = 0; 8353 tpr->rx_jmb_prod_idx = 0; 8354 8355 if (tpr != &tp->napi[0].prodring) { 8356 memset(&tpr->rx_std_buffers[0], 0, 8357 TG3_RX_STD_BUFF_RING_SIZE(tp)); 8358 if (tpr->rx_jmb_buffers) 8359 memset(&tpr->rx_jmb_buffers[0], 0, 8360 TG3_RX_JMB_BUFF_RING_SIZE(tp)); 8361 goto done; 8362 } 8363 8364 /* Zero out all descriptors. 
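 * On 5780-class chips jumbo frames travel through the standard
 * ring, so the per-buffer DMA size chosen below is bumped to the
 * jumbo size whenever the MTU requires it.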
*/ 8365 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp)); 8366 8367 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ; 8368 if (tg3_flag(tp, 5780_CLASS) && 8369 tp->dev->mtu > ETH_DATA_LEN) 8370 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ; 8371 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz); 8372 8373 /* Initialize invariants of the rings, we only set this 8374 * stuff once. This works because the card does not 8375 * write into the rx buffer posting rings. 8376 */ 8377 for (i = 0; i <= tp->rx_std_ring_mask; i++) { 8378 struct tg3_rx_buffer_desc *rxd; 8379 8380 rxd = &tpr->rx_std[i]; 8381 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT; 8382 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT); 8383 rxd->opaque = (RXD_OPAQUE_RING_STD | 8384 (i << RXD_OPAQUE_INDEX_SHIFT)); 8385 } 8386 8387 /* Now allocate fresh SKBs for each rx ring. */ 8388 for (i = 0; i < tp->rx_pending; i++) { 8389 unsigned int frag_size; 8390 8391 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i, 8392 &frag_size) < 0) { 8393 netdev_warn(tp->dev, 8394 "Using a smaller RX standard ring. Only " 8395 "%d out of %d buffers were allocated " 8396 "successfully\n", i, tp->rx_pending); 8397 if (i == 0) 8398 goto initfail; 8399 tp->rx_pending = i; 8400 break; 8401 } 8402 } 8403 8404 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS)) 8405 goto done; 8406 8407 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp)); 8408 8409 if (!tg3_flag(tp, JUMBO_RING_ENABLE)) 8410 goto done; 8411 8412 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) { 8413 struct tg3_rx_buffer_desc *rxd; 8414 8415 rxd = &tpr->rx_jmb[i].std; 8416 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT; 8417 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) | 8418 RXD_FLAG_JUMBO; 8419 rxd->opaque = (RXD_OPAQUE_RING_JUMBO | 8420 (i << RXD_OPAQUE_INDEX_SHIFT)); 8421 } 8422 8423 for (i = 0; i < tp->rx_jumbo_pending; i++) { 8424 unsigned int frag_size; 8425 8426 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i, 8427 &frag_size) < 0) { 8428 netdev_warn(tp->dev, 8429 "Using a smaller RX jumbo ring. 
Only %d " 8430 "out of %d buffers were allocated " 8431 "successfully\n", i, tp->rx_jumbo_pending); 8432 if (i == 0) 8433 goto initfail; 8434 tp->rx_jumbo_pending = i; 8435 break; 8436 } 8437 } 8438 8439 done: 8440 return 0; 8441 8442 initfail: 8443 tg3_rx_prodring_free(tp, tpr); 8444 return -ENOMEM; 8445 } 8446 8447 static void tg3_rx_prodring_fini(struct tg3 *tp, 8448 struct tg3_rx_prodring_set *tpr) 8449 { 8450 kfree(tpr->rx_std_buffers); 8451 tpr->rx_std_buffers = NULL; 8452 kfree(tpr->rx_jmb_buffers); 8453 tpr->rx_jmb_buffers = NULL; 8454 if (tpr->rx_std) { 8455 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp), 8456 tpr->rx_std, tpr->rx_std_mapping); 8457 tpr->rx_std = NULL; 8458 } 8459 if (tpr->rx_jmb) { 8460 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp), 8461 tpr->rx_jmb, tpr->rx_jmb_mapping); 8462 tpr->rx_jmb = NULL; 8463 } 8464 } 8465 8466 static int tg3_rx_prodring_init(struct tg3 *tp, 8467 struct tg3_rx_prodring_set *tpr) 8468 { 8469 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp), 8470 GFP_KERNEL); 8471 if (!tpr->rx_std_buffers) 8472 return -ENOMEM; 8473 8474 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev, 8475 TG3_RX_STD_RING_BYTES(tp), 8476 &tpr->rx_std_mapping, 8477 GFP_KERNEL); 8478 if (!tpr->rx_std) 8479 goto err_out; 8480 8481 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) { 8482 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp), 8483 GFP_KERNEL); 8484 if (!tpr->rx_jmb_buffers) 8485 goto err_out; 8486 8487 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev, 8488 TG3_RX_JMB_RING_BYTES(tp), 8489 &tpr->rx_jmb_mapping, 8490 GFP_KERNEL); 8491 if (!tpr->rx_jmb) 8492 goto err_out; 8493 } 8494 8495 return 0; 8496 8497 err_out: 8498 tg3_rx_prodring_fini(tp, tpr); 8499 return -ENOMEM; 8500 } 8501 8502 /* Free up pending packets in all rx/tx rings. 8503 * 8504 * The chip has been shut down and the driver detached from 8505 * the networking, so no interrupts or new tx packets will 8506 * end up in the driver. tp->{tx,}lock is not held and we are not 8507 * in an interrupt context and thus may sleep. 8508 */ 8509 static void tg3_free_rings(struct tg3 *tp) 8510 { 8511 int i, j; 8512 8513 for (j = 0; j < tp->irq_cnt; j++) { 8514 struct tg3_napi *tnapi = &tp->napi[j]; 8515 8516 tg3_rx_prodring_free(tp, &tnapi->prodring); 8517 8518 if (!tnapi->tx_buffers) 8519 continue; 8520 8521 for (i = 0; i < TG3_TX_RING_SIZE; i++) { 8522 struct sk_buff *skb = tnapi->tx_buffers[i].skb; 8523 8524 if (!skb) 8525 continue; 8526 8527 tg3_tx_skb_unmap(tnapi, i, 8528 skb_shinfo(skb)->nr_frags - 1); 8529 8530 dev_kfree_skb_any(skb); 8531 } 8532 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j)); 8533 } 8534 } 8535 8536 /* Initialize tx/rx rings for packet processing. 8537 * 8538 * The chip has been shut down and the driver detached from 8539 * the networking, so no interrupts or new tx packets will 8540 * end up in the driver. tp->{tx,}lock are held and thus 8541 * we may not sleep. 8542 */ 8543 static int tg3_init_rings(struct tg3 *tp) 8544 { 8545 int i; 8546 8547 /* Free up all the SKBs. 
*/ 8548 tg3_free_rings(tp); 8549 8550 for (i = 0; i < tp->irq_cnt; i++) { 8551 struct tg3_napi *tnapi = &tp->napi[i]; 8552 8553 tnapi->last_tag = 0; 8554 tnapi->last_irq_tag = 0; 8555 tnapi->hw_status->status = 0; 8556 tnapi->hw_status->status_tag = 0; 8557 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); 8558 8559 tnapi->tx_prod = 0; 8560 tnapi->tx_cons = 0; 8561 if (tnapi->tx_ring) 8562 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES); 8563 8564 tnapi->rx_rcb_ptr = 0; 8565 if (tnapi->rx_rcb) 8566 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); 8567 8568 if (tnapi->prodring.rx_std && 8569 tg3_rx_prodring_alloc(tp, &tnapi->prodring)) { 8570 tg3_free_rings(tp); 8571 return -ENOMEM; 8572 } 8573 } 8574 8575 return 0; 8576 } 8577 8578 static void tg3_mem_tx_release(struct tg3 *tp) 8579 { 8580 int i; 8581 8582 for (i = 0; i < tp->irq_max; i++) { 8583 struct tg3_napi *tnapi = &tp->napi[i]; 8584 8585 if (tnapi->tx_ring) { 8586 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES, 8587 tnapi->tx_ring, tnapi->tx_desc_mapping); 8588 tnapi->tx_ring = NULL; 8589 } 8590 8591 kfree(tnapi->tx_buffers); 8592 tnapi->tx_buffers = NULL; 8593 } 8594 } 8595 8596 static int tg3_mem_tx_acquire(struct tg3 *tp) 8597 { 8598 int i; 8599 struct tg3_napi *tnapi = &tp->napi[0]; 8600 8601 /* If multivector TSS is enabled, vector 0 does not handle 8602 * tx interrupts. Don't allocate any resources for it. 8603 */ 8604 if (tg3_flag(tp, ENABLE_TSS)) 8605 tnapi++; 8606 8607 for (i = 0; i < tp->txq_cnt; i++, tnapi++) { 8608 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) * 8609 TG3_TX_RING_SIZE, GFP_KERNEL); 8610 if (!tnapi->tx_buffers) 8611 goto err_out; 8612 8613 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev, 8614 TG3_TX_RING_BYTES, 8615 &tnapi->tx_desc_mapping, 8616 GFP_KERNEL); 8617 if (!tnapi->tx_ring) 8618 goto err_out; 8619 } 8620 8621 return 0; 8622 8623 err_out: 8624 tg3_mem_tx_release(tp); 8625 return -ENOMEM; 8626 } 8627 8628 static void tg3_mem_rx_release(struct tg3 *tp) 8629 { 8630 int i; 8631 8632 for (i = 0; i < tp->irq_max; i++) { 8633 struct tg3_napi *tnapi = &tp->napi[i]; 8634 8635 tg3_rx_prodring_fini(tp, &tnapi->prodring); 8636 8637 if (!tnapi->rx_rcb) 8638 continue; 8639 8640 dma_free_coherent(&tp->pdev->dev, 8641 TG3_RX_RCB_RING_BYTES(tp), 8642 tnapi->rx_rcb, 8643 tnapi->rx_rcb_mapping); 8644 tnapi->rx_rcb = NULL; 8645 } 8646 } 8647 8648 static int tg3_mem_rx_acquire(struct tg3 *tp) 8649 { 8650 unsigned int i, limit; 8651 8652 limit = tp->rxq_cnt; 8653 8654 /* If RSS is enabled, we need a (dummy) producer ring 8655 * set on vector zero. This is the true hw prodring. 8656 */ 8657 if (tg3_flag(tp, ENABLE_RSS)) 8658 limit++; 8659 8660 for (i = 0; i < limit; i++) { 8661 struct tg3_napi *tnapi = &tp->napi[i]; 8662 8663 if (tg3_rx_prodring_init(tp, &tnapi->prodring)) 8664 goto err_out; 8665 8666 /* If multivector RSS is enabled, vector 0 8667 * does not handle rx or tx interrupts. 8668 * Don't allocate any resources for it. 8669 */ 8670 if (!i && tg3_flag(tp, ENABLE_RSS)) 8671 continue; 8672 8673 tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev, 8674 TG3_RX_RCB_RING_BYTES(tp), 8675 &tnapi->rx_rcb_mapping, 8676 GFP_KERNEL); 8677 if (!tnapi->rx_rcb) 8678 goto err_out; 8679 } 8680 8681 return 0; 8682 8683 err_out: 8684 tg3_mem_rx_release(tp); 8685 return -ENOMEM; 8686 } 8687 8688 /* 8689 * Must not be invoked with interrupt sources disabled and 8690 * the hardware shutdown down. 
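 * (dma_free_coherent() may not be called with interrupts
 * disabled, so this must run from process context once the chip
 * has been quiesced.)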
8691 */ 8692 static void tg3_free_consistent(struct tg3 *tp) 8693 { 8694 int i; 8695 8696 for (i = 0; i < tp->irq_cnt; i++) { 8697 struct tg3_napi *tnapi = &tp->napi[i]; 8698 8699 if (tnapi->hw_status) { 8700 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE, 8701 tnapi->hw_status, 8702 tnapi->status_mapping); 8703 tnapi->hw_status = NULL; 8704 } 8705 } 8706 8707 tg3_mem_rx_release(tp); 8708 tg3_mem_tx_release(tp); 8709 8710 if (tp->hw_stats) { 8711 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats), 8712 tp->hw_stats, tp->stats_mapping); 8713 tp->hw_stats = NULL; 8714 } 8715 } 8716 8717 /* 8718 * Must not be invoked with interrupt sources disabled and 8719 * the hardware shutdown down. Can sleep. 8720 */ 8721 static int tg3_alloc_consistent(struct tg3 *tp) 8722 { 8723 int i; 8724 8725 tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev, 8726 sizeof(struct tg3_hw_stats), 8727 &tp->stats_mapping, GFP_KERNEL); 8728 if (!tp->hw_stats) 8729 goto err_out; 8730 8731 for (i = 0; i < tp->irq_cnt; i++) { 8732 struct tg3_napi *tnapi = &tp->napi[i]; 8733 struct tg3_hw_status *sblk; 8734 8735 tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev, 8736 TG3_HW_STATUS_SIZE, 8737 &tnapi->status_mapping, 8738 GFP_KERNEL); 8739 if (!tnapi->hw_status) 8740 goto err_out; 8741 8742 sblk = tnapi->hw_status; 8743 8744 if (tg3_flag(tp, ENABLE_RSS)) { 8745 u16 *prodptr = NULL; 8746 8747 /* 8748 * When RSS is enabled, the status block format changes 8749 * slightly. The "rx_jumbo_consumer", "reserved", 8750 * and "rx_mini_consumer" members get mapped to the 8751 * other three rx return ring producer indexes. 8752 */ 8753 switch (i) { 8754 case 1: 8755 prodptr = &sblk->idx[0].rx_producer; 8756 break; 8757 case 2: 8758 prodptr = &sblk->rx_jumbo_consumer; 8759 break; 8760 case 3: 8761 prodptr = &sblk->reserved; 8762 break; 8763 case 4: 8764 prodptr = &sblk->rx_mini_consumer; 8765 break; 8766 } 8767 tnapi->rx_rcb_prod_idx = prodptr; 8768 } else { 8769 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer; 8770 } 8771 } 8772 8773 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp)) 8774 goto err_out; 8775 8776 return 0; 8777 8778 err_out: 8779 tg3_free_consistent(tp); 8780 return -ENOMEM; 8781 } 8782 8783 #define MAX_WAIT_CNT 1000 8784 8785 /* To stop a block, clear the enable bit and poll till it 8786 * clears. tp->lock is held. 8787 */ 8788 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent) 8789 { 8790 unsigned int i; 8791 u32 val; 8792 8793 if (tg3_flag(tp, 5705_PLUS)) { 8794 switch (ofs) { 8795 case RCVLSC_MODE: 8796 case DMAC_MODE: 8797 case MBFREE_MODE: 8798 case BUFMGR_MODE: 8799 case MEMARB_MODE: 8800 /* We can't enable/disable these bits of the 8801 * 5705/5750, just say success. 8802 */ 8803 return 0; 8804 8805 default: 8806 break; 8807 } 8808 } 8809 8810 val = tr32(ofs); 8811 val &= ~enable_bit; 8812 tw32_f(ofs, val); 8813 8814 for (i = 0; i < MAX_WAIT_CNT; i++) { 8815 if (pci_channel_offline(tp->pdev)) { 8816 dev_err(&tp->pdev->dev, 8817 "tg3_stop_block device offline, " 8818 "ofs=%lx enable_bit=%x\n", 8819 ofs, enable_bit); 8820 return -ENODEV; 8821 } 8822 8823 udelay(100); 8824 val = tr32(ofs); 8825 if ((val & enable_bit) == 0) 8826 break; 8827 } 8828 8829 if (i == MAX_WAIT_CNT && !silent) { 8830 dev_err(&tp->pdev->dev, 8831 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n", 8832 ofs, enable_bit); 8833 return -ENODEV; 8834 } 8835 8836 return 0; 8837 } 8838 8839 /* tp->lock is held. 
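 * tg3_abort_hw() quiesces the chip ahead of a reset: the receive
 * MAC is disabled first, then each DMA and buffer state machine
 * is stopped via tg3_stop_block(), and finally every vector's
 * status block is cleared so stale indices cannot survive into
 * the next initialization.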
*/ 8840 static int tg3_abort_hw(struct tg3 *tp, bool silent) 8841 { 8842 int i, err; 8843 8844 tg3_disable_ints(tp); 8845 8846 if (pci_channel_offline(tp->pdev)) { 8847 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE); 8848 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE; 8849 err = -ENODEV; 8850 goto err_no_dev; 8851 } 8852 8853 tp->rx_mode &= ~RX_MODE_ENABLE; 8854 tw32_f(MAC_RX_MODE, tp->rx_mode); 8855 udelay(10); 8856 8857 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent); 8858 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent); 8859 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent); 8860 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent); 8861 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent); 8862 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent); 8863 8864 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent); 8865 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent); 8866 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent); 8867 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent); 8868 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent); 8869 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent); 8870 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent); 8871 8872 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE; 8873 tw32_f(MAC_MODE, tp->mac_mode); 8874 udelay(40); 8875 8876 tp->tx_mode &= ~TX_MODE_ENABLE; 8877 tw32_f(MAC_TX_MODE, tp->tx_mode); 8878 8879 for (i = 0; i < MAX_WAIT_CNT; i++) { 8880 udelay(100); 8881 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE)) 8882 break; 8883 } 8884 if (i >= MAX_WAIT_CNT) { 8885 dev_err(&tp->pdev->dev, 8886 "%s timed out, TX_MODE_ENABLE will not clear " 8887 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE)); 8888 err |= -ENODEV; 8889 } 8890 8891 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent); 8892 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent); 8893 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent); 8894 8895 tw32(FTQ_RESET, 0xffffffff); 8896 tw32(FTQ_RESET, 0x00000000); 8897 8898 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent); 8899 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent); 8900 8901 err_no_dev: 8902 for (i = 0; i < tp->irq_cnt; i++) { 8903 struct tg3_napi *tnapi = &tp->napi[i]; 8904 if (tnapi->hw_status) 8905 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); 8906 } 8907 8908 return err; 8909 } 8910 8911 /* Save PCI command register before chip reset */ 8912 static void tg3_save_pci_state(struct tg3 *tp) 8913 { 8914 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd); 8915 } 8916 8917 /* Restore PCI state after chip reset */ 8918 static void tg3_restore_pci_state(struct tg3 *tp) 8919 { 8920 u32 val; 8921 8922 /* Re-enable indirect register accesses. */ 8923 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, 8924 tp->misc_host_ctrl); 8925 8926 /* Set MAX PCI retry to zero. */ 8927 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE); 8928 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 && 8929 tg3_flag(tp, PCIX_MODE)) 8930 val |= PCISTATE_RETRY_SAME_DMA; 8931 /* Allow reads and writes to the APE register and memory space. 
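 * (The APE is the on-chip management processor; the chip reset
 * clears these window-enable bits, so host access to its control
 * and shared-memory spaces must be re-granted here.)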
*/ 8932 if (tg3_flag(tp, ENABLE_APE)) 8933 val |= PCISTATE_ALLOW_APE_CTLSPC_WR | 8934 PCISTATE_ALLOW_APE_SHMEM_WR | 8935 PCISTATE_ALLOW_APE_PSPACE_WR; 8936 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val); 8937 8938 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd); 8939 8940 if (!tg3_flag(tp, PCI_EXPRESS)) { 8941 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, 8942 tp->pci_cacheline_sz); 8943 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, 8944 tp->pci_lat_timer); 8945 } 8946 8947 /* Make sure PCI-X relaxed ordering bit is clear. */ 8948 if (tg3_flag(tp, PCIX_MODE)) { 8949 u16 pcix_cmd; 8950 8951 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 8952 &pcix_cmd); 8953 pcix_cmd &= ~PCI_X_CMD_ERO; 8954 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 8955 pcix_cmd); 8956 } 8957 8958 if (tg3_flag(tp, 5780_CLASS)) { 8959 8960 /* Chip reset on 5780 will reset MSI enable bit, 8961 * so need to restore it. 8962 */ 8963 if (tg3_flag(tp, USING_MSI)) { 8964 u16 ctrl; 8965 8966 pci_read_config_word(tp->pdev, 8967 tp->msi_cap + PCI_MSI_FLAGS, 8968 &ctrl); 8969 pci_write_config_word(tp->pdev, 8970 tp->msi_cap + PCI_MSI_FLAGS, 8971 ctrl | PCI_MSI_FLAGS_ENABLE); 8972 val = tr32(MSGINT_MODE); 8973 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE); 8974 } 8975 } 8976 } 8977 8978 static void tg3_override_clk(struct tg3 *tp) 8979 { 8980 u32 val; 8981 8982 switch (tg3_asic_rev(tp)) { 8983 case ASIC_REV_5717: 8984 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE); 8985 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val | 8986 TG3_CPMU_MAC_ORIDE_ENABLE); 8987 break; 8988 8989 case ASIC_REV_5719: 8990 case ASIC_REV_5720: 8991 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN); 8992 break; 8993 8994 default: 8995 return; 8996 } 8997 } 8998 8999 static void tg3_restore_clk(struct tg3 *tp) 9000 { 9001 u32 val; 9002 9003 switch (tg3_asic_rev(tp)) { 9004 case ASIC_REV_5717: 9005 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE); 9006 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, 9007 val & ~TG3_CPMU_MAC_ORIDE_ENABLE); 9008 break; 9009 9010 case ASIC_REV_5719: 9011 case ASIC_REV_5720: 9012 val = tr32(TG3_CPMU_CLCK_ORIDE); 9013 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN); 9014 break; 9015 9016 default: 9017 return; 9018 } 9019 } 9020 9021 /* tp->lock is held. */ 9022 static int tg3_chip_reset(struct tg3 *tp) 9023 __releases(tp->lock) 9024 __acquires(tp->lock) 9025 { 9026 u32 val; 9027 void (*write_op)(struct tg3 *, u32, u32); 9028 int i, err; 9029 9030 if (!pci_device_is_present(tp->pdev)) 9031 return -ENODEV; 9032 9033 tg3_nvram_lock(tp); 9034 9035 tg3_ape_lock(tp, TG3_APE_LOCK_GRC); 9036 9037 /* No matching tg3_nvram_unlock() after this because 9038 * chip reset below will undo the nvram lock. 9039 */ 9040 tp->nvram_lock_cnt = 0; 9041 9042 /* GRC_MISC_CFG core clock reset will clear the memory 9043 * enable bit in PCI register 4 and the MSI enable bit 9044 * on some chips, so we save relevant registers here. 9045 */ 9046 tg3_save_pci_state(tp); 9047 9048 if (tg3_asic_rev(tp) == ASIC_REV_5752 || 9049 tg3_flag(tp, 5755_PLUS)) 9050 tw32(GRC_FASTBOOT_PC, 0); 9051 9052 /* 9053 * We must avoid the readl() that normally takes place. 9054 * It locks machines, causes machine checks, and other 9055 * fun things. So, temporarily disable the 5701 9056 * hardware workaround, while we do the reset. 
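 * (tg3_write_flush_reg32() is that workaround: it reads the
 * register back after every write to force the posted write out.
 * Swapping in plain tg3_write32() avoids exactly that readl()
 * until the reset has completed.)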
9057 */ 9058 write_op = tp->write32; 9059 if (write_op == tg3_write_flush_reg32) 9060 tp->write32 = tg3_write32; 9061 9062 /* Prevent the irq handler from reading or writing PCI registers 9063 * during chip reset when the memory enable bit in the PCI command 9064 * register may be cleared. The chip does not generate interrupt 9065 * at this time, but the irq handler may still be called due to irq 9066 * sharing or irqpoll. 9067 */ 9068 tg3_flag_set(tp, CHIP_RESETTING); 9069 for (i = 0; i < tp->irq_cnt; i++) { 9070 struct tg3_napi *tnapi = &tp->napi[i]; 9071 if (tnapi->hw_status) { 9072 tnapi->hw_status->status = 0; 9073 tnapi->hw_status->status_tag = 0; 9074 } 9075 tnapi->last_tag = 0; 9076 tnapi->last_irq_tag = 0; 9077 } 9078 smp_mb(); 9079 9080 tg3_full_unlock(tp); 9081 9082 for (i = 0; i < tp->irq_cnt; i++) 9083 synchronize_irq(tp->napi[i].irq_vec); 9084 9085 tg3_full_lock(tp, 0); 9086 9087 if (tg3_asic_rev(tp) == ASIC_REV_57780) { 9088 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; 9089 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); 9090 } 9091 9092 /* do the reset */ 9093 val = GRC_MISC_CFG_CORECLK_RESET; 9094 9095 if (tg3_flag(tp, PCI_EXPRESS)) { 9096 /* Force PCIe 1.0a mode */ 9097 if (tg3_asic_rev(tp) != ASIC_REV_5785 && 9098 !tg3_flag(tp, 57765_PLUS) && 9099 tr32(TG3_PCIE_PHY_TSTCTL) == 9100 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM)) 9101 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM); 9102 9103 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) { 9104 tw32(GRC_MISC_CFG, (1 << 29)); 9105 val |= (1 << 29); 9106 } 9107 } 9108 9109 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 9110 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET); 9111 tw32(GRC_VCPU_EXT_CTRL, 9112 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU); 9113 } 9114 9115 /* Set the clock to the highest frequency to avoid timeouts. With link 9116 * aware mode, the clock speed could be slow and bootcode does not 9117 * complete within the expected time. Override the clock to allow the 9118 * bootcode to finish sooner and then restore it. 9119 */ 9120 tg3_override_clk(tp); 9121 9122 /* Manage gphy power for all CPMU absent PCIe devices. */ 9123 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT)) 9124 val |= GRC_MISC_CFG_KEEP_GPHY_POWER; 9125 9126 tw32(GRC_MISC_CFG, val); 9127 9128 /* restore 5701 hardware bug workaround write method */ 9129 tp->write32 = write_op; 9130 9131 /* Unfortunately, we have to delay before the PCI read back. 9132 * Some 575X chips even will not respond to a PCI cfg access 9133 * when the reset command is given to the chip. 9134 * 9135 * How do these hardware designers expect things to work 9136 * properly if the PCI write is posted for a long period 9137 * of time? It is always necessary to have some method by 9138 * which a register read back can occur to push the write 9139 * out which does the reset. 9140 * 9141 * For most tg3 variants the trick below was working. 9142 * Ho hum... 9143 */ 9144 udelay(120); 9145 9146 /* Flush PCI posted writes. The normal MMIO registers 9147 * are inaccessible at this time so this is the only 9148 * way to make this reliably (actually, this is no longer 9149 * the case, see above). I tried to use indirect 9150 * register read/write but this upset some 5701 variants. 
9151 */ 9152 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val); 9153 9154 udelay(120); 9155 9156 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) { 9157 u16 val16; 9158 9159 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) { 9160 int j; 9161 u32 cfg_val; 9162 9163 /* Wait for link training to complete. */ 9164 for (j = 0; j < 5000; j++) 9165 udelay(100); 9166 9167 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val); 9168 pci_write_config_dword(tp->pdev, 0xc4, 9169 cfg_val | (1 << 15)); 9170 } 9171 9172 /* Clear the "no snoop" and "relaxed ordering" bits. */ 9173 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN; 9174 /* 9175 * Older PCIe devices only support the 128 byte 9176 * MPS setting. Enforce the restriction. 9177 */ 9178 if (!tg3_flag(tp, CPMU_PRESENT)) 9179 val16 |= PCI_EXP_DEVCTL_PAYLOAD; 9180 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16); 9181 9182 /* Clear error status */ 9183 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA, 9184 PCI_EXP_DEVSTA_CED | 9185 PCI_EXP_DEVSTA_NFED | 9186 PCI_EXP_DEVSTA_FED | 9187 PCI_EXP_DEVSTA_URD); 9188 } 9189 9190 tg3_restore_pci_state(tp); 9191 9192 tg3_flag_clear(tp, CHIP_RESETTING); 9193 tg3_flag_clear(tp, ERROR_PROCESSED); 9194 9195 val = 0; 9196 if (tg3_flag(tp, 5780_CLASS)) 9197 val = tr32(MEMARB_MODE); 9198 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); 9199 9200 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) { 9201 tg3_stop_fw(tp); 9202 tw32(0x5000, 0x400); 9203 } 9204 9205 if (tg3_flag(tp, IS_SSB_CORE)) { 9206 /* 9207 * BCM4785: In order to avoid repercussions from using 9208 * potentially defective internal ROM, stop the Rx RISC CPU, 9209 * which is not required. 9210 */ 9211 tg3_stop_fw(tp); 9212 tg3_halt_cpu(tp, RX_CPU_BASE); 9213 } 9214 9215 err = tg3_poll_fw(tp); 9216 if (err) 9217 return err; 9218 9219 tw32(GRC_MODE, tp->grc_mode); 9220 9221 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) { 9222 val = tr32(0xc4); 9223 9224 tw32(0xc4, val | (1 << 15)); 9225 } 9226 9227 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 && 9228 tg3_asic_rev(tp) == ASIC_REV_5705) { 9229 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE; 9230 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) 9231 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN; 9232 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); 9233 } 9234 9235 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 9236 tp->mac_mode = MAC_MODE_PORT_MODE_TBI; 9237 val = tp->mac_mode; 9238 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { 9239 tp->mac_mode = MAC_MODE_PORT_MODE_GMII; 9240 val = tp->mac_mode; 9241 } else 9242 val = 0; 9243 9244 tw32_f(MAC_MODE, val); 9245 udelay(40); 9246 9247 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC); 9248 9249 tg3_mdio_start(tp); 9250 9251 if (tg3_flag(tp, PCI_EXPRESS) && 9252 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 && 9253 tg3_asic_rev(tp) != ASIC_REV_5785 && 9254 !tg3_flag(tp, 57765_PLUS)) { 9255 val = tr32(0x7c00); 9256 9257 tw32(0x7c00, val | (1 << 25)); 9258 } 9259 9260 tg3_restore_clk(tp); 9261 9262 /* Reprobe ASF enable state. 
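 * The reset wiped the firmware handshake state, so the NIC SRAM
 * signature and config words are re-read to relearn whether ASF
 * management firmware is active.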
*/ 9263 tg3_flag_clear(tp, ENABLE_ASF); 9264 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK | 9265 TG3_PHYFLG_KEEP_LINK_ON_PWRDN); 9266 9267 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE); 9268 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); 9269 if (val == NIC_SRAM_DATA_SIG_MAGIC) { 9270 u32 nic_cfg; 9271 9272 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); 9273 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { 9274 tg3_flag_set(tp, ENABLE_ASF); 9275 tp->last_event_jiffies = jiffies; 9276 if (tg3_flag(tp, 5750_PLUS)) 9277 tg3_flag_set(tp, ASF_NEW_HANDSHAKE); 9278 9279 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg); 9280 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK) 9281 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK; 9282 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID) 9283 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN; 9284 } 9285 } 9286 9287 return 0; 9288 } 9289 9290 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *); 9291 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *); 9292 static void __tg3_set_rx_mode(struct net_device *); 9293 9294 /* tp->lock is held. */ 9295 static int tg3_halt(struct tg3 *tp, int kind, bool silent) 9296 { 9297 int err; 9298 9299 tg3_stop_fw(tp); 9300 9301 tg3_write_sig_pre_reset(tp, kind); 9302 9303 tg3_abort_hw(tp, silent); 9304 err = tg3_chip_reset(tp); 9305 9306 __tg3_set_mac_addr(tp, false); 9307 9308 tg3_write_sig_legacy(tp, kind); 9309 tg3_write_sig_post_reset(tp, kind); 9310 9311 if (tp->hw_stats) { 9312 /* Save the stats across chip resets... */ 9313 tg3_get_nstats(tp, &tp->net_stats_prev); 9314 tg3_get_estats(tp, &tp->estats_prev); 9315 9316 /* And make sure the next sample is new data */ 9317 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); 9318 } 9319 9320 return err; 9321 } 9322 9323 static int tg3_set_mac_addr(struct net_device *dev, void *p) 9324 { 9325 struct tg3 *tp = netdev_priv(dev); 9326 struct sockaddr *addr = p; 9327 int err = 0; 9328 bool skip_mac_1 = false; 9329 9330 if (!is_valid_ether_addr(addr->sa_data)) 9331 return -EADDRNOTAVAIL; 9332 9333 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 9334 9335 if (!netif_running(dev)) 9336 return 0; 9337 9338 if (tg3_flag(tp, ENABLE_ASF)) { 9339 u32 addr0_high, addr0_low, addr1_high, addr1_low; 9340 9341 addr0_high = tr32(MAC_ADDR_0_HIGH); 9342 addr0_low = tr32(MAC_ADDR_0_LOW); 9343 addr1_high = tr32(MAC_ADDR_1_HIGH); 9344 addr1_low = tr32(MAC_ADDR_1_LOW); 9345 9346 /* Skip MAC addr 1 if ASF is using it. */ 9347 if ((addr0_high != addr1_high || addr0_low != addr1_low) && 9348 !(addr1_high == 0 && addr1_low == 0)) 9349 skip_mac_1 = true; 9350 } 9351 spin_lock_bh(&tp->lock); 9352 __tg3_set_mac_addr(tp, skip_mac_1); 9353 __tg3_set_rx_mode(dev); 9354 spin_unlock_bh(&tp->lock); 9355 9356 return err; 9357 } 9358 9359 /* tp->lock is held. 
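 * tg3_set_bdinfo() programs one TG3_BDINFO ring control block in
 * NIC SRAM: the 64-bit host DMA address, the maxlen/flags word,
 * and (on pre-5705 parts) the NIC-memory address of the
 * descriptors. Callers pack the ring length into the flags word,
 * as tg3_tx_rcbs_init() does below:
 *
 *	tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
 *		       TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT,
 *		       NIC_SRAM_TX_BUFFER_DESC);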
*/ 9360 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr, 9361 dma_addr_t mapping, u32 maxlen_flags, 9362 u32 nic_addr) 9363 { 9364 tg3_write_mem(tp, 9365 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH), 9366 ((u64) mapping >> 32)); 9367 tg3_write_mem(tp, 9368 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW), 9369 ((u64) mapping & 0xffffffff)); 9370 tg3_write_mem(tp, 9371 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS), 9372 maxlen_flags); 9373 9374 if (!tg3_flag(tp, 5705_PLUS)) 9375 tg3_write_mem(tp, 9376 (bdinfo_addr + TG3_BDINFO_NIC_ADDR), 9377 nic_addr); 9378 } 9379 9380 9381 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec) 9382 { 9383 int i = 0; 9384 9385 if (!tg3_flag(tp, ENABLE_TSS)) { 9386 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs); 9387 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames); 9388 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq); 9389 } else { 9390 tw32(HOSTCC_TXCOL_TICKS, 0); 9391 tw32(HOSTCC_TXMAX_FRAMES, 0); 9392 tw32(HOSTCC_TXCOAL_MAXF_INT, 0); 9393 9394 for (; i < tp->txq_cnt; i++) { 9395 u32 reg; 9396 9397 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18; 9398 tw32(reg, ec->tx_coalesce_usecs); 9399 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18; 9400 tw32(reg, ec->tx_max_coalesced_frames); 9401 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18; 9402 tw32(reg, ec->tx_max_coalesced_frames_irq); 9403 } 9404 } 9405 9406 for (; i < tp->irq_max - 1; i++) { 9407 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0); 9408 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0); 9409 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); 9410 } 9411 } 9412 9413 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec) 9414 { 9415 int i = 0; 9416 u32 limit = tp->rxq_cnt; 9417 9418 if (!tg3_flag(tp, ENABLE_RSS)) { 9419 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs); 9420 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames); 9421 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq); 9422 limit--; 9423 } else { 9424 tw32(HOSTCC_RXCOL_TICKS, 0); 9425 tw32(HOSTCC_RXMAX_FRAMES, 0); 9426 tw32(HOSTCC_RXCOAL_MAXF_INT, 0); 9427 } 9428 9429 for (; i < limit; i++) { 9430 u32 reg; 9431 9432 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18; 9433 tw32(reg, ec->rx_coalesce_usecs); 9434 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18; 9435 tw32(reg, ec->rx_max_coalesced_frames); 9436 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18; 9437 tw32(reg, ec->rx_max_coalesced_frames_irq); 9438 } 9439 9440 for (; i < tp->irq_max - 1; i++) { 9441 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0); 9442 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0); 9443 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); 9444 } 9445 } 9446 9447 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) 9448 { 9449 tg3_coal_tx_init(tp, ec); 9450 tg3_coal_rx_init(tp, ec); 9451 9452 if (!tg3_flag(tp, 5705_PLUS)) { 9453 u32 val = ec->stats_block_coalesce_usecs; 9454 9455 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq); 9456 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq); 9457 9458 if (!tp->link_up) 9459 val = 0; 9460 9461 tw32(HOSTCC_STAT_COAL_TICKS, val); 9462 } 9463 } 9464 9465 /* tp->lock is held. */ 9466 static void tg3_tx_rcbs_disable(struct tg3 *tp) 9467 { 9468 u32 txrcb, limit; 9469 9470 /* Disable all transmit rings but the first. 
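 * The per-chip limits mirror how many send BD rings each
 * generation exposes (16, 4, 2 or 1); the walk starts one
 * TG3_BDINFO_SIZE past the first RCB so ring 0 stays usable.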
*/ 9471 if (!tg3_flag(tp, 5705_PLUS)) 9472 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16; 9473 else if (tg3_flag(tp, 5717_PLUS)) 9474 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4; 9475 else if (tg3_flag(tp, 57765_CLASS) || 9476 tg3_asic_rev(tp) == ASIC_REV_5762) 9477 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2; 9478 else 9479 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; 9480 9481 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; 9482 txrcb < limit; txrcb += TG3_BDINFO_SIZE) 9483 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS, 9484 BDINFO_FLAGS_DISABLED); 9485 } 9486 9487 /* tp->lock is held. */ 9488 static void tg3_tx_rcbs_init(struct tg3 *tp) 9489 { 9490 int i = 0; 9491 u32 txrcb = NIC_SRAM_SEND_RCB; 9492 9493 if (tg3_flag(tp, ENABLE_TSS)) 9494 i++; 9495 9496 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) { 9497 struct tg3_napi *tnapi = &tp->napi[i]; 9498 9499 if (!tnapi->tx_ring) 9500 continue; 9501 9502 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, 9503 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT), 9504 NIC_SRAM_TX_BUFFER_DESC); 9505 } 9506 } 9507 9508 /* tp->lock is held. */ 9509 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp) 9510 { 9511 u32 rxrcb, limit; 9512 9513 /* Disable all receive return rings but the first. */ 9514 if (tg3_flag(tp, 5717_PLUS)) 9515 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17; 9516 else if (!tg3_flag(tp, 5705_PLUS)) 9517 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16; 9518 else if (tg3_asic_rev(tp) == ASIC_REV_5755 || 9519 tg3_asic_rev(tp) == ASIC_REV_5762 || 9520 tg3_flag(tp, 57765_CLASS)) 9521 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4; 9522 else 9523 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; 9524 9525 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; 9526 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE) 9527 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS, 9528 BDINFO_FLAGS_DISABLED); 9529 } 9530 9531 /* tp->lock is held. */ 9532 static void tg3_rx_ret_rcbs_init(struct tg3 *tp) 9533 { 9534 int i = 0; 9535 u32 rxrcb = NIC_SRAM_RCV_RET_RCB; 9536 9537 if (tg3_flag(tp, ENABLE_RSS)) 9538 i++; 9539 9540 for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) { 9541 struct tg3_napi *tnapi = &tp->napi[i]; 9542 9543 if (!tnapi->rx_rcb) 9544 continue; 9545 9546 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, 9547 (tp->rx_ret_ring_mask + 1) << 9548 BDINFO_FLAGS_MAXLEN_SHIFT, 0); 9549 } 9550 } 9551 9552 /* tp->lock is held. */ 9553 static void tg3_rings_reset(struct tg3 *tp) 9554 { 9555 int i; 9556 u32 stblk; 9557 struct tg3_napi *tnapi = &tp->napi[0]; 9558 9559 tg3_tx_rcbs_disable(tp); 9560 9561 tg3_rx_ret_rcbs_disable(tp); 9562 9563 /* Disable interrupts */ 9564 tw32_mailbox_f(tp->napi[0].int_mbox, 1); 9565 tp->napi[0].chk_msi_cnt = 0; 9566 tp->napi[0].last_rx_cons = 0; 9567 tp->napi[0].last_tx_cons = 0; 9568 9569 /* Zero mailbox registers. 
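 * Producer mailboxes are zeroed only for vectors that actually
 * own a tx ring (TSS moves tx off vector 0), and each interrupt
 * mailbox is written with 1 so interrupts stay masked until
 * setup completes.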
*/ 9570 if (tg3_flag(tp, SUPPORT_MSIX)) { 9571 for (i = 1; i < tp->irq_max; i++) { 9572 tp->napi[i].tx_prod = 0; 9573 tp->napi[i].tx_cons = 0; 9574 if (tg3_flag(tp, ENABLE_TSS)) 9575 tw32_mailbox(tp->napi[i].prodmbox, 0); 9576 tw32_rx_mbox(tp->napi[i].consmbox, 0); 9577 tw32_mailbox_f(tp->napi[i].int_mbox, 1); 9578 tp->napi[i].chk_msi_cnt = 0; 9579 tp->napi[i].last_rx_cons = 0; 9580 tp->napi[i].last_tx_cons = 0; 9581 } 9582 if (!tg3_flag(tp, ENABLE_TSS)) 9583 tw32_mailbox(tp->napi[0].prodmbox, 0); 9584 } else { 9585 tp->napi[0].tx_prod = 0; 9586 tp->napi[0].tx_cons = 0; 9587 tw32_mailbox(tp->napi[0].prodmbox, 0); 9588 tw32_rx_mbox(tp->napi[0].consmbox, 0); 9589 } 9590 9591 /* Make sure the NIC-based send BD rings are disabled. */ 9592 if (!tg3_flag(tp, 5705_PLUS)) { 9593 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW; 9594 for (i = 0; i < 16; i++) 9595 tw32_tx_mbox(mbox + i * 8, 0); 9596 } 9597 9598 /* Clear status block in ram. */ 9599 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); 9600 9601 /* Set status block DMA address */ 9602 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, 9603 ((u64) tnapi->status_mapping >> 32)); 9604 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, 9605 ((u64) tnapi->status_mapping & 0xffffffff)); 9606 9607 stblk = HOSTCC_STATBLCK_RING1; 9608 9609 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) { 9610 u64 mapping = (u64)tnapi->status_mapping; 9611 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32); 9612 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff); 9613 stblk += 8; 9614 9615 /* Clear status block in ram. */ 9616 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); 9617 } 9618 9619 tg3_tx_rcbs_init(tp); 9620 tg3_rx_ret_rcbs_init(tp); 9621 } 9622 9623 static void tg3_setup_rxbd_thresholds(struct tg3 *tp) 9624 { 9625 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh; 9626 9627 if (!tg3_flag(tp, 5750_PLUS) || 9628 tg3_flag(tp, 5780_CLASS) || 9629 tg3_asic_rev(tp) == ASIC_REV_5750 || 9630 tg3_asic_rev(tp) == ASIC_REV_5752 || 9631 tg3_flag(tp, 57765_PLUS)) 9632 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700; 9633 else if (tg3_asic_rev(tp) == ASIC_REV_5755 || 9634 tg3_asic_rev(tp) == ASIC_REV_5787) 9635 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755; 9636 else 9637 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906; 9638 9639 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post); 9640 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1); 9641 9642 val = min(nic_rep_thresh, host_rep_thresh); 9643 tw32(RCVBDI_STD_THRESH, val); 9644 9645 if (tg3_flag(tp, 57765_PLUS)) 9646 tw32(STD_REPLENISH_LWM, bdcache_maxcnt); 9647 9648 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS)) 9649 return; 9650 9651 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700; 9652 9653 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1); 9654 9655 val = min(bdcache_maxcnt / 2, host_rep_thresh); 9656 tw32(RCVBDI_JUMBO_THRESH, val); 9657 9658 if (tg3_flag(tp, 57765_PLUS)) 9659 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt); 9660 } 9661 9662 static inline u32 calc_crc(unsigned char *buf, int len) 9663 { 9664 u32 reg; 9665 u32 tmp; 9666 int j, k; 9667 9668 reg = 0xffffffff; 9669 9670 for (j = 0; j < len; j++) { 9671 reg ^= buf[j]; 9672 9673 for (k = 0; k < 8; k++) { 9674 tmp = reg & 0x01; 9675 9676 reg >>= 1; 9677 9678 if (tmp) 9679 reg ^= 0xedb88320; 9680 } 9681 } 9682 9683 return ~reg; 9684 } 9685 9686 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all) 9687 { 9688 /* accept or reject all multicast frames */ 9689 
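/* The four hash registers form one 128-bit multicast filter:
 * all ones accepts every multicast frame, all zeroes rejects
 * them. __tg3_set_rx_mode() below sets individual bits instead;
 * for example, ~crc & 0x7f == 0x43 gives regidx = (0x43 & 0x60)
 * >> 5 == 2 and bit = 0x43 & 0x1f == 3, i.e. bit 3 of
 * MAC_HASH_REG_2.
 */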
tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0); 9690 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0); 9691 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0); 9692 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0); 9693 } 9694 9695 static void __tg3_set_rx_mode(struct net_device *dev) 9696 { 9697 struct tg3 *tp = netdev_priv(dev); 9698 u32 rx_mode; 9699 9700 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC | 9701 RX_MODE_KEEP_VLAN_TAG); 9702 9703 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE) 9704 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG 9705 * flag clear. 9706 */ 9707 if (!tg3_flag(tp, ENABLE_ASF)) 9708 rx_mode |= RX_MODE_KEEP_VLAN_TAG; 9709 #endif 9710 9711 if (dev->flags & IFF_PROMISC) { 9712 /* Promiscuous mode. */ 9713 rx_mode |= RX_MODE_PROMISC; 9714 } else if (dev->flags & IFF_ALLMULTI) { 9715 /* Accept all multicast. */ 9716 tg3_set_multi(tp, 1); 9717 } else if (netdev_mc_empty(dev)) { 9718 /* Reject all multicast. */ 9719 tg3_set_multi(tp, 0); 9720 } else { 9721 /* Accept one or more multicast(s). */ 9722 struct netdev_hw_addr *ha; 9723 u32 mc_filter[4] = { 0, }; 9724 u32 regidx; 9725 u32 bit; 9726 u32 crc; 9727 9728 netdev_for_each_mc_addr(ha, dev) { 9729 crc = calc_crc(ha->addr, ETH_ALEN); 9730 bit = ~crc & 0x7f; 9731 regidx = (bit & 0x60) >> 5; 9732 bit &= 0x1f; 9733 mc_filter[regidx] |= (1 << bit); 9734 } 9735 9736 tw32(MAC_HASH_REG_0, mc_filter[0]); 9737 tw32(MAC_HASH_REG_1, mc_filter[1]); 9738 tw32(MAC_HASH_REG_2, mc_filter[2]); 9739 tw32(MAC_HASH_REG_3, mc_filter[3]); 9740 } 9741 9742 if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) { 9743 rx_mode |= RX_MODE_PROMISC; 9744 } else if (!(dev->flags & IFF_PROMISC)) { 9745 /* Add all entries into to the mac addr filter list */ 9746 int i = 0; 9747 struct netdev_hw_addr *ha; 9748 9749 netdev_for_each_uc_addr(ha, dev) { 9750 __tg3_set_one_mac_addr(tp, ha->addr, 9751 i + TG3_UCAST_ADDR_IDX(tp)); 9752 i++; 9753 } 9754 } 9755 9756 if (rx_mode != tp->rx_mode) { 9757 tp->rx_mode = rx_mode; 9758 tw32_f(MAC_RX_MODE, rx_mode); 9759 udelay(10); 9760 } 9761 } 9762 9763 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt) 9764 { 9765 int i; 9766 9767 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) 9768 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt); 9769 } 9770 9771 static void tg3_rss_check_indir_tbl(struct tg3 *tp) 9772 { 9773 int i; 9774 9775 if (!tg3_flag(tp, SUPPORT_MSIX)) 9776 return; 9777 9778 if (tp->rxq_cnt == 1) { 9779 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl)); 9780 return; 9781 } 9782 9783 /* Validate table against current IRQ count */ 9784 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) { 9785 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt) 9786 break; 9787 } 9788 9789 if (i != TG3_RSS_INDIR_TBL_SIZE) 9790 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt); 9791 } 9792 9793 static void tg3_rss_write_indir_tbl(struct tg3 *tp) 9794 { 9795 int i = 0; 9796 u32 reg = MAC_RSS_INDIR_TBL_0; 9797 9798 while (i < TG3_RSS_INDIR_TBL_SIZE) { 9799 u32 val = tp->rss_ind_tbl[i]; 9800 i++; 9801 for (; i % 8; i++) { 9802 val <<= 4; 9803 val |= tp->rss_ind_tbl[i]; 9804 } 9805 tw32(reg, val); 9806 reg += 4; 9807 } 9808 } 9809 9810 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp) 9811 { 9812 if (tg3_asic_rev(tp) == ASIC_REV_5719) 9813 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719; 9814 else 9815 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720; 9816 } 9817 9818 /* tp->lock is held. 
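 * tg3_reset_hw() is the master bring-up path: stop firmware and
 * DMA, reset the chip, re-init the rings, then reprogram the
 * buffer manager watermarks, BDINFO blocks, mailboxes and DMA
 * engines before anything is re-enabled.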
*/ 9819 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy) 9820 { 9821 u32 val, rdmac_mode; 9822 int i, err, limit; 9823 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring; 9824 9825 tg3_disable_ints(tp); 9826 9827 tg3_stop_fw(tp); 9828 9829 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT); 9830 9831 if (tg3_flag(tp, INIT_COMPLETE)) 9832 tg3_abort_hw(tp, 1); 9833 9834 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && 9835 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) { 9836 tg3_phy_pull_config(tp); 9837 tg3_eee_pull_config(tp, NULL); 9838 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; 9839 } 9840 9841 /* Enable MAC control of LPI */ 9842 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) 9843 tg3_setup_eee(tp); 9844 9845 if (reset_phy) 9846 tg3_phy_reset(tp); 9847 9848 err = tg3_chip_reset(tp); 9849 if (err) 9850 return err; 9851 9852 tg3_write_sig_legacy(tp, RESET_KIND_INIT); 9853 9854 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) { 9855 val = tr32(TG3_CPMU_CTRL); 9856 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE); 9857 tw32(TG3_CPMU_CTRL, val); 9858 9859 val = tr32(TG3_CPMU_LSPD_10MB_CLK); 9860 val &= ~CPMU_LSPD_10MB_MACCLK_MASK; 9861 val |= CPMU_LSPD_10MB_MACCLK_6_25; 9862 tw32(TG3_CPMU_LSPD_10MB_CLK, val); 9863 9864 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD); 9865 val &= ~CPMU_LNK_AWARE_MACCLK_MASK; 9866 val |= CPMU_LNK_AWARE_MACCLK_6_25; 9867 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val); 9868 9869 val = tr32(TG3_CPMU_HST_ACC); 9870 val &= ~CPMU_HST_ACC_MACCLK_MASK; 9871 val |= CPMU_HST_ACC_MACCLK_6_25; 9872 tw32(TG3_CPMU_HST_ACC, val); 9873 } 9874 9875 if (tg3_asic_rev(tp) == ASIC_REV_57780) { 9876 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK; 9877 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN | 9878 PCIE_PWR_MGMT_L1_THRESH_4MS; 9879 tw32(PCIE_PWR_MGMT_THRESH, val); 9880 9881 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK; 9882 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS); 9883 9884 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR); 9885 9886 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; 9887 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); 9888 } 9889 9890 if (tg3_flag(tp, L1PLLPD_EN)) { 9891 u32 grc_mode = tr32(GRC_MODE); 9892 9893 /* Access the lower 1K of PL PCIE block registers. */ 9894 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; 9895 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL); 9896 9897 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1); 9898 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1, 9899 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN); 9900 9901 tw32(GRC_MODE, grc_mode); 9902 } 9903 9904 if (tg3_flag(tp, 57765_CLASS)) { 9905 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) { 9906 u32 grc_mode = tr32(GRC_MODE); 9907 9908 /* Access the lower 1K of PL PCIE block registers. */ 9909 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; 9910 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL); 9911 9912 val = tr32(TG3_PCIE_TLDLPL_PORT + 9913 TG3_PCIE_PL_LO_PHYCTL5); 9914 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5, 9915 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ); 9916 9917 tw32(GRC_MODE, grc_mode); 9918 } 9919 9920 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) { 9921 u32 grc_mode; 9922 9923 /* Fix transmit hangs */ 9924 val = tr32(TG3_CPMU_PADRNG_CTL); 9925 val |= TG3_CPMU_PADRNG_CTL_RDIV2; 9926 tw32(TG3_CPMU_PADRNG_CTL, val); 9927 9928 grc_mode = tr32(GRC_MODE); 9929 9930 /* Access the lower 1K of DL PCIE block registers. 
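 * Same bank-switch pattern as the PL accesses above: select the
 * bank through GRC_MODE_PCIE_PORT_MASK, read-modify-write the
 * target register via the TG3_PCIE_TLDLPL_PORT window, then
 * restore the saved grc_mode.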
*/ 9931 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; 9932 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL); 9933 9934 val = tr32(TG3_PCIE_TLDLPL_PORT + 9935 TG3_PCIE_DL_LO_FTSMAX); 9936 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK; 9937 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX, 9938 val | TG3_PCIE_DL_LO_FTSMAX_VAL); 9939 9940 tw32(GRC_MODE, grc_mode); 9941 } 9942 9943 val = tr32(TG3_CPMU_LSPD_10MB_CLK); 9944 val &= ~CPMU_LSPD_10MB_MACCLK_MASK; 9945 val |= CPMU_LSPD_10MB_MACCLK_6_25; 9946 tw32(TG3_CPMU_LSPD_10MB_CLK, val); 9947 } 9948 9949 /* This works around an issue with Athlon chipsets on 9950 * B3 tigon3 silicon. This bit has no effect on any 9951 * other revision. But do not set this on PCI Express 9952 * chips and don't even touch the clocks if the CPMU is present. 9953 */ 9954 if (!tg3_flag(tp, CPMU_PRESENT)) { 9955 if (!tg3_flag(tp, PCI_EXPRESS)) 9956 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT; 9957 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); 9958 } 9959 9960 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 && 9961 tg3_flag(tp, PCIX_MODE)) { 9962 val = tr32(TG3PCI_PCISTATE); 9963 val |= PCISTATE_RETRY_SAME_DMA; 9964 tw32(TG3PCI_PCISTATE, val); 9965 } 9966 9967 if (tg3_flag(tp, ENABLE_APE)) { 9968 /* Allow reads and writes to the 9969 * APE register and memory space. 9970 */ 9971 val = tr32(TG3PCI_PCISTATE); 9972 val |= PCISTATE_ALLOW_APE_CTLSPC_WR | 9973 PCISTATE_ALLOW_APE_SHMEM_WR | 9974 PCISTATE_ALLOW_APE_PSPACE_WR; 9975 tw32(TG3PCI_PCISTATE, val); 9976 } 9977 9978 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) { 9979 /* Enable some hw fixes. */ 9980 val = tr32(TG3PCI_MSI_DATA); 9981 val |= (1 << 26) | (1 << 28) | (1 << 29); 9982 tw32(TG3PCI_MSI_DATA, val); 9983 } 9984 9985 /* Descriptor ring init may make accesses to the 9986 * NIC SRAM area to set up the TX descriptors, so we 9987 * can only do this after the hardware has been 9988 * successfully reset. 9989 */ 9990 err = tg3_init_rings(tp); 9991 if (err) 9992 return err; 9993 9994 if (tg3_flag(tp, 57765_PLUS)) { 9995 val = tr32(TG3PCI_DMA_RW_CTRL) & 9996 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT; 9997 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) 9998 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK; 9999 if (!tg3_flag(tp, 57765_CLASS) && 10000 tg3_asic_rev(tp) != ASIC_REV_5717 && 10001 tg3_asic_rev(tp) != ASIC_REV_5762) 10002 val |= DMA_RWCTRL_TAGGED_STAT_WA; 10003 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl); 10004 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 && 10005 tg3_asic_rev(tp) != ASIC_REV_5761) { 10006 /* This value is determined during the probe-time DMA 10007 * engine test, tg3_test_dma. 10008 */ 10009 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 10010 } 10011 10012 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS | 10013 GRC_MODE_4X_NIC_SEND_RINGS | 10014 GRC_MODE_NO_TX_PHDR_CSUM | 10015 GRC_MODE_NO_RX_PHDR_CSUM); 10016 tp->grc_mode |= GRC_MODE_HOST_SENDBDS; 10017 10018 /* Pseudo-header checksum is done by hardware logic and not 10019 * the offload processors, so make the chip do the pseudo- 10020 * header checksums on receive. For transmit it is more 10021 * convenient to do the pseudo-header checksum in software 10022 * as Linux does that on transmit for us in all cases.
10023 */ 10024 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM; 10025 10026 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP; 10027 if (tp->rxptpctl) 10028 tw32(TG3_RX_PTP_CTL, 10029 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK); 10030 10031 if (tg3_flag(tp, PTP_CAPABLE)) 10032 val |= GRC_MODE_TIME_SYNC_ENABLE; 10033 10034 tw32(GRC_MODE, tp->grc_mode | val); 10035 10036 /* Setup the timer prescalar register. Clock is always 66Mhz. */ 10037 val = tr32(GRC_MISC_CFG); 10038 val &= ~0xff; 10039 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT); 10040 tw32(GRC_MISC_CFG, val); 10041 10042 /* Initialize MBUF/DESC pool. */ 10043 if (tg3_flag(tp, 5750_PLUS)) { 10044 /* Do nothing. */ 10045 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) { 10046 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE); 10047 if (tg3_asic_rev(tp) == ASIC_REV_5704) 10048 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64); 10049 else 10050 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96); 10051 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE); 10052 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE); 10053 } else if (tg3_flag(tp, TSO_CAPABLE)) { 10054 int fw_len; 10055 10056 fw_len = tp->fw_len; 10057 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1); 10058 tw32(BUFMGR_MB_POOL_ADDR, 10059 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len); 10060 tw32(BUFMGR_MB_POOL_SIZE, 10061 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00); 10062 } 10063 10064 if (tp->dev->mtu <= ETH_DATA_LEN) { 10065 tw32(BUFMGR_MB_RDMA_LOW_WATER, 10066 tp->bufmgr_config.mbuf_read_dma_low_water); 10067 tw32(BUFMGR_MB_MACRX_LOW_WATER, 10068 tp->bufmgr_config.mbuf_mac_rx_low_water); 10069 tw32(BUFMGR_MB_HIGH_WATER, 10070 tp->bufmgr_config.mbuf_high_water); 10071 } else { 10072 tw32(BUFMGR_MB_RDMA_LOW_WATER, 10073 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo); 10074 tw32(BUFMGR_MB_MACRX_LOW_WATER, 10075 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo); 10076 tw32(BUFMGR_MB_HIGH_WATER, 10077 tp->bufmgr_config.mbuf_high_water_jumbo); 10078 } 10079 tw32(BUFMGR_DMA_LOW_WATER, 10080 tp->bufmgr_config.dma_low_water); 10081 tw32(BUFMGR_DMA_HIGH_WATER, 10082 tp->bufmgr_config.dma_high_water); 10083 10084 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE; 10085 if (tg3_asic_rev(tp) == ASIC_REV_5719) 10086 val |= BUFMGR_MODE_NO_TX_UNDERRUN; 10087 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 10088 tg3_asic_rev(tp) == ASIC_REV_5762 || 10089 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || 10090 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) 10091 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB; 10092 tw32(BUFMGR_MODE, val); 10093 for (i = 0; i < 2000; i++) { 10094 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE) 10095 break; 10096 udelay(10); 10097 } 10098 if (i >= 2000) { 10099 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__); 10100 return -ENODEV; 10101 } 10102 10103 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1) 10104 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2); 10105 10106 tg3_setup_rxbd_thresholds(tp); 10107 10108 /* Initialize TG3_BDINFO's at: 10109 * RCVDBDI_STD_BD: standard eth size rx ring 10110 * RCVDBDI_JUMBO_BD: jumbo frame rx ring 10111 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work) 10112 * 10113 * like so: 10114 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring 10115 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) | 10116 * ring attribute flags 10117 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM 10118 * 10119 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries. 
10120 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries. 10121 * 10122 * The size of each ring is fixed in the firmware, but the location is 10123 * configurable. 10124 */ 10125 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, 10126 ((u64) tpr->rx_std_mapping >> 32)); 10127 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, 10128 ((u64) tpr->rx_std_mapping & 0xffffffff)); 10129 if (!tg3_flag(tp, 5717_PLUS)) 10130 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, 10131 NIC_SRAM_RX_BUFFER_DESC); 10132 10133 /* Disable the mini ring */ 10134 if (!tg3_flag(tp, 5705_PLUS)) 10135 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS, 10136 BDINFO_FLAGS_DISABLED); 10137 10138 /* Program the jumbo buffer descriptor ring control 10139 * blocks on those devices that have them. 10140 */ 10141 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || 10142 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) { 10143 10144 if (tg3_flag(tp, JUMBO_RING_ENABLE)) { 10145 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, 10146 ((u64) tpr->rx_jmb_mapping >> 32)); 10147 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, 10148 ((u64) tpr->rx_jmb_mapping & 0xffffffff)); 10149 val = TG3_RX_JMB_RING_SIZE(tp) << 10150 BDINFO_FLAGS_MAXLEN_SHIFT; 10151 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, 10152 val | BDINFO_FLAGS_USE_EXT_RECV); 10153 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) || 10154 tg3_flag(tp, 57765_CLASS) || 10155 tg3_asic_rev(tp) == ASIC_REV_5762) 10156 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, 10157 NIC_SRAM_RX_JUMBO_BUFFER_DESC); 10158 } else { 10159 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, 10160 BDINFO_FLAGS_DISABLED); 10161 } 10162 10163 if (tg3_flag(tp, 57765_PLUS)) { 10164 val = TG3_RX_STD_RING_SIZE(tp); 10165 val <<= BDINFO_FLAGS_MAXLEN_SHIFT; 10166 val |= (TG3_RX_STD_DMA_SZ << 2); 10167 } else 10168 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT; 10169 } else 10170 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT; 10171 10172 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val); 10173 10174 tpr->rx_std_prod_idx = tp->rx_pending; 10175 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx); 10176 10177 tpr->rx_jmb_prod_idx = 10178 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0; 10179 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx); 10180 10181 tg3_rings_reset(tp); 10182 10183 /* Initialize MAC address and backoff seed. */ 10184 __tg3_set_mac_addr(tp, false); 10185 10186 /* MTU + ethernet header + FCS + optional VLAN tag */ 10187 tw32(MAC_RX_MTU_SIZE, 10188 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); 10189 10190 /* The slot time is changed by tg3_setup_phy if we 10191 * run at gigabit with half duplex. 10192 */ 10193 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) | 10194 (6 << TX_LENGTHS_IPG_SHIFT) | 10195 (32 << TX_LENGTHS_SLOT_TIME_SHIFT); 10196 10197 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 10198 tg3_asic_rev(tp) == ASIC_REV_5762) 10199 val |= tr32(MAC_TX_LENGTHS) & 10200 (TX_LENGTHS_JMB_FRM_LEN_MSK | 10201 TX_LENGTHS_CNT_DWN_VAL_MSK); 10202 10203 tw32(MAC_TX_LENGTHS, val); 10204 10205 /* Receive rules. */ 10206 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS); 10207 tw32(RCVLPC_CONFIG, 0x0181); 10208 10209 /* Calculate RDMAC_MODE setting early, we need it to determine 10210 * the RCVLPC_STATE_ENABLE mask. 
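* The base value below enables the read DMA engine together with its
* error attentions (target/master abort, parity, address overflow,
* FIFO overflow/underrun/overread, long read); chip-specific bits
* are OR'd in afterwards.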
10211 */ 10212 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB | 10213 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB | 10214 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB | 10215 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB | 10216 RDMAC_MODE_LNGREAD_ENAB); 10217 10218 if (tg3_asic_rev(tp) == ASIC_REV_5717) 10219 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS; 10220 10221 if (tg3_asic_rev(tp) == ASIC_REV_5784 || 10222 tg3_asic_rev(tp) == ASIC_REV_5785 || 10223 tg3_asic_rev(tp) == ASIC_REV_57780) 10224 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB | 10225 RDMAC_MODE_MBUF_RBD_CRPT_ENAB | 10226 RDMAC_MODE_MBUF_SBD_CRPT_ENAB; 10227 10228 if (tg3_asic_rev(tp) == ASIC_REV_5705 && 10229 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) { 10230 if (tg3_flag(tp, TSO_CAPABLE) && 10231 tg3_asic_rev(tp) == ASIC_REV_5705) { 10232 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128; 10233 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && 10234 !tg3_flag(tp, IS_5788)) { 10235 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; 10236 } 10237 } 10238 10239 if (tg3_flag(tp, PCI_EXPRESS)) 10240 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; 10241 10242 if (tg3_asic_rev(tp) == ASIC_REV_57766) { 10243 tp->dma_limit = 0; 10244 if (tp->dev->mtu <= ETH_DATA_LEN) { 10245 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR; 10246 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K; 10247 } 10248 } 10249 10250 if (tg3_flag(tp, HW_TSO_1) || 10251 tg3_flag(tp, HW_TSO_2) || 10252 tg3_flag(tp, HW_TSO_3)) 10253 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN; 10254 10255 if (tg3_flag(tp, 57765_PLUS) || 10256 tg3_asic_rev(tp) == ASIC_REV_5785 || 10257 tg3_asic_rev(tp) == ASIC_REV_57780) 10258 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN; 10259 10260 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 10261 tg3_asic_rev(tp) == ASIC_REV_5762) 10262 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET; 10263 10264 if (tg3_asic_rev(tp) == ASIC_REV_5761 || 10265 tg3_asic_rev(tp) == ASIC_REV_5784 || 10266 tg3_asic_rev(tp) == ASIC_REV_5785 || 10267 tg3_asic_rev(tp) == ASIC_REV_57780 || 10268 tg3_flag(tp, 57765_PLUS)) { 10269 u32 tgtreg; 10270 10271 if (tg3_asic_rev(tp) == ASIC_REV_5762) 10272 tgtreg = TG3_RDMA_RSRVCTRL_REG2; 10273 else 10274 tgtreg = TG3_RDMA_RSRVCTRL_REG; 10275 10276 val = tr32(tgtreg); 10277 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || 10278 tg3_asic_rev(tp) == ASIC_REV_5762) { 10279 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK | 10280 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK | 10281 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK); 10282 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B | 10283 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K | 10284 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K; 10285 } 10286 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX); 10287 } 10288 10289 if (tg3_asic_rev(tp) == ASIC_REV_5719 || 10290 tg3_asic_rev(tp) == ASIC_REV_5720 || 10291 tg3_asic_rev(tp) == ASIC_REV_5762) { 10292 u32 tgtreg; 10293 10294 if (tg3_asic_rev(tp) == ASIC_REV_5762) 10295 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2; 10296 else 10297 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL; 10298 10299 val = tr32(tgtreg); 10300 tw32(tgtreg, val | 10301 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K | 10302 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K); 10303 } 10304 10305 /* Receive/send statistics. 
*/ 10306 if (tg3_flag(tp, 5750_PLUS)) { 10307 val = tr32(RCVLPC_STATS_ENABLE); 10308 val &= ~RCVLPC_STATSENAB_DACK_FIX; 10309 tw32(RCVLPC_STATS_ENABLE, val); 10310 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) && 10311 tg3_flag(tp, TSO_CAPABLE)) { 10312 val = tr32(RCVLPC_STATS_ENABLE); 10313 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX; 10314 tw32(RCVLPC_STATS_ENABLE, val); 10315 } else { 10316 tw32(RCVLPC_STATS_ENABLE, 0xffffff); 10317 } 10318 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE); 10319 tw32(SNDDATAI_STATSENAB, 0xffffff); 10320 tw32(SNDDATAI_STATSCTRL, 10321 (SNDDATAI_SCTRL_ENABLE | 10322 SNDDATAI_SCTRL_FASTUPD)); 10323 10324 /* Setup host coalescing engine. */ 10325 tw32(HOSTCC_MODE, 0); 10326 for (i = 0; i < 2000; i++) { 10327 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE)) 10328 break; 10329 udelay(10); 10330 } 10331 10332 __tg3_set_coalesce(tp, &tp->coal); 10333 10334 if (!tg3_flag(tp, 5705_PLUS)) { 10335 /* Status/statistics block address. See tg3_timer, 10336 * the tg3_periodic_fetch_stats call there, and 10337 * tg3_get_stats to see how this works for 5705/5750 chips. 10338 */ 10339 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, 10340 ((u64) tp->stats_mapping >> 32)); 10341 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, 10342 ((u64) tp->stats_mapping & 0xffffffff)); 10343 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK); 10344 10345 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK); 10346 10347 /* Clear statistics and status block memory areas */ 10348 for (i = NIC_SRAM_STATS_BLK; 10349 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE; 10350 i += sizeof(u32)) { 10351 tg3_write_mem(tp, i, 0); 10352 udelay(40); 10353 } 10354 } 10355 10356 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode); 10357 10358 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE); 10359 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE); 10360 if (!tg3_flag(tp, 5705_PLUS)) 10361 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE); 10362 10363 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { 10364 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 10365 /* reset to prevent losing 1st rx packet intermittently */ 10366 tw32_f(MAC_RX_MODE, RX_MODE_RESET); 10367 udelay(10); 10368 } 10369 10370 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | 10371 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | 10372 MAC_MODE_FHDE_ENABLE; 10373 if (tg3_flag(tp, ENABLE_APE)) 10374 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; 10375 if (!tg3_flag(tp, 5705_PLUS) && 10376 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 10377 tg3_asic_rev(tp) != ASIC_REV_5700) 10378 tp->mac_mode |= MAC_MODE_LINK_POLARITY; 10379 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR); 10380 udelay(40); 10381 10382 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants(). 10383 * If TG3_FLAG_IS_NIC is zero, we should read the 10384 * register to preserve the GPIO settings for LOMs. The GPIOs, 10385 * whether used as inputs or outputs, are set by boot code after 10386 * reset. 
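* The read-modify-write of GRC_LOCAL_CTRL below therefore preserves
* whatever GPIO state the boot code left behind on LOM designs.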
10387 */ 10388 if (!tg3_flag(tp, IS_NIC)) { 10389 u32 gpio_mask; 10390 10391 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 | 10392 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 | 10393 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2; 10394 10395 if (tg3_asic_rev(tp) == ASIC_REV_5752) 10396 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 | 10397 GRC_LCLCTRL_GPIO_OUTPUT3; 10398 10399 if (tg3_asic_rev(tp) == ASIC_REV_5755) 10400 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL; 10401 10402 tp->grc_local_ctrl &= ~gpio_mask; 10403 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask; 10404 10405 /* GPIO1 must be driven high for eeprom write protect */ 10406 if (tg3_flag(tp, EEPROM_WRITE_PROT)) 10407 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | 10408 GRC_LCLCTRL_GPIO_OUTPUT1); 10409 } 10410 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 10411 udelay(100); 10412 10413 if (tg3_flag(tp, USING_MSIX)) { 10414 val = tr32(MSGINT_MODE); 10415 val |= MSGINT_MODE_ENABLE; 10416 if (tp->irq_cnt > 1) 10417 val |= MSGINT_MODE_MULTIVEC_EN; 10418 if (!tg3_flag(tp, 1SHOT_MSI)) 10419 val |= MSGINT_MODE_ONE_SHOT_DISABLE; 10420 tw32(MSGINT_MODE, val); 10421 } 10422 10423 if (!tg3_flag(tp, 5705_PLUS)) { 10424 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE); 10425 udelay(40); 10426 } 10427 10428 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB | 10429 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB | 10430 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB | 10431 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB | 10432 WDMAC_MODE_LNGREAD_ENAB); 10433 10434 if (tg3_asic_rev(tp) == ASIC_REV_5705 && 10435 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) { 10436 if (tg3_flag(tp, TSO_CAPABLE) && 10437 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 || 10438 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) { 10439 /* nothing */ 10440 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && 10441 !tg3_flag(tp, IS_5788)) { 10442 val |= WDMAC_MODE_RX_ACCEL; 10443 } 10444 } 10445 10446 /* Enable host coalescing bug fix */ 10447 if (tg3_flag(tp, 5755_PLUS)) 10448 val |= WDMAC_MODE_STATUS_TAG_FIX; 10449 10450 if (tg3_asic_rev(tp) == ASIC_REV_5785) 10451 val |= WDMAC_MODE_BURST_ALL_DATA; 10452 10453 tw32_f(WDMAC_MODE, val); 10454 udelay(40); 10455 10456 if (tg3_flag(tp, PCIX_MODE)) { 10457 u16 pcix_cmd; 10458 10459 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 10460 &pcix_cmd); 10461 if (tg3_asic_rev(tp) == ASIC_REV_5703) { 10462 pcix_cmd &= ~PCI_X_CMD_MAX_READ; 10463 pcix_cmd |= PCI_X_CMD_READ_2K; 10464 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) { 10465 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ); 10466 pcix_cmd |= PCI_X_CMD_READ_2K; 10467 } 10468 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 10469 pcix_cmd); 10470 } 10471 10472 tw32_f(RDMAC_MODE, rdmac_mode); 10473 udelay(40); 10474 10475 if (tg3_asic_rev(tp) == ASIC_REV_5719 || 10476 tg3_asic_rev(tp) == ASIC_REV_5720) { 10477 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) { 10478 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp)) 10479 break; 10480 } 10481 if (i < TG3_NUM_RDMA_CHANNELS) { 10482 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); 10483 val |= tg3_lso_rd_dma_workaround_bit(tp); 10484 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val); 10485 tg3_flag_set(tp, 5719_5720_RDMA_BUG); 10486 } 10487 } 10488 10489 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE); 10490 if (!tg3_flag(tp, 5705_PLUS)) 10491 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE); 10492 10493 if (tg3_asic_rev(tp) == ASIC_REV_5761) 10494 tw32(SNDDATAC_MODE, 10495 SNDDATAC_MODE_ENABLE | 
SNDDATAC_MODE_CDELAY); 10496 else 10497 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE); 10498 10499 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE); 10500 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB); 10501 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ; 10502 if (tg3_flag(tp, LRG_PROD_RING_CAP)) 10503 val |= RCVDBDI_MODE_LRG_RING_SZ; 10504 tw32(RCVDBDI_MODE, val); 10505 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE); 10506 if (tg3_flag(tp, HW_TSO_1) || 10507 tg3_flag(tp, HW_TSO_2) || 10508 tg3_flag(tp, HW_TSO_3)) 10509 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8); 10510 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE; 10511 if (tg3_flag(tp, ENABLE_TSS)) 10512 val |= SNDBDI_MODE_MULTI_TXQ_EN; 10513 tw32(SNDBDI_MODE, val); 10514 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE); 10515 10516 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) { 10517 err = tg3_load_5701_a0_firmware_fix(tp); 10518 if (err) 10519 return err; 10520 } 10521 10522 if (tg3_asic_rev(tp) == ASIC_REV_57766) { 10523 /* Ignore any errors for the firmware download. If download 10524 * fails, the device will operate with EEE disabled 10525 */ 10526 tg3_load_57766_firmware(tp); 10527 } 10528 10529 if (tg3_flag(tp, TSO_CAPABLE)) { 10530 err = tg3_load_tso_firmware(tp); 10531 if (err) 10532 return err; 10533 } 10534 10535 tp->tx_mode = TX_MODE_ENABLE; 10536 10537 if (tg3_flag(tp, 5755_PLUS) || 10538 tg3_asic_rev(tp) == ASIC_REV_5906) 10539 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX; 10540 10541 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 10542 tg3_asic_rev(tp) == ASIC_REV_5762) { 10543 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE; 10544 tp->tx_mode &= ~val; 10545 tp->tx_mode |= tr32(MAC_TX_MODE) & val; 10546 } 10547 10548 tw32_f(MAC_TX_MODE, tp->tx_mode); 10549 udelay(100); 10550 10551 if (tg3_flag(tp, ENABLE_RSS)) { 10552 u32 rss_key[10]; 10553 10554 tg3_rss_write_indir_tbl(tp); 10555 10556 netdev_rss_key_fill(rss_key, 10 * sizeof(u32)); 10557 10558 for (i = 0; i < 10 ; i++) 10559 tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]); 10560 } 10561 10562 tp->rx_mode = RX_MODE_ENABLE; 10563 if (tg3_flag(tp, 5755_PLUS)) 10564 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE; 10565 10566 if (tg3_asic_rev(tp) == ASIC_REV_5762) 10567 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX; 10568 10569 if (tg3_flag(tp, ENABLE_RSS)) 10570 tp->rx_mode |= RX_MODE_RSS_ENABLE | 10571 RX_MODE_RSS_ITBL_HASH_BITS_7 | 10572 RX_MODE_RSS_IPV6_HASH_EN | 10573 RX_MODE_RSS_TCP_IPV6_HASH_EN | 10574 RX_MODE_RSS_IPV4_HASH_EN | 10575 RX_MODE_RSS_TCP_IPV4_HASH_EN; 10576 10577 tw32_f(MAC_RX_MODE, tp->rx_mode); 10578 udelay(10); 10579 10580 tw32(MAC_LED_CTRL, tp->led_ctrl); 10581 10582 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); 10583 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 10584 tw32_f(MAC_RX_MODE, RX_MODE_RESET); 10585 udelay(10); 10586 } 10587 tw32_f(MAC_RX_MODE, tp->rx_mode); 10588 udelay(10); 10589 10590 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 10591 if ((tg3_asic_rev(tp) == ASIC_REV_5704) && 10592 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) { 10593 /* Set drive transmission level to 1.2V */ 10594 /* only if the signal pre-emphasis bit is not set */ 10595 val = tr32(MAC_SERDES_CFG); 10596 val &= 0xfffff000; 10597 val |= 0x880; 10598 tw32(MAC_SERDES_CFG, val); 10599 } 10600 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) 10601 tw32(MAC_SERDES_CFG, 0x616000); 10602 } 10603 10604 /* Prevent chip from dropping frames when flow control 10605 * is enabled. 
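* 57765-class devices get a low-water mark of one frame; all other
* chips use two (see the write just below).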
10606 */ 10607 if (tg3_flag(tp, 57765_CLASS)) 10608 val = 1; 10609 else 10610 val = 2; 10611 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val); 10612 10613 if (tg3_asic_rev(tp) == ASIC_REV_5704 && 10614 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { 10615 /* Use hardware link auto-negotiation */ 10616 tg3_flag_set(tp, HW_AUTONEG); 10617 } 10618 10619 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && 10620 tg3_asic_rev(tp) == ASIC_REV_5714) { 10621 u32 tmp; 10622 10623 tmp = tr32(SERDES_RX_CTRL); 10624 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT); 10625 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT; 10626 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT; 10627 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 10628 } 10629 10630 if (!tg3_flag(tp, USE_PHYLIB)) { 10631 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 10632 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; 10633 10634 err = tg3_setup_phy(tp, false); 10635 if (err) 10636 return err; 10637 10638 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 10639 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) { 10640 u32 tmp; 10641 10642 /* Clear CRC stats. */ 10643 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) { 10644 tg3_writephy(tp, MII_TG3_TEST1, 10645 tmp | MII_TG3_TEST1_CRC_EN); 10646 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp); 10647 } 10648 } 10649 } 10650 10651 __tg3_set_rx_mode(tp->dev); 10652 10653 /* Initialize receive rules. */ 10654 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK); 10655 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK); 10656 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK); 10657 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK); 10658 10659 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) 10660 limit = 8; 10661 else 10662 limit = 16; 10663 if (tg3_flag(tp, ENABLE_ASF)) 10664 limit -= 4; 10665 switch (limit) { 10666 case 16: 10667 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0); 10668 case 15: 10669 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0); 10670 case 14: 10671 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0); 10672 case 13: 10673 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0); 10674 case 12: 10675 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0); 10676 case 11: 10677 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0); 10678 case 10: 10679 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0); 10680 case 9: 10681 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0); 10682 case 8: 10683 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0); 10684 case 7: 10685 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0); 10686 case 6: 10687 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0); 10688 case 5: 10689 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0); 10690 case 4: 10691 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */ 10692 case 3: 10693 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */ 10694 case 2: 10695 case 1: 10696 10697 default: 10698 break; 10699 } 10700 10701 if (tg3_flag(tp, ENABLE_APE)) 10702 /* Write our heartbeat update interval to APE. */ 10703 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS, 10704 APE_HOST_HEARTBEAT_INT_DISABLE); 10705 10706 tg3_write_sig_post_reset(tp, RESET_KIND_INIT); 10707 10708 return 0; 10709 } 10710 10711 /* Called at device open time to get the chip ready for 10712 * packet processing. Invoked with tp->lock held. 10713 */ 10714 static int tg3_init_hw(struct tg3 *tp, bool reset_phy) 10715 { 10716 /* Chip may have been just powered on. If so, the boot code may still 10717 * be running initialization. 
Wait for it to finish to avoid races in 10718 * accessing the hardware. 10719 */ 10720 tg3_enable_register_access(tp); 10721 tg3_poll_fw(tp); 10722 10723 tg3_switch_clocks(tp); 10724 10725 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); 10726 10727 return tg3_reset_hw(tp, reset_phy); 10728 } 10729 10730 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir) 10731 { 10732 int i; 10733 10734 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) { 10735 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN; 10736 10737 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len); 10738 off += len; 10739 10740 if (ocir->signature != TG3_OCIR_SIG_MAGIC || 10741 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE)) 10742 memset(ocir, 0, TG3_OCIR_LEN); 10743 } 10744 } 10745 10746 /* sysfs attributes for hwmon */ 10747 static ssize_t tg3_show_temp(struct device *dev, 10748 struct device_attribute *devattr, char *buf) 10749 { 10750 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 10751 struct tg3 *tp = dev_get_drvdata(dev); 10752 u32 temperature; 10753 10754 spin_lock_bh(&tp->lock); 10755 tg3_ape_scratchpad_read(tp, &temperature, attr->index, 10756 sizeof(temperature)); 10757 spin_unlock_bh(&tp->lock); 10758 return sprintf(buf, "%u\n", temperature * 1000); 10759 } 10760 10761 10762 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL, 10763 TG3_TEMP_SENSOR_OFFSET); 10764 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL, 10765 TG3_TEMP_CAUTION_OFFSET); 10766 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL, 10767 TG3_TEMP_MAX_OFFSET); 10768 10769 static struct attribute *tg3_attrs[] = { 10770 &sensor_dev_attr_temp1_input.dev_attr.attr, 10771 &sensor_dev_attr_temp1_crit.dev_attr.attr, 10772 &sensor_dev_attr_temp1_max.dev_attr.attr, 10773 NULL 10774 }; 10775 ATTRIBUTE_GROUPS(tg3); 10776 10777 static void tg3_hwmon_close(struct tg3 *tp) 10778 { 10779 if (tp->hwmon_dev) { 10780 hwmon_device_unregister(tp->hwmon_dev); 10781 tp->hwmon_dev = NULL; 10782 } 10783 } 10784 10785 static void tg3_hwmon_open(struct tg3 *tp) 10786 { 10787 int i; 10788 u32 size = 0; 10789 struct pci_dev *pdev = tp->pdev; 10790 struct tg3_ocir ocirs[TG3_SD_NUM_RECS]; 10791 10792 tg3_sd_scan_scratchpad(tp, ocirs); 10793 10794 for (i = 0; i < TG3_SD_NUM_RECS; i++) { 10795 if (!ocirs[i].src_data_length) 10796 continue; 10797 10798 size += ocirs[i].src_hdr_length; 10799 size += ocirs[i].src_data_length; 10800 } 10801 10802 if (!size) 10803 return; 10804 10805 tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3", 10806 tp, tg3_groups); 10807 if (IS_ERR(tp->hwmon_dev)) { 10808 tp->hwmon_dev = NULL; 10809 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n"); 10810 } 10811 } 10812 10813 10814 #define TG3_STAT_ADD32(PSTAT, REG) \ 10815 do { u32 __val = tr32(REG); \ 10816 (PSTAT)->low += __val; \ 10817 if ((PSTAT)->low < __val) \ 10818 (PSTAT)->high += 1; \ 10819 } while (0) 10820 10821 static void tg3_periodic_fetch_stats(struct tg3 *tp) 10822 { 10823 struct tg3_hw_stats *sp = tp->hw_stats; 10824 10825 if (!tp->link_up) 10826 return; 10827 10828 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS); 10829 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS); 10830 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT); 10831 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT); 10832 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS); 10833 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS); 10834 
TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS); 10835 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED); 10836 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL); 10837 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL); 10838 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST); 10839 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST); 10840 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST); 10841 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) && 10842 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low + 10843 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) { 10844 u32 val; 10845 10846 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); 10847 val &= ~tg3_lso_rd_dma_workaround_bit(tp); 10848 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val); 10849 tg3_flag_clear(tp, 5719_5720_RDMA_BUG); 10850 } 10851 10852 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS); 10853 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS); 10854 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST); 10855 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST); 10856 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST); 10857 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS); 10858 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS); 10859 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD); 10860 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD); 10861 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD); 10862 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED); 10863 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG); 10864 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS); 10865 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE); 10866 10867 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT); 10868 if (tg3_asic_rev(tp) != ASIC_REV_5717 && 10869 tg3_asic_rev(tp) != ASIC_REV_5762 && 10870 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 && 10871 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) { 10872 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT); 10873 } else { 10874 u32 val = tr32(HOSTCC_FLOW_ATTN); 10875 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 
1 : 0; 10876 if (val) { 10877 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM); 10878 sp->rx_discards.low += val; 10879 if (sp->rx_discards.low < val) 10880 sp->rx_discards.high += 1; 10881 } 10882 sp->mbuf_lwm_thresh_hit = sp->rx_discards; 10883 } 10884 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT); 10885 } 10886 10887 static void tg3_chk_missed_msi(struct tg3 *tp) 10888 { 10889 u32 i; 10890 10891 for (i = 0; i < tp->irq_cnt; i++) { 10892 struct tg3_napi *tnapi = &tp->napi[i]; 10893 10894 if (tg3_has_work(tnapi)) { 10895 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr && 10896 tnapi->last_tx_cons == tnapi->tx_cons) { 10897 if (tnapi->chk_msi_cnt < 1) { 10898 tnapi->chk_msi_cnt++; 10899 return; 10900 } 10901 tg3_msi(0, tnapi); 10902 } 10903 } 10904 tnapi->chk_msi_cnt = 0; 10905 tnapi->last_rx_cons = tnapi->rx_rcb_ptr; 10906 tnapi->last_tx_cons = tnapi->tx_cons; 10907 } 10908 } 10909 10910 static void tg3_timer(unsigned long __opaque) 10911 { 10912 struct tg3 *tp = (struct tg3 *) __opaque; 10913 10914 spin_lock(&tp->lock); 10915 10916 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) { 10917 spin_unlock(&tp->lock); 10918 goto restart_timer; 10919 } 10920 10921 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 10922 tg3_flag(tp, 57765_CLASS)) 10923 tg3_chk_missed_msi(tp); 10924 10925 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) { 10926 /* BCM4785: Flush posted writes from GbE to host memory. */ 10927 tr32(HOSTCC_MODE); 10928 } 10929 10930 if (!tg3_flag(tp, TAGGED_STATUS)) { 10931 /* All of this garbage is because when using non-tagged 10932 * IRQ status the mailbox/status_block protocol the chip 10933 * uses with the cpu is race prone. 10934 */ 10935 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) { 10936 tw32(GRC_LOCAL_CTRL, 10937 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); 10938 } else { 10939 tw32(HOSTCC_MODE, tp->coalesce_mode | 10940 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW); 10941 } 10942 10943 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { 10944 spin_unlock(&tp->lock); 10945 tg3_reset_task_schedule(tp); 10946 goto restart_timer; 10947 } 10948 } 10949 10950 /* This part only runs once per second. 
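* timer_counter is reloaded from timer_multiplier at the bottom of
* this block, so the body executes once per second even when the
* timer itself fires every HZ/10 jiffies (see tg3_timer_init).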
*/ 10951 if (!--tp->timer_counter) { 10952 if (tg3_flag(tp, 5705_PLUS)) 10953 tg3_periodic_fetch_stats(tp); 10954 10955 if (tp->setlpicnt && !--tp->setlpicnt) 10956 tg3_phy_eee_enable(tp); 10957 10958 if (tg3_flag(tp, USE_LINKCHG_REG)) { 10959 u32 mac_stat; 10960 int phy_event; 10961 10962 mac_stat = tr32(MAC_STATUS); 10963 10964 phy_event = 0; 10965 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) { 10966 if (mac_stat & MAC_STATUS_MI_INTERRUPT) 10967 phy_event = 1; 10968 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED) 10969 phy_event = 1; 10970 10971 if (phy_event) 10972 tg3_setup_phy(tp, false); 10973 } else if (tg3_flag(tp, POLL_SERDES)) { 10974 u32 mac_stat = tr32(MAC_STATUS); 10975 int need_setup = 0; 10976 10977 if (tp->link_up && 10978 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) { 10979 need_setup = 1; 10980 } 10981 if (!tp->link_up && 10982 (mac_stat & (MAC_STATUS_PCS_SYNCED | 10983 MAC_STATUS_SIGNAL_DET))) { 10984 need_setup = 1; 10985 } 10986 if (need_setup) { 10987 if (!tp->serdes_counter) { 10988 tw32_f(MAC_MODE, 10989 (tp->mac_mode & 10990 ~MAC_MODE_PORT_MODE_MASK)); 10991 udelay(40); 10992 tw32_f(MAC_MODE, tp->mac_mode); 10993 udelay(40); 10994 } 10995 tg3_setup_phy(tp, false); 10996 } 10997 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && 10998 tg3_flag(tp, 5780_CLASS)) { 10999 tg3_serdes_parallel_detect(tp); 11000 } else if (tg3_flag(tp, POLL_CPMU_LINK)) { 11001 u32 cpmu = tr32(TG3_CPMU_STATUS); 11002 bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) == 11003 TG3_CPMU_STATUS_LINK_MASK); 11004 11005 if (link_up != tp->link_up) 11006 tg3_setup_phy(tp, false); 11007 } 11008 11009 tp->timer_counter = tp->timer_multiplier; 11010 } 11011 11012 /* Heartbeat is only sent once every 2 seconds. 11013 * 11014 * The heartbeat is to tell the ASF firmware that the host 11015 * driver is still alive. In the event that the OS crashes, 11016 * ASF needs to reset the hardware to free up the FIFO space 11017 * that may be filled with rx packets destined for the host. 11018 * If the FIFO is full, ASF will no longer function properly. 11019 * 11020 * Unintended resets have been reported on real time kernels 11021 * where the timer doesn't run on time. Netpoll will also have 11022 * the same problem. 11023 * 11024 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware 11025 * to check the ring condition when the heartbeat is expiring 11026 * before doing the reset. This will prevent most unintended 11027 * resets.
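* The event below is generated only when ASF is enabled and the APE
* is not (see the flag checks that follow).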
11028 */ 11029 if (!--tp->asf_counter) { 11030 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) { 11031 tg3_wait_for_event_ack(tp); 11032 11033 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, 11034 FWCMD_NICDRV_ALIVE3); 11035 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4); 11036 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 11037 TG3_FW_UPDATE_TIMEOUT_SEC); 11038 11039 tg3_generate_fw_event(tp); 11040 } 11041 tp->asf_counter = tp->asf_multiplier; 11042 } 11043 11044 spin_unlock(&tp->lock); 11045 11046 restart_timer: 11047 tp->timer.expires = jiffies + tp->timer_offset; 11048 add_timer(&tp->timer); 11049 } 11050 11051 static void tg3_timer_init(struct tg3 *tp) 11052 { 11053 if (tg3_flag(tp, TAGGED_STATUS) && 11054 tg3_asic_rev(tp) != ASIC_REV_5717 && 11055 !tg3_flag(tp, 57765_CLASS)) 11056 tp->timer_offset = HZ; 11057 else 11058 tp->timer_offset = HZ / 10; 11059 11060 BUG_ON(tp->timer_offset > HZ); 11061 11062 tp->timer_multiplier = (HZ / tp->timer_offset); 11063 tp->asf_multiplier = (HZ / tp->timer_offset) * 11064 TG3_FW_UPDATE_FREQ_SEC; 11065 11066 init_timer(&tp->timer); 11067 tp->timer.data = (unsigned long) tp; 11068 tp->timer.function = tg3_timer; 11069 } 11070 11071 static void tg3_timer_start(struct tg3 *tp) 11072 { 11073 tp->asf_counter = tp->asf_multiplier; 11074 tp->timer_counter = tp->timer_multiplier; 11075 11076 tp->timer.expires = jiffies + tp->timer_offset; 11077 add_timer(&tp->timer); 11078 } 11079 11080 static void tg3_timer_stop(struct tg3 *tp) 11081 { 11082 del_timer_sync(&tp->timer); 11083 } 11084 11085 /* Restart hardware after configuration changes, self-test, etc. 11086 * Invoked with tp->lock held. 11087 */ 11088 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy) 11089 __releases(tp->lock) 11090 __acquires(tp->lock) 11091 { 11092 int err; 11093 11094 err = tg3_init_hw(tp, reset_phy); 11095 if (err) { 11096 netdev_err(tp->dev, 11097 "Failed to re-initialize device, aborting\n"); 11098 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11099 tg3_full_unlock(tp); 11100 tg3_timer_stop(tp); 11101 tp->irq_sync = 0; 11102 tg3_napi_enable(tp); 11103 dev_close(tp->dev); 11104 tg3_full_lock(tp, 0); 11105 } 11106 return err; 11107 } 11108 11109 static void tg3_reset_task(struct work_struct *work) 11110 { 11111 struct tg3 *tp = container_of(work, struct tg3, reset_task); 11112 int err; 11113 11114 rtnl_lock(); 11115 tg3_full_lock(tp, 0); 11116 11117 if (!netif_running(tp->dev)) { 11118 tg3_flag_clear(tp, RESET_TASK_PENDING); 11119 tg3_full_unlock(tp); 11120 rtnl_unlock(); 11121 return; 11122 } 11123 11124 tg3_full_unlock(tp); 11125 11126 tg3_phy_stop(tp); 11127 11128 tg3_netif_stop(tp); 11129 11130 tg3_full_lock(tp, 1); 11131 11132 if (tg3_flag(tp, TX_RECOVERY_PENDING)) { 11133 tp->write32_tx_mbox = tg3_write32_tx_mbox; 11134 tp->write32_rx_mbox = tg3_write_flush_reg32; 11135 tg3_flag_set(tp, MBOX_WRITE_REORDER); 11136 tg3_flag_clear(tp, TX_RECOVERY_PENDING); 11137 } 11138 11139 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); 11140 err = tg3_init_hw(tp, true); 11141 if (err) 11142 goto out; 11143 11144 tg3_netif_start(tp); 11145 11146 out: 11147 tg3_full_unlock(tp); 11148 11149 if (!err) 11150 tg3_phy_start(tp); 11151 11152 tg3_flag_clear(tp, RESET_TASK_PENDING); 11153 rtnl_unlock(); 11154 } 11155 11156 static int tg3_request_irq(struct tg3 *tp, int irq_num) 11157 { 11158 irq_handler_t fn; 11159 unsigned long flags; 11160 char *name; 11161 struct tg3_napi *tnapi = &tp->napi[irq_num]; 11162 11163 if (tp->irq_cnt == 1) 11164 name = tp->dev->name; 11165 else { 11166 name = &tnapi->irq_lbl[0]; 
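/* Compose a per-vector name that encodes which rings the vector
* services: -txrx-, -tx-, -rx-, or a bare index when it handles
* neither ring type.
*/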
11167 if (tnapi->tx_buffers && tnapi->rx_rcb) 11168 snprintf(name, IFNAMSIZ, 11169 "%s-txrx-%d", tp->dev->name, irq_num); 11170 else if (tnapi->tx_buffers) 11171 snprintf(name, IFNAMSIZ, 11172 "%s-tx-%d", tp->dev->name, irq_num); 11173 else if (tnapi->rx_rcb) 11174 snprintf(name, IFNAMSIZ, 11175 "%s-rx-%d", tp->dev->name, irq_num); 11176 else 11177 snprintf(name, IFNAMSIZ, 11178 "%s-%d", tp->dev->name, irq_num); 11179 name[IFNAMSIZ-1] = 0; 11180 } 11181 11182 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) { 11183 fn = tg3_msi; 11184 if (tg3_flag(tp, 1SHOT_MSI)) 11185 fn = tg3_msi_1shot; 11186 flags = 0; 11187 } else { 11188 fn = tg3_interrupt; 11189 if (tg3_flag(tp, TAGGED_STATUS)) 11190 fn = tg3_interrupt_tagged; 11191 flags = IRQF_SHARED; 11192 } 11193 11194 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi); 11195 } 11196 11197 static int tg3_test_interrupt(struct tg3 *tp) 11198 { 11199 struct tg3_napi *tnapi = &tp->napi[0]; 11200 struct net_device *dev = tp->dev; 11201 int err, i, intr_ok = 0; 11202 u32 val; 11203 11204 if (!netif_running(dev)) 11205 return -ENODEV; 11206 11207 tg3_disable_ints(tp); 11208 11209 free_irq(tnapi->irq_vec, tnapi); 11210 11211 /* 11212 * Turn off MSI one shot mode. Otherwise this test has no 11213 * observable way to know whether the interrupt was delivered. 11214 */ 11215 if (tg3_flag(tp, 57765_PLUS)) { 11216 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE; 11217 tw32(MSGINT_MODE, val); 11218 } 11219 11220 err = request_irq(tnapi->irq_vec, tg3_test_isr, 11221 IRQF_SHARED, dev->name, tnapi); 11222 if (err) 11223 return err; 11224 11225 tnapi->hw_status->status &= ~SD_STATUS_UPDATED; 11226 tg3_enable_ints(tp); 11227 11228 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 11229 tnapi->coal_now); 11230 11231 for (i = 0; i < 5; i++) { 11232 u32 int_mbox, misc_host_ctrl; 11233 11234 int_mbox = tr32_mailbox(tnapi->int_mbox); 11235 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); 11236 11237 if ((int_mbox != 0) || 11238 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) { 11239 intr_ok = 1; 11240 break; 11241 } 11242 11243 if (tg3_flag(tp, 57765_PLUS) && 11244 tnapi->hw_status->status_tag != tnapi->last_tag) 11245 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); 11246 11247 msleep(10); 11248 } 11249 11250 tg3_disable_ints(tp); 11251 11252 free_irq(tnapi->irq_vec, tnapi); 11253 11254 err = tg3_request_irq(tp, 0); 11255 11256 if (err) 11257 return err; 11258 11259 if (intr_ok) { 11260 /* Reenable MSI one shot mode. */ 11261 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) { 11262 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE; 11263 tw32(MSGINT_MODE, val); 11264 } 11265 return 0; 11266 } 11267 11268 return -EIO; 11269 } 11270 11271 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is 11272 * successfully restored 11273 */ 11274 static int tg3_test_msi(struct tg3 *tp) 11275 { 11276 int err; 11277 u16 pci_cmd; 11278 11279 if (!tg3_flag(tp, USING_MSI)) 11280 return 0; 11281 11282 /* Turn off SERR reporting in case MSI terminates with Master 11283 * Abort. 
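* The original PCI_COMMAND word saved here is restored as soon as
* the interrupt test completes.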
11284 */ 11285 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); 11286 pci_write_config_word(tp->pdev, PCI_COMMAND, 11287 pci_cmd & ~PCI_COMMAND_SERR); 11288 11289 err = tg3_test_interrupt(tp); 11290 11291 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); 11292 11293 if (!err) 11294 return 0; 11295 11296 /* other failures */ 11297 if (err != -EIO) 11298 return err; 11299 11300 /* MSI test failed, go back to INTx mode */ 11301 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching " 11302 "to INTx mode. Please report this failure to the PCI " 11303 "maintainer and include system chipset information\n"); 11304 11305 free_irq(tp->napi[0].irq_vec, &tp->napi[0]); 11306 11307 pci_disable_msi(tp->pdev); 11308 11309 tg3_flag_clear(tp, USING_MSI); 11310 tp->napi[0].irq_vec = tp->pdev->irq; 11311 11312 err = tg3_request_irq(tp, 0); 11313 if (err) 11314 return err; 11315 11316 /* Need to reset the chip because the MSI cycle may have terminated 11317 * with Master Abort. 11318 */ 11319 tg3_full_lock(tp, 1); 11320 11321 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11322 err = tg3_init_hw(tp, true); 11323 11324 tg3_full_unlock(tp); 11325 11326 if (err) 11327 free_irq(tp->napi[0].irq_vec, &tp->napi[0]); 11328 11329 return err; 11330 } 11331 11332 static int tg3_request_firmware(struct tg3 *tp) 11333 { 11334 const struct tg3_firmware_hdr *fw_hdr; 11335 11336 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) { 11337 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n", 11338 tp->fw_needed); 11339 return -ENOENT; 11340 } 11341 11342 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; 11343 11344 /* Firmware blob starts with version numbers, followed by 11345 * start address and _full_ length including BSS sections 11346 * (which must be longer than the actual data, of course) 11347 */ 11348 11349 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */ 11350 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) { 11351 netdev_err(tp->dev, "bogus length %d in \"%s\"\n", 11352 tp->fw_len, tp->fw_needed); 11353 release_firmware(tp->fw); 11354 tp->fw = NULL; 11355 return -EINVAL; 11356 } 11357 11358 /* We no longer need firmware; we have it. */ 11359 tp->fw_needed = NULL; 11360 return 0; 11361 } 11362 11363 static u32 tg3_irq_count(struct tg3 *tp) 11364 { 11365 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt); 11366 11367 if (irq_cnt > 1) { 11368 /* We want as many rx rings enabled as there are cpus. 11369 * In multiqueue MSI-X mode, the first MSI-X vector 11370 * only deals with link interrupts, etc, so we add 11371 * one to the number of vectors we are requesting. 11372 */ 11373 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max); 11374 } 11375 11376 return irq_cnt; 11377 } 11378 11379 static bool tg3_enable_msix(struct tg3 *tp) 11380 { 11381 int i, rc; 11382 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS]; 11383 11384 tp->txq_cnt = tp->txq_req; 11385 tp->rxq_cnt = tp->rxq_req; 11386 if (!tp->rxq_cnt) 11387 tp->rxq_cnt = netif_get_num_default_rss_queues(); 11388 if (tp->rxq_cnt > tp->rxq_max) 11389 tp->rxq_cnt = tp->rxq_max; 11390 11391 /* Disable multiple TX rings by default. Simple round-robin hardware 11392 * scheduling of the TX rings can cause starvation of rings with 11393 * small packets when other rings have TSO or jumbo packets.
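* txq_cnt is therefore forced to 1 just below unless the user
* explicitly requested more queues via txq_req.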
11394 */ 11395 if (!tp->txq_req) 11396 tp->txq_cnt = 1; 11397 11398 tp->irq_cnt = tg3_irq_count(tp); 11399 11400 for (i = 0; i < tp->irq_max; i++) { 11401 msix_ent[i].entry = i; 11402 msix_ent[i].vector = 0; 11403 } 11404 11405 rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt); 11406 if (rc < 0) { 11407 return false; 11408 } else if (rc < tp->irq_cnt) { 11409 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n", 11410 tp->irq_cnt, rc); 11411 tp->irq_cnt = rc; 11412 tp->rxq_cnt = max(rc - 1, 1); 11413 if (tp->txq_cnt) 11414 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max); 11415 } 11416 11417 for (i = 0; i < tp->irq_max; i++) 11418 tp->napi[i].irq_vec = msix_ent[i].vector; 11419 11420 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) { 11421 pci_disable_msix(tp->pdev); 11422 return false; 11423 } 11424 11425 if (tp->irq_cnt == 1) 11426 return true; 11427 11428 tg3_flag_set(tp, ENABLE_RSS); 11429 11430 if (tp->txq_cnt > 1) 11431 tg3_flag_set(tp, ENABLE_TSS); 11432 11433 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt); 11434 11435 return true; 11436 } 11437 11438 static void tg3_ints_init(struct tg3 *tp) 11439 { 11440 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) && 11441 !tg3_flag(tp, TAGGED_STATUS)) { 11442 /* All MSI supporting chips should support tagged 11443 * status. Assert that this is the case. 11444 */ 11445 netdev_warn(tp->dev, 11446 "MSI without TAGGED_STATUS? Not using MSI\n"); 11447 goto defcfg; 11448 } 11449 11450 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp)) 11451 tg3_flag_set(tp, USING_MSIX); 11452 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0) 11453 tg3_flag_set(tp, USING_MSI); 11454 11455 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) { 11456 u32 msi_mode = tr32(MSGINT_MODE); 11457 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) 11458 msi_mode |= MSGINT_MODE_MULTIVEC_EN; 11459 if (!tg3_flag(tp, 1SHOT_MSI)) 11460 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE; 11461 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE); 11462 } 11463 defcfg: 11464 if (!tg3_flag(tp, USING_MSIX)) { 11465 tp->irq_cnt = 1; 11466 tp->napi[0].irq_vec = tp->pdev->irq; 11467 } 11468 11469 if (tp->irq_cnt == 1) { 11470 tp->txq_cnt = 1; 11471 tp->rxq_cnt = 1; 11472 netif_set_real_num_tx_queues(tp->dev, 1); 11473 netif_set_real_num_rx_queues(tp->dev, 1); 11474 } 11475 } 11476 11477 static void tg3_ints_fini(struct tg3 *tp) 11478 { 11479 if (tg3_flag(tp, USING_MSIX)) 11480 pci_disable_msix(tp->pdev); 11481 else if (tg3_flag(tp, USING_MSI)) 11482 pci_disable_msi(tp->pdev); 11483 tg3_flag_clear(tp, USING_MSI); 11484 tg3_flag_clear(tp, USING_MSIX); 11485 tg3_flag_clear(tp, ENABLE_RSS); 11486 tg3_flag_clear(tp, ENABLE_TSS); 11487 } 11488 11489 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq, 11490 bool init) 11491 { 11492 struct net_device *dev = tp->dev; 11493 int i, err; 11494 11495 /* 11496 * Setup interrupts first so we know how 11497 * many NAPI resources to allocate 11498 */ 11499 tg3_ints_init(tp); 11500 11501 tg3_rss_check_indir_tbl(tp); 11502 11503 /* The placement of this call is tied 11504 * to the setup and use of Host TX descriptors. 
*/ 11506 err = tg3_alloc_consistent(tp); 11507 if (err) 11508 goto out_ints_fini; 11509 11510 tg3_napi_init(tp); 11511 11512 tg3_napi_enable(tp); 11513 11514 for (i = 0; i < tp->irq_cnt; i++) { 11515 struct tg3_napi *tnapi = &tp->napi[i]; 11516 err = tg3_request_irq(tp, i); 11517 if (err) { 11518 for (i--; i >= 0; i--) { 11519 tnapi = &tp->napi[i]; 11520 free_irq(tnapi->irq_vec, tnapi); 11521 } 11522 goto out_napi_fini; 11523 } 11524 } 11525 11526 tg3_full_lock(tp, 0); 11527 11528 if (init) 11529 tg3_ape_driver_state_change(tp, RESET_KIND_INIT); 11530 11531 err = tg3_init_hw(tp, reset_phy); 11532 if (err) { 11533 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11534 tg3_free_rings(tp); 11535 } 11536 11537 tg3_full_unlock(tp); 11538 11539 if (err) 11540 goto out_free_irq; 11541 11542 if (test_irq && tg3_flag(tp, USING_MSI)) { 11543 err = tg3_test_msi(tp); 11544 11545 if (err) { 11546 tg3_full_lock(tp, 0); 11547 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11548 tg3_free_rings(tp); 11549 tg3_full_unlock(tp); 11550 11551 goto out_napi_fini; 11552 } 11553 11554 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) { 11555 u32 val = tr32(PCIE_TRANSACTION_CFG); 11556 11557 tw32(PCIE_TRANSACTION_CFG, 11558 val | PCIE_TRANS_CFG_1SHOT_MSI); 11559 } 11560 } 11561 11562 tg3_phy_start(tp); 11563 11564 tg3_hwmon_open(tp); 11565 11566 tg3_full_lock(tp, 0); 11567 11568 tg3_timer_start(tp); 11569 tg3_flag_set(tp, INIT_COMPLETE); 11570 tg3_enable_ints(tp); 11571 11572 tg3_ptp_resume(tp); 11573 11574 tg3_full_unlock(tp); 11575 11576 netif_tx_start_all_queues(dev); 11577 11578 /* 11579 * Reset the loopback feature if it was turned on while the device was down 11580 * to make sure that it's installed properly now. 11581 */ 11582 if (dev->features & NETIF_F_LOOPBACK) 11583 tg3_set_loopback(dev, dev->features); 11584 11585 return 0; 11586 11587 out_free_irq: 11588 for (i = tp->irq_cnt - 1; i >= 0; i--) { 11589 struct tg3_napi *tnapi = &tp->napi[i]; 11590 free_irq(tnapi->irq_vec, tnapi); 11591 } 11592 11593 out_napi_fini: 11594 tg3_napi_disable(tp); 11595 tg3_napi_fini(tp); 11596 tg3_free_consistent(tp); 11597 11598 out_ints_fini: 11599 tg3_ints_fini(tp); 11600 11601 return err; 11602 } 11603 11604 static void tg3_stop(struct tg3 *tp) 11605 { 11606 int i; 11607 11608 tg3_reset_task_cancel(tp); 11609 tg3_netif_stop(tp); 11610 11611 tg3_timer_stop(tp); 11612 11613 tg3_hwmon_close(tp); 11614 11615 tg3_phy_stop(tp); 11616 11617 tg3_full_lock(tp, 1); 11618 11619 tg3_disable_ints(tp); 11620 11621 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11622 tg3_free_rings(tp); 11623 tg3_flag_clear(tp, INIT_COMPLETE); 11624 11625 tg3_full_unlock(tp); 11626 11627 for (i = tp->irq_cnt - 1; i >= 0; i--) { 11628 struct tg3_napi *tnapi = &tp->napi[i]; 11629 free_irq(tnapi->irq_vec, tnapi); 11630 } 11631 11632 tg3_ints_fini(tp); 11633 11634 tg3_napi_fini(tp); 11635 11636 tg3_free_consistent(tp); 11637 } 11638 11639 static int tg3_open(struct net_device *dev) 11640 { 11641 struct tg3 *tp = netdev_priv(dev); 11642 int err; 11643 11644 if (tp->pcierr_recovery) { 11645 netdev_err(dev, "Failed to open device.
PCI error recovery " 11646 "in progress\n"); 11647 return -EAGAIN; 11648 } 11649 11650 if (tp->fw_needed) { 11651 err = tg3_request_firmware(tp); 11652 if (tg3_asic_rev(tp) == ASIC_REV_57766) { 11653 if (err) { 11654 netdev_warn(tp->dev, "EEE capability disabled\n"); 11655 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP; 11656 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { 11657 netdev_warn(tp->dev, "EEE capability restored\n"); 11658 tp->phy_flags |= TG3_PHYFLG_EEE_CAP; 11659 } 11660 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) { 11661 if (err) 11662 return err; 11663 } else if (err) { 11664 netdev_warn(tp->dev, "TSO capability disabled\n"); 11665 tg3_flag_clear(tp, TSO_CAPABLE); 11666 } else if (!tg3_flag(tp, TSO_CAPABLE)) { 11667 netdev_notice(tp->dev, "TSO capability restored\n"); 11668 tg3_flag_set(tp, TSO_CAPABLE); 11669 } 11670 } 11671 11672 tg3_carrier_off(tp); 11673 11674 err = tg3_power_up(tp); 11675 if (err) 11676 return err; 11677 11678 tg3_full_lock(tp, 0); 11679 11680 tg3_disable_ints(tp); 11681 tg3_flag_clear(tp, INIT_COMPLETE); 11682 11683 tg3_full_unlock(tp); 11684 11685 err = tg3_start(tp, 11686 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN), 11687 true, true); 11688 if (err) { 11689 tg3_frob_aux_power(tp, false); 11690 pci_set_power_state(tp->pdev, PCI_D3hot); 11691 } 11692 11693 return err; 11694 } 11695 11696 static int tg3_close(struct net_device *dev) 11697 { 11698 struct tg3 *tp = netdev_priv(dev); 11699 11700 if (tp->pcierr_recovery) { 11701 netdev_err(dev, "Failed to close device. PCI error recovery " 11702 "in progress\n"); 11703 return -EAGAIN; 11704 } 11705 11706 tg3_stop(tp); 11707 11708 /* Clear stats across close / open calls */ 11709 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev)); 11710 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev)); 11711 11712 if (pci_device_is_present(tp->pdev)) { 11713 tg3_power_down_prepare(tp); 11714 11715 tg3_carrier_off(tp); 11716 } 11717 return 0; 11718 } 11719 11720 static inline u64 get_stat64(tg3_stat64_t *val) 11721 { 11722 return ((u64)val->high << 32) | ((u64)val->low); 11723 } 11724 11725 static u64 tg3_calc_crc_errors(struct tg3 *tp) 11726 { 11727 struct tg3_hw_stats *hw_stats = tp->hw_stats; 11728 11729 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 11730 (tg3_asic_rev(tp) == ASIC_REV_5700 || 11731 tg3_asic_rev(tp) == ASIC_REV_5701)) { 11732 u32 val; 11733 11734 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) { 11735 tg3_writephy(tp, MII_TG3_TEST1, 11736 val | MII_TG3_TEST1_CRC_EN); 11737 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val); 11738 } else 11739 val = 0; 11740 11741 tp->phy_crc_errors += val; 11742 11743 return tp->phy_crc_errors; 11744 } 11745 11746 return get_stat64(&hw_stats->rx_fcs_errors); 11747 } 11748 11749 #define ESTAT_ADD(member) \ 11750 estats->member = old_estats->member + \ 11751 get_stat64(&hw_stats->member) 11752 11753 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats) 11754 { 11755 struct tg3_ethtool_stats *old_estats = &tp->estats_prev; 11756 struct tg3_hw_stats *hw_stats = tp->hw_stats; 11757 11758 ESTAT_ADD(rx_octets); 11759 ESTAT_ADD(rx_fragments); 11760 ESTAT_ADD(rx_ucast_packets); 11761 ESTAT_ADD(rx_mcast_packets); 11762 ESTAT_ADD(rx_bcast_packets); 11763 ESTAT_ADD(rx_fcs_errors); 11764 ESTAT_ADD(rx_align_errors); 11765 ESTAT_ADD(rx_xon_pause_rcvd); 11766 ESTAT_ADD(rx_xoff_pause_rcvd); 11767 ESTAT_ADD(rx_mac_ctrl_rcvd); 11768 ESTAT_ADD(rx_xoff_entered); 11769 ESTAT_ADD(rx_frame_too_long_errors); 11770 ESTAT_ADD(rx_jabbers); 11771 
ESTAT_ADD(rx_undersize_packets); 11772 ESTAT_ADD(rx_in_length_errors); 11773 ESTAT_ADD(rx_out_length_errors); 11774 ESTAT_ADD(rx_64_or_less_octet_packets); 11775 ESTAT_ADD(rx_65_to_127_octet_packets); 11776 ESTAT_ADD(rx_128_to_255_octet_packets); 11777 ESTAT_ADD(rx_256_to_511_octet_packets); 11778 ESTAT_ADD(rx_512_to_1023_octet_packets); 11779 ESTAT_ADD(rx_1024_to_1522_octet_packets); 11780 ESTAT_ADD(rx_1523_to_2047_octet_packets); 11781 ESTAT_ADD(rx_2048_to_4095_octet_packets); 11782 ESTAT_ADD(rx_4096_to_8191_octet_packets); 11783 ESTAT_ADD(rx_8192_to_9022_octet_packets); 11784 11785 ESTAT_ADD(tx_octets); 11786 ESTAT_ADD(tx_collisions); 11787 ESTAT_ADD(tx_xon_sent); 11788 ESTAT_ADD(tx_xoff_sent); 11789 ESTAT_ADD(tx_flow_control); 11790 ESTAT_ADD(tx_mac_errors); 11791 ESTAT_ADD(tx_single_collisions); 11792 ESTAT_ADD(tx_mult_collisions); 11793 ESTAT_ADD(tx_deferred); 11794 ESTAT_ADD(tx_excessive_collisions); 11795 ESTAT_ADD(tx_late_collisions); 11796 ESTAT_ADD(tx_collide_2times); 11797 ESTAT_ADD(tx_collide_3times); 11798 ESTAT_ADD(tx_collide_4times); 11799 ESTAT_ADD(tx_collide_5times); 11800 ESTAT_ADD(tx_collide_6times); 11801 ESTAT_ADD(tx_collide_7times); 11802 ESTAT_ADD(tx_collide_8times); 11803 ESTAT_ADD(tx_collide_9times); 11804 ESTAT_ADD(tx_collide_10times); 11805 ESTAT_ADD(tx_collide_11times); 11806 ESTAT_ADD(tx_collide_12times); 11807 ESTAT_ADD(tx_collide_13times); 11808 ESTAT_ADD(tx_collide_14times); 11809 ESTAT_ADD(tx_collide_15times); 11810 ESTAT_ADD(tx_ucast_packets); 11811 ESTAT_ADD(tx_mcast_packets); 11812 ESTAT_ADD(tx_bcast_packets); 11813 ESTAT_ADD(tx_carrier_sense_errors); 11814 ESTAT_ADD(tx_discards); 11815 ESTAT_ADD(tx_errors); 11816 11817 ESTAT_ADD(dma_writeq_full); 11818 ESTAT_ADD(dma_write_prioq_full); 11819 ESTAT_ADD(rxbds_empty); 11820 ESTAT_ADD(rx_discards); 11821 ESTAT_ADD(rx_errors); 11822 ESTAT_ADD(rx_threshold_hit); 11823 11824 ESTAT_ADD(dma_readq_full); 11825 ESTAT_ADD(dma_read_prioq_full); 11826 ESTAT_ADD(tx_comp_queue_full); 11827 11828 ESTAT_ADD(ring_set_send_prod_index); 11829 ESTAT_ADD(ring_status_update); 11830 ESTAT_ADD(nic_irqs); 11831 ESTAT_ADD(nic_avoided_irqs); 11832 ESTAT_ADD(nic_tx_threshold_hit); 11833 11834 ESTAT_ADD(mbuf_lwm_thresh_hit); 11835 } 11836 11837 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats) 11838 { 11839 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev; 11840 struct tg3_hw_stats *hw_stats = tp->hw_stats; 11841 11842 stats->rx_packets = old_stats->rx_packets + 11843 get_stat64(&hw_stats->rx_ucast_packets) + 11844 get_stat64(&hw_stats->rx_mcast_packets) + 11845 get_stat64(&hw_stats->rx_bcast_packets); 11846 11847 stats->tx_packets = old_stats->tx_packets + 11848 get_stat64(&hw_stats->tx_ucast_packets) + 11849 get_stat64(&hw_stats->tx_mcast_packets) + 11850 get_stat64(&hw_stats->tx_bcast_packets); 11851 11852 stats->rx_bytes = old_stats->rx_bytes + 11853 get_stat64(&hw_stats->rx_octets); 11854 stats->tx_bytes = old_stats->tx_bytes + 11855 get_stat64(&hw_stats->tx_octets); 11856 11857 stats->rx_errors = old_stats->rx_errors + 11858 get_stat64(&hw_stats->rx_errors); 11859 stats->tx_errors = old_stats->tx_errors + 11860 get_stat64(&hw_stats->tx_errors) + 11861 get_stat64(&hw_stats->tx_mac_errors) + 11862 get_stat64(&hw_stats->tx_carrier_sense_errors) + 11863 get_stat64(&hw_stats->tx_discards); 11864 11865 stats->multicast = old_stats->multicast + 11866 get_stat64(&hw_stats->rx_mcast_packets); 11867 stats->collisions = old_stats->collisions + 11868 get_stat64(&hw_stats->tx_collisions); 11869 11870 
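/* rx_length_errors folds together the MAC's frame-too-long and
* undersize frame counters. */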
stats->rx_length_errors = old_stats->rx_length_errors + 11871 get_stat64(&hw_stats->rx_frame_too_long_errors) + 11872 get_stat64(&hw_stats->rx_undersize_packets); 11873 11874 stats->rx_frame_errors = old_stats->rx_frame_errors + 11875 get_stat64(&hw_stats->rx_align_errors); 11876 stats->tx_aborted_errors = old_stats->tx_aborted_errors + 11877 get_stat64(&hw_stats->tx_discards); 11878 stats->tx_carrier_errors = old_stats->tx_carrier_errors + 11879 get_stat64(&hw_stats->tx_carrier_sense_errors); 11880 11881 stats->rx_crc_errors = old_stats->rx_crc_errors + 11882 tg3_calc_crc_errors(tp); 11883 11884 stats->rx_missed_errors = old_stats->rx_missed_errors + 11885 get_stat64(&hw_stats->rx_discards); 11886 11887 stats->rx_dropped = tp->rx_dropped; 11888 stats->tx_dropped = tp->tx_dropped; 11889 } 11890 11891 static int tg3_get_regs_len(struct net_device *dev) 11892 { 11893 return TG3_REG_BLK_SIZE; 11894 } 11895 11896 static void tg3_get_regs(struct net_device *dev, 11897 struct ethtool_regs *regs, void *_p) 11898 { 11899 struct tg3 *tp = netdev_priv(dev); 11900 11901 regs->version = 0; 11902 11903 memset(_p, 0, TG3_REG_BLK_SIZE); 11904 11905 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 11906 return; 11907 11908 tg3_full_lock(tp, 0); 11909 11910 tg3_dump_legacy_regs(tp, (u32 *)_p); 11911 11912 tg3_full_unlock(tp); 11913 } 11914 11915 static int tg3_get_eeprom_len(struct net_device *dev) 11916 { 11917 struct tg3 *tp = netdev_priv(dev); 11918 11919 return tp->nvram_size; 11920 } 11921 11922 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) 11923 { 11924 struct tg3 *tp = netdev_priv(dev); 11925 int ret, cpmu_restore = 0; 11926 u8 *pd; 11927 u32 i, offset, len, b_offset, b_count, cpmu_val = 0; 11928 __be32 val; 11929 11930 if (tg3_flag(tp, NO_NVRAM)) 11931 return -EINVAL; 11932 11933 offset = eeprom->offset; 11934 len = eeprom->len; 11935 eeprom->len = 0; 11936 11937 eeprom->magic = TG3_EEPROM_MAGIC; 11938 11939 /* Override clock, link aware and link idle modes */ 11940 if (tg3_flag(tp, CPMU_PRESENT)) { 11941 cpmu_val = tr32(TG3_CPMU_CTRL); 11942 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE | 11943 CPMU_CTRL_LINK_IDLE_MODE)) { 11944 tw32(TG3_CPMU_CTRL, cpmu_val & 11945 ~(CPMU_CTRL_LINK_AWARE_MODE | 11946 CPMU_CTRL_LINK_IDLE_MODE)); 11947 cpmu_restore = 1; 11948 } 11949 } 11950 tg3_override_clk(tp); 11951 11952 if (offset & 3) { 11953 /* adjustments to start on required 4 byte boundary */ 11954 b_offset = offset & 3; 11955 b_count = 4 - b_offset; 11956 if (b_count > len) { 11957 /* i.e. 
offset=1 len=2 */ 11958 b_count = len; 11959 } 11960 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val); 11961 if (ret) 11962 goto eeprom_done; 11963 memcpy(data, ((char *)&val) + b_offset, b_count); 11964 len -= b_count; 11965 offset += b_count; 11966 eeprom->len += b_count; 11967 } 11968 11969 /* read bytes up to the last 4 byte boundary */ 11970 pd = &data[eeprom->len]; 11971 for (i = 0; i < (len - (len & 3)); i += 4) { 11972 ret = tg3_nvram_read_be32(tp, offset + i, &val); 11973 if (ret) { 11974 if (i) 11975 i -= 4; 11976 eeprom->len += i; 11977 goto eeprom_done; 11978 } 11979 memcpy(pd + i, &val, 4); 11980 if (need_resched()) { 11981 if (signal_pending(current)) { 11982 eeprom->len += i; 11983 ret = -EINTR; 11984 goto eeprom_done; 11985 } 11986 cond_resched(); 11987 } 11988 } 11989 eeprom->len += i; 11990 11991 if (len & 3) { 11992 /* read last bytes not ending on 4 byte boundary */ 11993 pd = &data[eeprom->len]; 11994 b_count = len & 3; 11995 b_offset = offset + len - b_count; 11996 ret = tg3_nvram_read_be32(tp, b_offset, &val); 11997 if (ret) 11998 goto eeprom_done; 11999 memcpy(pd, &val, b_count); 12000 eeprom->len += b_count; 12001 } 12002 ret = 0; 12003 12004 eeprom_done: 12005 /* Restore clock, link aware and link idle modes */ 12006 tg3_restore_clk(tp); 12007 if (cpmu_restore) 12008 tw32(TG3_CPMU_CTRL, cpmu_val); 12009 12010 return ret; 12011 } 12012 12013 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) 12014 { 12015 struct tg3 *tp = netdev_priv(dev); 12016 int ret; 12017 u32 offset, len, b_offset, odd_len; 12018 u8 *buf; 12019 __be32 start, end; 12020 12021 if (tg3_flag(tp, NO_NVRAM) || 12022 eeprom->magic != TG3_EEPROM_MAGIC) 12023 return -EINVAL; 12024 12025 offset = eeprom->offset; 12026 len = eeprom->len; 12027 12028 if ((b_offset = (offset & 3))) { 12029 /* adjustments to start on required 4 byte boundary */ 12030 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start); 12031 if (ret) 12032 return ret; 12033 len += b_offset; 12034 offset &= ~3; 12035 if (len < 4) 12036 len = 4; 12037 } 12038 12039 odd_len = 0; 12040 if (len & 3) { 12041 /* adjustments to end on required 4 byte boundary */ 12042 odd_len = 1; 12043 len = (len + 3) & ~3; 12044 ret = tg3_nvram_read_be32(tp, offset+len-4, &end); 12045 if (ret) 12046 return ret; 12047 } 12048 12049 buf = data; 12050 if (b_offset || odd_len) { 12051 buf = kmalloc(len, GFP_KERNEL); 12052 if (!buf) 12053 return -ENOMEM; 12054 if (b_offset) 12055 memcpy(buf, &start, 4); 12056 if (odd_len) 12057 memcpy(buf+len-4, &end, 4); 12058 memcpy(buf + b_offset, data, eeprom->len); 12059 } 12060 12061 ret = tg3_nvram_write_block(tp, offset, len, buf); 12062 12063 if (buf != data) 12064 kfree(buf); 12065 12066 return ret; 12067 } 12068 12069 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 12070 { 12071 struct tg3 *tp = netdev_priv(dev); 12072 12073 if (tg3_flag(tp, USE_PHYLIB)) { 12074 struct phy_device *phydev; 12075 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 12076 return -EAGAIN; 12077 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 12078 return phy_ethtool_gset(phydev, cmd); 12079 } 12080 12081 cmd->supported = (SUPPORTED_Autoneg); 12082 12083 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) 12084 cmd->supported |= (SUPPORTED_1000baseT_Half | 12085 SUPPORTED_1000baseT_Full); 12086 12087 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { 12088 cmd->supported |= (SUPPORTED_100baseT_Half | 12089 SUPPORTED_100baseT_Full | 12090 SUPPORTED_10baseT_Half | 12091 
SUPPORTED_10baseT_Full | 12092 SUPPORTED_TP); 12093 cmd->port = PORT_TP; 12094 } else { 12095 cmd->supported |= SUPPORTED_FIBRE; 12096 cmd->port = PORT_FIBRE; 12097 } 12098 12099 cmd->advertising = tp->link_config.advertising; 12100 if (tg3_flag(tp, PAUSE_AUTONEG)) { 12101 if (tp->link_config.flowctrl & FLOW_CTRL_RX) { 12102 if (tp->link_config.flowctrl & FLOW_CTRL_TX) { 12103 cmd->advertising |= ADVERTISED_Pause; 12104 } else { 12105 cmd->advertising |= ADVERTISED_Pause | 12106 ADVERTISED_Asym_Pause; 12107 } 12108 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) { 12109 cmd->advertising |= ADVERTISED_Asym_Pause; 12110 } 12111 } 12112 if (netif_running(dev) && tp->link_up) { 12113 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed); 12114 cmd->duplex = tp->link_config.active_duplex; 12115 cmd->lp_advertising = tp->link_config.rmt_adv; 12116 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { 12117 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE) 12118 cmd->eth_tp_mdix = ETH_TP_MDI_X; 12119 else 12120 cmd->eth_tp_mdix = ETH_TP_MDI; 12121 } 12122 } else { 12123 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); 12124 cmd->duplex = DUPLEX_UNKNOWN; 12125 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID; 12126 } 12127 cmd->phy_address = tp->phy_addr; 12128 cmd->transceiver = XCVR_INTERNAL; 12129 cmd->autoneg = tp->link_config.autoneg; 12130 cmd->maxtxpkt = 0; 12131 cmd->maxrxpkt = 0; 12132 return 0; 12133 } 12134 12135 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 12136 { 12137 struct tg3 *tp = netdev_priv(dev); 12138 u32 speed = ethtool_cmd_speed(cmd); 12139 12140 if (tg3_flag(tp, USE_PHYLIB)) { 12141 struct phy_device *phydev; 12142 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 12143 return -EAGAIN; 12144 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 12145 return phy_ethtool_sset(phydev, cmd); 12146 } 12147 12148 if (cmd->autoneg != AUTONEG_ENABLE && 12149 cmd->autoneg != AUTONEG_DISABLE) 12150 return -EINVAL; 12151 12152 if (cmd->autoneg == AUTONEG_DISABLE && 12153 cmd->duplex != DUPLEX_FULL && 12154 cmd->duplex != DUPLEX_HALF) 12155 return -EINVAL; 12156 12157 if (cmd->autoneg == AUTONEG_ENABLE) { 12158 u32 mask = ADVERTISED_Autoneg | 12159 ADVERTISED_Pause | 12160 ADVERTISED_Asym_Pause; 12161 12162 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) 12163 mask |= ADVERTISED_1000baseT_Half | 12164 ADVERTISED_1000baseT_Full; 12165 12166 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 12167 mask |= ADVERTISED_100baseT_Half | 12168 ADVERTISED_100baseT_Full | 12169 ADVERTISED_10baseT_Half | 12170 ADVERTISED_10baseT_Full | 12171 ADVERTISED_TP; 12172 else 12173 mask |= ADVERTISED_FIBRE; 12174 12175 if (cmd->advertising & ~mask) 12176 return -EINVAL; 12177 12178 mask &= (ADVERTISED_1000baseT_Half | 12179 ADVERTISED_1000baseT_Full | 12180 ADVERTISED_100baseT_Half | 12181 ADVERTISED_100baseT_Full | 12182 ADVERTISED_10baseT_Half | 12183 ADVERTISED_10baseT_Full); 12184 12185 cmd->advertising &= mask; 12186 } else { 12187 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) { 12188 if (speed != SPEED_1000) 12189 return -EINVAL; 12190 12191 if (cmd->duplex != DUPLEX_FULL) 12192 return -EINVAL; 12193 } else { 12194 if (speed != SPEED_100 && 12195 speed != SPEED_10) 12196 return -EINVAL; 12197 } 12198 } 12199 12200 tg3_full_lock(tp, 0); 12201 12202 tp->link_config.autoneg = cmd->autoneg; 12203 if (cmd->autoneg == AUTONEG_ENABLE) { 12204 tp->link_config.advertising = (cmd->advertising | 12205 ADVERTISED_Autoneg); 12206 tp->link_config.speed = SPEED_UNKNOWN; 12207 tp->link_config.duplex = DUPLEX_UNKNOWN; 
12208 } else { 12209 tp->link_config.advertising = 0; 12210 tp->link_config.speed = speed; 12211 tp->link_config.duplex = cmd->duplex; 12212 } 12213 12214 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; 12215 12216 tg3_warn_mgmt_link_flap(tp); 12217 12218 if (netif_running(dev)) 12219 tg3_setup_phy(tp, true); 12220 12221 tg3_full_unlock(tp); 12222 12223 return 0; 12224 } 12225 12226 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 12227 { 12228 struct tg3 *tp = netdev_priv(dev); 12229 12230 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); 12231 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); 12232 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version)); 12233 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info)); 12234 } 12235 12236 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 12237 { 12238 struct tg3 *tp = netdev_priv(dev); 12239 12240 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev)) 12241 wol->supported = WAKE_MAGIC; 12242 else 12243 wol->supported = 0; 12244 wol->wolopts = 0; 12245 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev)) 12246 wol->wolopts = WAKE_MAGIC; 12247 memset(&wol->sopass, 0, sizeof(wol->sopass)); 12248 } 12249 12250 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 12251 { 12252 struct tg3 *tp = netdev_priv(dev); 12253 struct device *dp = &tp->pdev->dev; 12254 12255 if (wol->wolopts & ~WAKE_MAGIC) 12256 return -EINVAL; 12257 if ((wol->wolopts & WAKE_MAGIC) && 12258 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp))) 12259 return -EINVAL; 12260 12261 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC); 12262 12263 if (device_may_wakeup(dp)) 12264 tg3_flag_set(tp, WOL_ENABLE); 12265 else 12266 tg3_flag_clear(tp, WOL_ENABLE); 12267 12268 return 0; 12269 } 12270 12271 static u32 tg3_get_msglevel(struct net_device *dev) 12272 { 12273 struct tg3 *tp = netdev_priv(dev); 12274 return tp->msg_enable; 12275 } 12276 12277 static void tg3_set_msglevel(struct net_device *dev, u32 value) 12278 { 12279 struct tg3 *tp = netdev_priv(dev); 12280 tp->msg_enable = value; 12281 } 12282 12283 static int tg3_nway_reset(struct net_device *dev) 12284 { 12285 struct tg3 *tp = netdev_priv(dev); 12286 int r; 12287 12288 if (!netif_running(dev)) 12289 return -EAGAIN; 12290 12291 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 12292 return -EINVAL; 12293 12294 tg3_warn_mgmt_link_flap(tp); 12295 12296 if (tg3_flag(tp, USE_PHYLIB)) { 12297 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 12298 return -EAGAIN; 12299 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); 12300 } else { 12301 u32 bmcr; 12302 12303 spin_lock_bh(&tp->lock); 12304 r = -EINVAL; 12305 tg3_readphy(tp, MII_BMCR, &bmcr); 12306 if (!tg3_readphy(tp, MII_BMCR, &bmcr) && 12307 ((bmcr & BMCR_ANENABLE) || 12308 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) { 12309 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART | 12310 BMCR_ANENABLE); 12311 r = 0; 12312 } 12313 spin_unlock_bh(&tp->lock); 12314 } 12315 12316 return r; 12317 } 12318 12319 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) 12320 { 12321 struct tg3 *tp = netdev_priv(dev); 12322 12323 ering->rx_max_pending = tp->rx_std_ring_mask; 12324 if (tg3_flag(tp, JUMBO_RING_ENABLE)) 12325 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask; 12326 else 12327 ering->rx_jumbo_max_pending = 0; 12328 12329 ering->tx_max_pending = TG3_TX_RING_SIZE - 1; 12330 12331 
ering->rx_pending = tp->rx_pending; 12332 if (tg3_flag(tp, JUMBO_RING_ENABLE)) 12333 ering->rx_jumbo_pending = tp->rx_jumbo_pending; 12334 else 12335 ering->rx_jumbo_pending = 0; 12336 12337 ering->tx_pending = tp->napi[0].tx_pending; 12338 } 12339 12340 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) 12341 { 12342 struct tg3 *tp = netdev_priv(dev); 12343 int i, irq_sync = 0, err = 0; 12344 12345 if ((ering->rx_pending > tp->rx_std_ring_mask) || 12346 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) || 12347 (ering->tx_pending > TG3_TX_RING_SIZE - 1) || 12348 (ering->tx_pending <= MAX_SKB_FRAGS) || 12349 (tg3_flag(tp, TSO_BUG) && 12350 (ering->tx_pending <= (MAX_SKB_FRAGS * 3)))) 12351 return -EINVAL; 12352 12353 if (netif_running(dev)) { 12354 tg3_phy_stop(tp); 12355 tg3_netif_stop(tp); 12356 irq_sync = 1; 12357 } 12358 12359 tg3_full_lock(tp, irq_sync); 12360 12361 tp->rx_pending = ering->rx_pending; 12362 12363 if (tg3_flag(tp, MAX_RXPEND_64) && 12364 tp->rx_pending > 63) 12365 tp->rx_pending = 63; 12366 12367 if (tg3_flag(tp, JUMBO_RING_ENABLE)) 12368 tp->rx_jumbo_pending = ering->rx_jumbo_pending; 12369 12370 for (i = 0; i < tp->irq_max; i++) 12371 tp->napi[i].tx_pending = ering->tx_pending; 12372 12373 if (netif_running(dev)) { 12374 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 12375 err = tg3_restart_hw(tp, false); 12376 if (!err) 12377 tg3_netif_start(tp); 12378 } 12379 12380 tg3_full_unlock(tp); 12381 12382 if (irq_sync && !err) 12383 tg3_phy_start(tp); 12384 12385 return err; 12386 } 12387 12388 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) 12389 { 12390 struct tg3 *tp = netdev_priv(dev); 12391 12392 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG); 12393 12394 if (tp->link_config.flowctrl & FLOW_CTRL_RX) 12395 epause->rx_pause = 1; 12396 else 12397 epause->rx_pause = 0; 12398 12399 if (tp->link_config.flowctrl & FLOW_CTRL_TX) 12400 epause->tx_pause = 1; 12401 else 12402 epause->tx_pause = 0; 12403 } 12404 12405 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) 12406 { 12407 struct tg3 *tp = netdev_priv(dev); 12408 int err = 0; 12409 12410 if (tp->link_config.autoneg == AUTONEG_ENABLE) 12411 tg3_warn_mgmt_link_flap(tp); 12412 12413 if (tg3_flag(tp, USE_PHYLIB)) { 12414 u32 newadv; 12415 struct phy_device *phydev; 12416 12417 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 12418 12419 if (!(phydev->supported & SUPPORTED_Pause) || 12420 (!(phydev->supported & SUPPORTED_Asym_Pause) && 12421 (epause->rx_pause != epause->tx_pause))) 12422 return -EINVAL; 12423 12424 tp->link_config.flowctrl = 0; 12425 if (epause->rx_pause) { 12426 tp->link_config.flowctrl |= FLOW_CTRL_RX; 12427 12428 if (epause->tx_pause) { 12429 tp->link_config.flowctrl |= FLOW_CTRL_TX; 12430 newadv = ADVERTISED_Pause; 12431 } else 12432 newadv = ADVERTISED_Pause | 12433 ADVERTISED_Asym_Pause; 12434 } else if (epause->tx_pause) { 12435 tp->link_config.flowctrl |= FLOW_CTRL_TX; 12436 newadv = ADVERTISED_Asym_Pause; 12437 } else 12438 newadv = 0; 12439 12440 if (epause->autoneg) 12441 tg3_flag_set(tp, PAUSE_AUTONEG); 12442 else 12443 tg3_flag_clear(tp, PAUSE_AUTONEG); 12444 12445 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { 12446 u32 oldadv = phydev->advertising & 12447 (ADVERTISED_Pause | ADVERTISED_Asym_Pause); 12448 if (oldadv != newadv) { 12449 phydev->advertising &= 12450 ~(ADVERTISED_Pause | 12451 ADVERTISED_Asym_Pause); 12452 phydev->advertising |= newadv; 12453 if (phydev->autoneg) { 12454 /* 
12455 * Always renegotiate the link to 12456 * inform our link partner of our 12457 * flow control settings, even if the 12458 * flow control is forced. Let 12459 * tg3_adjust_link() do the final 12460 * flow control setup. 12461 */ 12462 return phy_start_aneg(phydev); 12463 } 12464 } 12465 12466 if (!epause->autoneg) 12467 tg3_setup_flow_control(tp, 0, 0); 12468 } else { 12469 tp->link_config.advertising &= 12470 ~(ADVERTISED_Pause | 12471 ADVERTISED_Asym_Pause); 12472 tp->link_config.advertising |= newadv; 12473 } 12474 } else { 12475 int irq_sync = 0; 12476 12477 if (netif_running(dev)) { 12478 tg3_netif_stop(tp); 12479 irq_sync = 1; 12480 } 12481 12482 tg3_full_lock(tp, irq_sync); 12483 12484 if (epause->autoneg) 12485 tg3_flag_set(tp, PAUSE_AUTONEG); 12486 else 12487 tg3_flag_clear(tp, PAUSE_AUTONEG); 12488 if (epause->rx_pause) 12489 tp->link_config.flowctrl |= FLOW_CTRL_RX; 12490 else 12491 tp->link_config.flowctrl &= ~FLOW_CTRL_RX; 12492 if (epause->tx_pause) 12493 tp->link_config.flowctrl |= FLOW_CTRL_TX; 12494 else 12495 tp->link_config.flowctrl &= ~FLOW_CTRL_TX; 12496 12497 if (netif_running(dev)) { 12498 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 12499 err = tg3_restart_hw(tp, false); 12500 if (!err) 12501 tg3_netif_start(tp); 12502 } 12503 12504 tg3_full_unlock(tp); 12505 } 12506 12507 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; 12508 12509 return err; 12510 } 12511 12512 static int tg3_get_sset_count(struct net_device *dev, int sset) 12513 { 12514 switch (sset) { 12515 case ETH_SS_TEST: 12516 return TG3_NUM_TEST; 12517 case ETH_SS_STATS: 12518 return TG3_NUM_STATS; 12519 default: 12520 return -EOPNOTSUPP; 12521 } 12522 } 12523 12524 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, 12525 u32 *rules __always_unused) 12526 { 12527 struct tg3 *tp = netdev_priv(dev); 12528 12529 if (!tg3_flag(tp, SUPPORT_MSIX)) 12530 return -EOPNOTSUPP; 12531 12532 switch (info->cmd) { 12533 case ETHTOOL_GRXRINGS: 12534 if (netif_running(tp->dev)) 12535 info->data = tp->rxq_cnt; 12536 else { 12537 info->data = num_online_cpus(); 12538 if (info->data > TG3_RSS_MAX_NUM_QS) 12539 info->data = TG3_RSS_MAX_NUM_QS; 12540 } 12541 12542 /* The first interrupt vector only 12543 * handles link interrupts. 
12544 */ 12545 info->data -= 1; 12546 return 0; 12547 12548 default: 12549 return -EOPNOTSUPP; 12550 } 12551 } 12552 12553 static u32 tg3_get_rxfh_indir_size(struct net_device *dev) 12554 { 12555 u32 size = 0; 12556 struct tg3 *tp = netdev_priv(dev); 12557 12558 if (tg3_flag(tp, SUPPORT_MSIX)) 12559 size = TG3_RSS_INDIR_TBL_SIZE; 12560 12561 return size; 12562 } 12563 12564 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc) 12565 { 12566 struct tg3 *tp = netdev_priv(dev); 12567 int i; 12568 12569 if (hfunc) 12570 *hfunc = ETH_RSS_HASH_TOP; 12571 if (!indir) 12572 return 0; 12573 12574 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) 12575 indir[i] = tp->rss_ind_tbl[i]; 12576 12577 return 0; 12578 } 12579 12580 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, 12581 const u8 hfunc) 12582 { 12583 struct tg3 *tp = netdev_priv(dev); 12584 size_t i; 12585 12586 /* We require at least one supported parameter to be changed and no 12587 * change in any of the unsupported parameters 12588 */ 12589 if (key || 12590 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) 12591 return -EOPNOTSUPP; 12592 12593 if (!indir) 12594 return 0; 12595 12596 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) 12597 tp->rss_ind_tbl[i] = indir[i]; 12598 12599 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS)) 12600 return 0; 12601 12602 /* It is legal to write the indirection 12603 * table while the device is running. 12604 */ 12605 tg3_full_lock(tp, 0); 12606 tg3_rss_write_indir_tbl(tp); 12607 tg3_full_unlock(tp); 12608 12609 return 0; 12610 } 12611 12612 static void tg3_get_channels(struct net_device *dev, 12613 struct ethtool_channels *channel) 12614 { 12615 struct tg3 *tp = netdev_priv(dev); 12616 u32 deflt_qs = netif_get_num_default_rss_queues(); 12617 12618 channel->max_rx = tp->rxq_max; 12619 channel->max_tx = tp->txq_max; 12620 12621 if (netif_running(dev)) { 12622 channel->rx_count = tp->rxq_cnt; 12623 channel->tx_count = tp->txq_cnt; 12624 } else { 12625 if (tp->rxq_req) 12626 channel->rx_count = tp->rxq_req; 12627 else 12628 channel->rx_count = min(deflt_qs, tp->rxq_max); 12629 12630 if (tp->txq_req) 12631 channel->tx_count = tp->txq_req; 12632 else 12633 channel->tx_count = min(deflt_qs, tp->txq_max); 12634 } 12635 } 12636 12637 static int tg3_set_channels(struct net_device *dev, 12638 struct ethtool_channels *channel) 12639 { 12640 struct tg3 *tp = netdev_priv(dev); 12641 12642 if (!tg3_flag(tp, SUPPORT_MSIX)) 12643 return -EOPNOTSUPP; 12644 12645 if (channel->rx_count > tp->rxq_max || 12646 channel->tx_count > tp->txq_max) 12647 return -EINVAL; 12648 12649 tp->rxq_req = channel->rx_count; 12650 tp->txq_req = channel->tx_count; 12651 12652 if (!netif_running(dev)) 12653 return 0; 12654 12655 tg3_stop(tp); 12656 12657 tg3_carrier_off(tp); 12658 12659 tg3_start(tp, true, false, false); 12660 12661 return 0; 12662 } 12663 12664 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf) 12665 { 12666 switch (stringset) { 12667 case ETH_SS_STATS: 12668 memcpy(buf, ðtool_stats_keys, sizeof(ethtool_stats_keys)); 12669 break; 12670 case ETH_SS_TEST: 12671 memcpy(buf, ðtool_test_keys, sizeof(ethtool_test_keys)); 12672 break; 12673 default: 12674 WARN_ON(1); /* we need a WARN() */ 12675 break; 12676 } 12677 } 12678 12679 static int tg3_set_phys_id(struct net_device *dev, 12680 enum ethtool_phys_id_state state) 12681 { 12682 struct tg3 *tp = netdev_priv(dev); 12683 12684 if (!netif_running(tp->dev)) 12685 return -EAGAIN; 12686 12687 
switch (state) { 12688 case ETHTOOL_ID_ACTIVE: 12689 return 1; /* cycle on/off once per second */ 12690 12691 case ETHTOOL_ID_ON: 12692 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | 12693 LED_CTRL_1000MBPS_ON | 12694 LED_CTRL_100MBPS_ON | 12695 LED_CTRL_10MBPS_ON | 12696 LED_CTRL_TRAFFIC_OVERRIDE | 12697 LED_CTRL_TRAFFIC_BLINK | 12698 LED_CTRL_TRAFFIC_LED); 12699 break; 12700 12701 case ETHTOOL_ID_OFF: 12702 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | 12703 LED_CTRL_TRAFFIC_OVERRIDE); 12704 break; 12705 12706 case ETHTOOL_ID_INACTIVE: 12707 tw32(MAC_LED_CTRL, tp->led_ctrl); 12708 break; 12709 } 12710 12711 return 0; 12712 } 12713 12714 static void tg3_get_ethtool_stats(struct net_device *dev, 12715 struct ethtool_stats *estats, u64 *tmp_stats) 12716 { 12717 struct tg3 *tp = netdev_priv(dev); 12718 12719 if (tp->hw_stats) 12720 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats); 12721 else 12722 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats)); 12723 } 12724 12725 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen) 12726 { 12727 int i; 12728 __be32 *buf; 12729 u32 offset = 0, len = 0; 12730 u32 magic, val; 12731 12732 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic)) 12733 return NULL; 12734 12735 if (magic == TG3_EEPROM_MAGIC) { 12736 for (offset = TG3_NVM_DIR_START; 12737 offset < TG3_NVM_DIR_END; 12738 offset += TG3_NVM_DIRENT_SIZE) { 12739 if (tg3_nvram_read(tp, offset, &val)) 12740 return NULL; 12741 12742 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == 12743 TG3_NVM_DIRTYPE_EXTVPD) 12744 break; 12745 } 12746 12747 if (offset != TG3_NVM_DIR_END) { 12748 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4; 12749 if (tg3_nvram_read(tp, offset + 4, &offset)) 12750 return NULL; 12751 12752 offset = tg3_nvram_logical_addr(tp, offset); 12753 } 12754 } 12755 12756 if (!offset || !len) { 12757 offset = TG3_NVM_VPD_OFF; 12758 len = TG3_NVM_VPD_LEN; 12759 } 12760 12761 buf = kmalloc(len, GFP_KERNEL); 12762 if (buf == NULL) 12763 return NULL; 12764 12765 if (magic == TG3_EEPROM_MAGIC) { 12766 for (i = 0; i < len; i += 4) { 12767 /* The data is in little-endian format in NVRAM. 12768 * Use the big-endian read routines to preserve 12769 * the byte order as it exists in NVRAM. 
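* That way bytes at increasing NVRAM offsets land at increasing buffer addresses on both little- and big-endian hosts, which is what the VPD parsing below assumes.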
12770 */ 12771 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4])) 12772 goto error; 12773 } 12774 } else { 12775 u8 *ptr; 12776 ssize_t cnt; 12777 unsigned int pos = 0; 12778 12779 ptr = (u8 *)&buf[0]; 12780 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) { 12781 cnt = pci_read_vpd(tp->pdev, pos, 12782 len - pos, ptr); 12783 if (cnt == -ETIMEDOUT || cnt == -EINTR) 12784 cnt = 0; 12785 else if (cnt < 0) 12786 goto error; 12787 } 12788 if (pos != len) 12789 goto error; 12790 } 12791 12792 *vpdlen = len; 12793 12794 return buf; 12795 12796 error: 12797 kfree(buf); 12798 return NULL; 12799 } 12800 12801 #define NVRAM_TEST_SIZE 0x100 12802 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14 12803 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18 12804 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c 12805 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20 12806 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24 12807 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50 12808 #define NVRAM_SELFBOOT_HW_SIZE 0x20 12809 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c 12810 12811 static int tg3_test_nvram(struct tg3 *tp) 12812 { 12813 u32 csum, magic, len; 12814 __be32 *buf; 12815 int i, j, k, err = 0, size; 12816 12817 if (tg3_flag(tp, NO_NVRAM)) 12818 return 0; 12819 12820 if (tg3_nvram_read(tp, 0, &magic) != 0) 12821 return -EIO; 12822 12823 if (magic == TG3_EEPROM_MAGIC) 12824 size = NVRAM_TEST_SIZE; 12825 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) { 12826 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) == 12827 TG3_EEPROM_SB_FORMAT_1) { 12828 switch (magic & TG3_EEPROM_SB_REVISION_MASK) { 12829 case TG3_EEPROM_SB_REVISION_0: 12830 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE; 12831 break; 12832 case TG3_EEPROM_SB_REVISION_2: 12833 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE; 12834 break; 12835 case TG3_EEPROM_SB_REVISION_3: 12836 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE; 12837 break; 12838 case TG3_EEPROM_SB_REVISION_4: 12839 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE; 12840 break; 12841 case TG3_EEPROM_SB_REVISION_5: 12842 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE; 12843 break; 12844 case TG3_EEPROM_SB_REVISION_6: 12845 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE; 12846 break; 12847 default: 12848 return -EIO; 12849 } 12850 } else 12851 return 0; 12852 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW) 12853 size = NVRAM_SELFBOOT_HW_SIZE; 12854 else 12855 return -EIO; 12856 12857 buf = kmalloc(size, GFP_KERNEL); 12858 if (buf == NULL) 12859 return -ENOMEM; 12860 12861 err = -EIO; 12862 for (i = 0, j = 0; i < size; i += 4, j++) { 12863 err = tg3_nvram_read_be32(tp, i, &buf[j]); 12864 if (err) 12865 break; 12866 } 12867 if (i < size) 12868 goto out; 12869 12870 /* Selfboot format */ 12871 magic = be32_to_cpu(buf[0]); 12872 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == 12873 TG3_EEPROM_MAGIC_FW) { 12874 u8 *buf8 = (u8 *) buf, csum8 = 0; 12875 12876 if ((magic & TG3_EEPROM_SB_REVISION_MASK) == 12877 TG3_EEPROM_SB_REVISION_2) { 12878 /* For rev 2, the csum doesn't include the MBA. 
*/ 12879 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++) 12880 csum8 += buf8[i]; 12881 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++) 12882 csum8 += buf8[i]; 12883 } else { 12884 for (i = 0; i < size; i++) 12885 csum8 += buf8[i]; 12886 } 12887 12888 if (csum8 == 0) { 12889 err = 0; 12890 goto out; 12891 } 12892 12893 err = -EIO; 12894 goto out; 12895 } 12896 12897 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == 12898 TG3_EEPROM_MAGIC_HW) { 12899 u8 data[NVRAM_SELFBOOT_DATA_SIZE]; 12900 u8 parity[NVRAM_SELFBOOT_DATA_SIZE]; 12901 u8 *buf8 = (u8 *) buf; 12902 12903 /* Separate the parity bits and the data bytes. */ 12904 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) { 12905 if ((i == 0) || (i == 8)) { 12906 int l; 12907 u8 msk; 12908 12909 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1) 12910 parity[k++] = buf8[i] & msk; 12911 i++; 12912 } else if (i == 16) { 12913 int l; 12914 u8 msk; 12915 12916 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1) 12917 parity[k++] = buf8[i] & msk; 12918 i++; 12919 12920 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1) 12921 parity[k++] = buf8[i] & msk; 12922 i++; 12923 } 12924 data[j++] = buf8[i]; 12925 } 12926 12927 err = -EIO; 12928 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) { 12929 u8 hw8 = hweight8(data[i]); 12930 12931 if ((hw8 & 0x1) && parity[i]) 12932 goto out; 12933 else if (!(hw8 & 0x1) && !parity[i]) 12934 goto out; 12935 } 12936 err = 0; 12937 goto out; 12938 } 12939 12940 err = -EIO; 12941 12942 /* Bootstrap checksum at offset 0x10 */ 12943 csum = calc_crc((unsigned char *) buf, 0x10); 12944 if (csum != le32_to_cpu(buf[0x10/4])) 12945 goto out; 12946 12947 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */ 12948 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88); 12949 if (csum != le32_to_cpu(buf[0xfc/4])) 12950 goto out; 12951 12952 kfree(buf); 12953 12954 buf = tg3_vpd_readblock(tp, &len); 12955 if (!buf) 12956 return -ENOMEM; 12957 12958 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA); 12959 if (i > 0) { 12960 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]); 12961 if (j < 0) 12962 goto out; 12963 12964 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len) 12965 goto out; 12966 12967 i += PCI_VPD_LRDT_TAG_SIZE; 12968 j = pci_vpd_find_info_keyword((u8 *)buf, i, j, 12969 PCI_VPD_RO_KEYWORD_CHKSUM); 12970 if (j > 0) { 12971 u8 csum8 = 0; 12972 12973 j += PCI_VPD_INFO_FLD_HDR_SIZE; 12974 12975 for (i = 0; i <= j; i++) 12976 csum8 += ((u8 *)buf)[i]; 12977 12978 if (csum8) 12979 goto out; 12980 } 12981 } 12982 12983 err = 0; 12984 12985 out: 12986 kfree(buf); 12987 return err; 12988 } 12989 12990 #define TG3_SERDES_TIMEOUT_SEC 2 12991 #define TG3_COPPER_TIMEOUT_SEC 6 12992 12993 static int tg3_test_link(struct tg3 *tp) 12994 { 12995 int i, max; 12996 12997 if (!netif_running(tp->dev)) 12998 return -ENODEV; 12999 13000 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) 13001 max = TG3_SERDES_TIMEOUT_SEC; 13002 else 13003 max = TG3_COPPER_TIMEOUT_SEC; 13004 13005 for (i = 0; i < max; i++) { 13006 if (tp->link_up) 13007 return 0; 13008 13009 if (msleep_interruptible(1000)) 13010 break; 13011 } 13012 13013 return -EIO; 13014 } 13015 13016 /* Only test the commonly used registers */ 13017 static int tg3_test_registers(struct tg3 *tp) 13018 { 13019 int i, is_5705, is_5750; 13020 u32 offset, read_mask, write_mask, val, save_val, read_val; 13021 static struct { 13022 u16 offset; 13023 u16 flags; 13024 #define TG3_FL_5705 0x1 13025 #define TG3_FL_NOT_5705 0x2 13026 #define TG3_FL_NOT_5788 0x4 13027 #define TG3_FL_NOT_5750 0x8 13028 u32 
read_mask; 13029 u32 write_mask; 13030 } reg_tbl[] = { 13031 /* MAC Control Registers */ 13032 { MAC_MODE, TG3_FL_NOT_5705, 13033 0x00000000, 0x00ef6f8c }, 13034 { MAC_MODE, TG3_FL_5705, 13035 0x00000000, 0x01ef6b8c }, 13036 { MAC_STATUS, TG3_FL_NOT_5705, 13037 0x03800107, 0x00000000 }, 13038 { MAC_STATUS, TG3_FL_5705, 13039 0x03800100, 0x00000000 }, 13040 { MAC_ADDR_0_HIGH, 0x0000, 13041 0x00000000, 0x0000ffff }, 13042 { MAC_ADDR_0_LOW, 0x0000, 13043 0x00000000, 0xffffffff }, 13044 { MAC_RX_MTU_SIZE, 0x0000, 13045 0x00000000, 0x0000ffff }, 13046 { MAC_TX_MODE, 0x0000, 13047 0x00000000, 0x00000070 }, 13048 { MAC_TX_LENGTHS, 0x0000, 13049 0x00000000, 0x00003fff }, 13050 { MAC_RX_MODE, TG3_FL_NOT_5705, 13051 0x00000000, 0x000007fc }, 13052 { MAC_RX_MODE, TG3_FL_5705, 13053 0x00000000, 0x000007dc }, 13054 { MAC_HASH_REG_0, 0x0000, 13055 0x00000000, 0xffffffff }, 13056 { MAC_HASH_REG_1, 0x0000, 13057 0x00000000, 0xffffffff }, 13058 { MAC_HASH_REG_2, 0x0000, 13059 0x00000000, 0xffffffff }, 13060 { MAC_HASH_REG_3, 0x0000, 13061 0x00000000, 0xffffffff }, 13062 13063 /* Receive Data and Receive BD Initiator Control Registers. */ 13064 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705, 13065 0x00000000, 0xffffffff }, 13066 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705, 13067 0x00000000, 0xffffffff }, 13068 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705, 13069 0x00000000, 0x00000003 }, 13070 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705, 13071 0x00000000, 0xffffffff }, 13072 { RCVDBDI_STD_BD+0, 0x0000, 13073 0x00000000, 0xffffffff }, 13074 { RCVDBDI_STD_BD+4, 0x0000, 13075 0x00000000, 0xffffffff }, 13076 { RCVDBDI_STD_BD+8, 0x0000, 13077 0x00000000, 0xffff0002 }, 13078 { RCVDBDI_STD_BD+0xc, 0x0000, 13079 0x00000000, 0xffffffff }, 13080 13081 /* Receive BD Initiator Control Registers. */ 13082 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705, 13083 0x00000000, 0xffffffff }, 13084 { RCVBDI_STD_THRESH, TG3_FL_5705, 13085 0x00000000, 0x000003ff }, 13086 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705, 13087 0x00000000, 0xffffffff }, 13088 13089 /* Host Coalescing Control Registers. 
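* (interrupt-mitigation tick and frame thresholds, plus the host/NIC addresses of the status and statistics blocks).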
*/ 13090 { HOSTCC_MODE, TG3_FL_NOT_5705, 13091 0x00000000, 0x00000004 }, 13092 { HOSTCC_MODE, TG3_FL_5705, 13093 0x00000000, 0x000000f6 }, 13094 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705, 13095 0x00000000, 0xffffffff }, 13096 { HOSTCC_RXCOL_TICKS, TG3_FL_5705, 13097 0x00000000, 0x000003ff }, 13098 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705, 13099 0x00000000, 0xffffffff }, 13100 { HOSTCC_TXCOL_TICKS, TG3_FL_5705, 13101 0x00000000, 0x000003ff }, 13102 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705, 13103 0x00000000, 0xffffffff }, 13104 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, 13105 0x00000000, 0x000000ff }, 13106 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705, 13107 0x00000000, 0xffffffff }, 13108 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, 13109 0x00000000, 0x000000ff }, 13110 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705, 13111 0x00000000, 0xffffffff }, 13112 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705, 13113 0x00000000, 0xffffffff }, 13114 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705, 13115 0x00000000, 0xffffffff }, 13116 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, 13117 0x00000000, 0x000000ff }, 13118 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705, 13119 0x00000000, 0xffffffff }, 13120 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, 13121 0x00000000, 0x000000ff }, 13122 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705, 13123 0x00000000, 0xffffffff }, 13124 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705, 13125 0x00000000, 0xffffffff }, 13126 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705, 13127 0x00000000, 0xffffffff }, 13128 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000, 13129 0x00000000, 0xffffffff }, 13130 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000, 13131 0x00000000, 0xffffffff }, 13132 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000, 13133 0xffffffff, 0x00000000 }, 13134 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000, 13135 0xffffffff, 0x00000000 }, 13136 13137 /* Buffer Manager Control Registers. 
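* (mbuf and DMA descriptor pool placement, plus the low/high watermarks that trigger flow control).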
*/ 13138 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750, 13139 0x00000000, 0x007fff80 }, 13140 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750, 13141 0x00000000, 0x007fffff }, 13142 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000, 13143 0x00000000, 0x0000003f }, 13144 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000, 13145 0x00000000, 0x000001ff }, 13146 { BUFMGR_MB_HIGH_WATER, 0x0000, 13147 0x00000000, 0x000001ff }, 13148 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705, 13149 0xffffffff, 0x00000000 }, 13150 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705, 13151 0xffffffff, 0x00000000 }, 13152 13153 /* Mailbox Registers */ 13154 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000, 13155 0x00000000, 0x000001ff }, 13156 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705, 13157 0x00000000, 0x000001ff }, 13158 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000, 13159 0x00000000, 0x000007ff }, 13160 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000, 13161 0x00000000, 0x000001ff }, 13162 13163 { 0xffff, 0x0000, 0x00000000, 0x00000000 }, 13164 }; 13165 13166 is_5705 = is_5750 = 0; 13167 if (tg3_flag(tp, 5705_PLUS)) { 13168 is_5705 = 1; 13169 if (tg3_flag(tp, 5750_PLUS)) 13170 is_5750 = 1; 13171 } 13172 13173 for (i = 0; reg_tbl[i].offset != 0xffff; i++) { 13174 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705)) 13175 continue; 13176 13177 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705)) 13178 continue; 13179 13180 if (tg3_flag(tp, IS_5788) && 13181 (reg_tbl[i].flags & TG3_FL_NOT_5788)) 13182 continue; 13183 13184 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750)) 13185 continue; 13186 13187 offset = (u32) reg_tbl[i].offset; 13188 read_mask = reg_tbl[i].read_mask; 13189 write_mask = reg_tbl[i].write_mask; 13190 13191 /* Save the original register content */ 13192 save_val = tr32(offset); 13193 13194 /* Determine the read-only value. */ 13195 read_val = save_val & read_mask; 13196 13197 /* Write zero to the register, then make sure the read-only bits 13198 * are not changed and the read/write bits are all zeros. 13199 */ 13200 tw32(offset, 0); 13201 13202 val = tr32(offset); 13203 13204 /* Test the read-only and read/write bits. */ 13205 if (((val & read_mask) != read_val) || (val & write_mask)) 13206 goto out; 13207 13208 /* Write ones to all the bits defined by RdMask and WrMask, then 13209 * make sure the read-only bits are not changed and the 13210 * read/write bits are all ones. 13211 */ 13212 tw32(offset, read_mask | write_mask); 13213 13214 val = tr32(offset); 13215 13216 /* Test the read-only bits. */ 13217 if ((val & read_mask) != read_val) 13218 goto out; 13219 13220 /* Test the read/write bits. 
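* After writing ones through both masks, every write_mask bit must read back as one; a bit stuck at zero fails here.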
*/ 13221 if ((val & write_mask) != write_mask) 13222 goto out; 13223 13224 tw32(offset, save_val); 13225 } 13226 13227 return 0; 13228 13229 out: 13230 if (netif_msg_hw(tp)) 13231 netdev_err(tp->dev, 13232 "Register test failed at offset %x\n", offset); 13233 tw32(offset, save_val); 13234 return -EIO; 13235 } 13236 13237 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len) 13238 { 13239 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a }; 13240 int i; 13241 u32 j; 13242 13243 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) { 13244 for (j = 0; j < len; j += 4) { 13245 u32 val; 13246 13247 tg3_write_mem(tp, offset + j, test_pattern[i]); 13248 tg3_read_mem(tp, offset + j, &val); 13249 if (val != test_pattern[i]) 13250 return -EIO; 13251 } 13252 } 13253 return 0; 13254 } 13255 13256 static int tg3_test_memory(struct tg3 *tp) 13257 { 13258 static struct mem_entry { 13259 u32 offset; 13260 u32 len; 13261 } mem_tbl_570x[] = { 13262 { 0x00000000, 0x00b50}, 13263 { 0x00002000, 0x1c000}, 13264 { 0xffffffff, 0x00000} 13265 }, mem_tbl_5705[] = { 13266 { 0x00000100, 0x0000c}, 13267 { 0x00000200, 0x00008}, 13268 { 0x00004000, 0x00800}, 13269 { 0x00006000, 0x01000}, 13270 { 0x00008000, 0x02000}, 13271 { 0x00010000, 0x0e000}, 13272 { 0xffffffff, 0x00000} 13273 }, mem_tbl_5755[] = { 13274 { 0x00000200, 0x00008}, 13275 { 0x00004000, 0x00800}, 13276 { 0x00006000, 0x00800}, 13277 { 0x00008000, 0x02000}, 13278 { 0x00010000, 0x0c000}, 13279 { 0xffffffff, 0x00000} 13280 }, mem_tbl_5906[] = { 13281 { 0x00000200, 0x00008}, 13282 { 0x00004000, 0x00400}, 13283 { 0x00006000, 0x00400}, 13284 { 0x00008000, 0x01000}, 13285 { 0x00010000, 0x01000}, 13286 { 0xffffffff, 0x00000} 13287 }, mem_tbl_5717[] = { 13288 { 0x00000200, 0x00008}, 13289 { 0x00010000, 0x0a000}, 13290 { 0x00020000, 0x13c00}, 13291 { 0xffffffff, 0x00000} 13292 }, mem_tbl_57765[] = { 13293 { 0x00000200, 0x00008}, 13294 { 0x00004000, 0x00800}, 13295 { 0x00006000, 0x09800}, 13296 { 0x00010000, 0x0a000}, 13297 { 0xffffffff, 0x00000} 13298 }; 13299 struct mem_entry *mem_tbl; 13300 int err = 0; 13301 int i; 13302 13303 if (tg3_flag(tp, 5717_PLUS)) 13304 mem_tbl = mem_tbl_5717; 13305 else if (tg3_flag(tp, 57765_CLASS) || 13306 tg3_asic_rev(tp) == ASIC_REV_5762) 13307 mem_tbl = mem_tbl_57765; 13308 else if (tg3_flag(tp, 5755_PLUS)) 13309 mem_tbl = mem_tbl_5755; 13310 else if (tg3_asic_rev(tp) == ASIC_REV_5906) 13311 mem_tbl = mem_tbl_5906; 13312 else if (tg3_flag(tp, 5705_PLUS)) 13313 mem_tbl = mem_tbl_5705; 13314 else 13315 mem_tbl = mem_tbl_570x; 13316 13317 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) { 13318 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len); 13319 if (err) 13320 break; 13321 } 13322 13323 return err; 13324 } 13325 13326 #define TG3_TSO_MSS 500 13327 13328 #define TG3_TSO_IP_HDR_LEN 20 13329 #define TG3_TSO_TCP_HDR_LEN 20 13330 #define TG3_TSO_TCP_OPT_LEN 12 13331 13332 static const u8 tg3_tso_header[] = { 13333 0x08, 0x00, 13334 0x45, 0x00, 0x00, 0x00, 13335 0x00, 0x00, 0x40, 0x00, 13336 0x40, 0x06, 0x00, 0x00, 13337 0x0a, 0x00, 0x00, 0x01, 13338 0x0a, 0x00, 0x00, 0x02, 13339 0x0d, 0x00, 0xe0, 0x00, 13340 0x00, 0x00, 0x01, 0x00, 13341 0x00, 0x00, 0x02, 0x00, 13342 0x80, 0x10, 0x10, 0x00, 13343 0x14, 0x09, 0x00, 0x00, 13344 0x01, 0x01, 0x08, 0x0a, 13345 0x11, 0x11, 0x11, 0x11, 13346 0x11, 0x11, 0x11, 0x11, 13347 }; 13348 13349 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback) 13350 { 13351 u32 rx_start_idx, rx_idx, tx_idx, opaque_key; 13352 u32 base_flags = 0, mss = 0, 
desc_idx, coal_now, data_off, val; 13353 u32 budget; 13354 struct sk_buff *skb; 13355 u8 *tx_data, *rx_data; 13356 dma_addr_t map; 13357 int num_pkts, tx_len, rx_len, i, err; 13358 struct tg3_rx_buffer_desc *desc; 13359 struct tg3_napi *tnapi, *rnapi; 13360 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring; 13361 13362 tnapi = &tp->napi[0]; 13363 rnapi = &tp->napi[0]; 13364 if (tp->irq_cnt > 1) { 13365 if (tg3_flag(tp, ENABLE_RSS)) 13366 rnapi = &tp->napi[1]; 13367 if (tg3_flag(tp, ENABLE_TSS)) 13368 tnapi = &tp->napi[1]; 13369 } 13370 coal_now = tnapi->coal_now | rnapi->coal_now; 13371 13372 err = -EIO; 13373 13374 tx_len = pktsz; 13375 skb = netdev_alloc_skb(tp->dev, tx_len); 13376 if (!skb) 13377 return -ENOMEM; 13378 13379 tx_data = skb_put(skb, tx_len); 13380 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN); 13381 memset(tx_data + ETH_ALEN, 0x0, 8); 13382 13383 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN); 13384 13385 if (tso_loopback) { 13386 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN]; 13387 13388 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN + 13389 TG3_TSO_TCP_OPT_LEN; 13390 13391 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header, 13392 sizeof(tg3_tso_header)); 13393 mss = TG3_TSO_MSS; 13394 13395 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header); 13396 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS); 13397 13398 /* Set the total length field in the IP header */ 13399 iph->tot_len = htons((u16)(mss + hdr_len)); 13400 13401 base_flags = (TXD_FLAG_CPU_PRE_DMA | 13402 TXD_FLAG_CPU_POST_DMA); 13403 13404 if (tg3_flag(tp, HW_TSO_1) || 13405 tg3_flag(tp, HW_TSO_2) || 13406 tg3_flag(tp, HW_TSO_3)) { 13407 struct tcphdr *th; 13408 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN; 13409 th = (struct tcphdr *)&tx_data[val]; 13410 th->check = 0; 13411 } else 13412 base_flags |= TXD_FLAG_TCPUDP_CSUM; 13413 13414 if (tg3_flag(tp, HW_TSO_3)) { 13415 mss |= (hdr_len & 0xc) << 12; 13416 if (hdr_len & 0x10) 13417 base_flags |= 0x00000010; 13418 base_flags |= (hdr_len & 0x3e0) << 5; 13419 } else if (tg3_flag(tp, HW_TSO_2)) 13420 mss |= hdr_len << 9; 13421 else if (tg3_flag(tp, HW_TSO_1) || 13422 tg3_asic_rev(tp) == ASIC_REV_5705) { 13423 mss |= (TG3_TSO_TCP_OPT_LEN << 9); 13424 } else { 13425 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10); 13426 } 13427 13428 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header); 13429 } else { 13430 num_pkts = 1; 13431 data_off = ETH_HLEN; 13432 13433 if (tg3_flag(tp, USE_JUMBO_BDFLAG) && 13434 tx_len > VLAN_ETH_FRAME_LEN) 13435 base_flags |= TXD_FLAG_JMB_PKT; 13436 } 13437 13438 for (i = data_off; i < tx_len; i++) 13439 tx_data[i] = (u8) (i & 0xff); 13440 13441 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE); 13442 if (pci_dma_mapping_error(tp->pdev, map)) { 13443 dev_kfree_skb(skb); 13444 return -EIO; 13445 } 13446 13447 val = tnapi->tx_prod; 13448 tnapi->tx_buffers[val].skb = skb; 13449 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map); 13450 13451 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 13452 rnapi->coal_now); 13453 13454 udelay(10); 13455 13456 rx_start_idx = rnapi->hw_status->idx[0].rx_producer; 13457 13458 budget = tg3_tx_avail(tnapi); 13459 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len, 13460 base_flags | TXD_FLAG_END, mss, 0)) { 13461 tnapi->tx_buffers[val].skb = NULL; 13462 dev_kfree_skb(skb); 13463 return -EIO; 13464 } 13465 13466 tnapi->tx_prod++; 13467 13468 /* Sync BD data before updating mailbox */ 13469 wmb(); 13470 13471 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod); 13472 
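/* Read the mailbox back to flush the posted PCI write, so the NIC sees the new producer index before the polling loop below. */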
tr32_mailbox(tnapi->prodmbox); 13473 13474 udelay(10); 13475 13476 /* 350 usec to allow enough time on some 10/100 Mbps devices. */ 13477 for (i = 0; i < 35; i++) { 13478 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 13479 coal_now); 13480 13481 udelay(10); 13482 13483 tx_idx = tnapi->hw_status->idx[0].tx_consumer; 13484 rx_idx = rnapi->hw_status->idx[0].rx_producer; 13485 if ((tx_idx == tnapi->tx_prod) && 13486 (rx_idx == (rx_start_idx + num_pkts))) 13487 break; 13488 } 13489 13490 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1); 13491 dev_kfree_skb(skb); 13492 13493 if (tx_idx != tnapi->tx_prod) 13494 goto out; 13495 13496 if (rx_idx != rx_start_idx + num_pkts) 13497 goto out; 13498 13499 val = data_off; 13500 while (rx_idx != rx_start_idx) { 13501 desc = &rnapi->rx_rcb[rx_start_idx++]; 13502 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; 13503 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; 13504 13505 if ((desc->err_vlan & RXD_ERR_MASK) != 0 && 13506 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) 13507 goto out; 13508 13509 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) 13510 - ETH_FCS_LEN; 13511 13512 if (!tso_loopback) { 13513 if (rx_len != tx_len) 13514 goto out; 13515 13516 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) { 13517 if (opaque_key != RXD_OPAQUE_RING_STD) 13518 goto out; 13519 } else { 13520 if (opaque_key != RXD_OPAQUE_RING_JUMBO) 13521 goto out; 13522 } 13523 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && 13524 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK) 13525 >> RXD_TCPCSUM_SHIFT != 0xffff) { 13526 goto out; 13527 } 13528 13529 if (opaque_key == RXD_OPAQUE_RING_STD) { 13530 rx_data = tpr->rx_std_buffers[desc_idx].data; 13531 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], 13532 mapping); 13533 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { 13534 rx_data = tpr->rx_jmb_buffers[desc_idx].data; 13535 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx], 13536 mapping); 13537 } else 13538 goto out; 13539 13540 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, 13541 PCI_DMA_FROMDEVICE); 13542 13543 rx_data += TG3_RX_OFFSET(tp); 13544 for (i = data_off; i < rx_len; i++, val++) { 13545 if (*(rx_data + i) != (u8) (val & 0xff)) 13546 goto out; 13547 } 13548 } 13549 13550 err = 0; 13551 13552 /* tg3_free_rings will unmap and free the rx_data */ 13553 out: 13554 return err; 13555 } 13556 13557 #define TG3_STD_LOOPBACK_FAILED 1 13558 #define TG3_JMB_LOOPBACK_FAILED 2 13559 #define TG3_TSO_LOOPBACK_FAILED 4 13560 #define TG3_LOOPBACK_FAILED \ 13561 (TG3_STD_LOOPBACK_FAILED | \ 13562 TG3_JMB_LOOPBACK_FAILED | \ 13563 TG3_TSO_LOOPBACK_FAILED) 13564 13565 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk) 13566 { 13567 int err = -EIO; 13568 u32 eee_cap; 13569 u32 jmb_pkt_sz = 9000; 13570 13571 if (tp->dma_limit) 13572 jmb_pkt_sz = tp->dma_limit - ETH_HLEN; 13573 13574 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP; 13575 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP; 13576 13577 if (!netif_running(tp->dev)) { 13578 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13579 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13580 if (do_extlpbk) 13581 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13582 goto done; 13583 } 13584 13585 err = tg3_reset_hw(tp, true); 13586 if (err) { 13587 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13588 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13589 if (do_extlpbk) 13590 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13591 goto done; 13592 } 13593 13594 if (tg3_flag(tp, ENABLE_RSS)) { 13595 int 
i; 13596 13597 /* Reroute all rx packets to the 1st queue */ 13598 for (i = MAC_RSS_INDIR_TBL_0; 13599 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4) 13600 tw32(i, 0x0); 13601 } 13602 13603 /* HW errata - mac loopback fails in some cases on 5780. 13604 * Normal traffic and PHY loopback are not affected by 13605 * errata. Also, the MAC loopback test is deprecated for 13606 * all newer ASIC revisions. 13607 */ 13608 if (tg3_asic_rev(tp) != ASIC_REV_5780 && 13609 !tg3_flag(tp, CPMU_PRESENT)) { 13610 tg3_mac_loopback(tp, true); 13611 13612 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) 13613 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED; 13614 13615 if (tg3_flag(tp, JUMBO_RING_ENABLE) && 13616 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false)) 13617 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED; 13618 13619 tg3_mac_loopback(tp, false); 13620 } 13621 13622 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 13623 !tg3_flag(tp, USE_PHYLIB)) { 13624 int i; 13625 13626 tg3_phy_lpbk_set(tp, 0, false); 13627 13628 /* Wait for link */ 13629 for (i = 0; i < 100; i++) { 13630 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) 13631 break; 13632 mdelay(1); 13633 } 13634 13635 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) 13636 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED; 13637 if (tg3_flag(tp, TSO_CAPABLE) && 13638 tg3_run_loopback(tp, ETH_FRAME_LEN, true)) 13639 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED; 13640 if (tg3_flag(tp, JUMBO_RING_ENABLE) && 13641 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false)) 13642 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED; 13643 13644 if (do_extlpbk) { 13645 tg3_phy_lpbk_set(tp, 0, true); 13646 13647 /* All link indications report up, but the hardware 13648 * isn't really ready for about 20 msec. Double it 13649 * to be sure. 13650 */ 13651 mdelay(40); 13652 13653 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) 13654 data[TG3_EXT_LOOPB_TEST] |= 13655 TG3_STD_LOOPBACK_FAILED; 13656 if (tg3_flag(tp, TSO_CAPABLE) && 13657 tg3_run_loopback(tp, ETH_FRAME_LEN, true)) 13658 data[TG3_EXT_LOOPB_TEST] |= 13659 TG3_TSO_LOOPBACK_FAILED; 13660 if (tg3_flag(tp, JUMBO_RING_ENABLE) && 13661 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false)) 13662 data[TG3_EXT_LOOPB_TEST] |= 13663 TG3_JMB_LOOPBACK_FAILED; 13664 } 13665 13666 /* Re-enable gphy autopowerdown. */ 13667 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) 13668 tg3_phy_toggle_apd(tp, true); 13669 } 13670 13671 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] | 13672 data[TG3_EXT_LOOPB_TEST]) ? 
-EIO : 0; 13673 13674 done: 13675 tp->phy_flags |= eee_cap; 13676 13677 return err; 13678 } 13679 13680 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, 13681 u64 *data) 13682 { 13683 struct tg3 *tp = netdev_priv(dev); 13684 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB; 13685 13686 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { 13687 if (tg3_power_up(tp)) { 13688 etest->flags |= ETH_TEST_FL_FAILED; 13689 memset(data, 1, sizeof(u64) * TG3_NUM_TEST); 13690 return; 13691 } 13692 tg3_ape_driver_state_change(tp, RESET_KIND_INIT); 13693 } 13694 13695 memset(data, 0, sizeof(u64) * TG3_NUM_TEST); 13696 13697 if (tg3_test_nvram(tp) != 0) { 13698 etest->flags |= ETH_TEST_FL_FAILED; 13699 data[TG3_NVRAM_TEST] = 1; 13700 } 13701 if (!doextlpbk && tg3_test_link(tp)) { 13702 etest->flags |= ETH_TEST_FL_FAILED; 13703 data[TG3_LINK_TEST] = 1; 13704 } 13705 if (etest->flags & ETH_TEST_FL_OFFLINE) { 13706 int err, err2 = 0, irq_sync = 0; 13707 13708 if (netif_running(dev)) { 13709 tg3_phy_stop(tp); 13710 tg3_netif_stop(tp); 13711 irq_sync = 1; 13712 } 13713 13714 tg3_full_lock(tp, irq_sync); 13715 tg3_halt(tp, RESET_KIND_SUSPEND, 1); 13716 err = tg3_nvram_lock(tp); 13717 tg3_halt_cpu(tp, RX_CPU_BASE); 13718 if (!tg3_flag(tp, 5705_PLUS)) 13719 tg3_halt_cpu(tp, TX_CPU_BASE); 13720 if (!err) 13721 tg3_nvram_unlock(tp); 13722 13723 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 13724 tg3_phy_reset(tp); 13725 13726 if (tg3_test_registers(tp) != 0) { 13727 etest->flags |= ETH_TEST_FL_FAILED; 13728 data[TG3_REGISTER_TEST] = 1; 13729 } 13730 13731 if (tg3_test_memory(tp) != 0) { 13732 etest->flags |= ETH_TEST_FL_FAILED; 13733 data[TG3_MEMORY_TEST] = 1; 13734 } 13735 13736 if (doextlpbk) 13737 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; 13738 13739 if (tg3_test_loopback(tp, data, doextlpbk)) 13740 etest->flags |= ETH_TEST_FL_FAILED; 13741 13742 tg3_full_unlock(tp); 13743 13744 if (tg3_test_interrupt(tp) != 0) { 13745 etest->flags |= ETH_TEST_FL_FAILED; 13746 data[TG3_INTERRUPT_TEST] = 1; 13747 } 13748 13749 tg3_full_lock(tp, 0); 13750 13751 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 13752 if (netif_running(dev)) { 13753 tg3_flag_set(tp, INIT_COMPLETE); 13754 err2 = tg3_restart_hw(tp, true); 13755 if (!err2) 13756 tg3_netif_start(tp); 13757 } 13758 13759 tg3_full_unlock(tp); 13760 13761 if (irq_sync && !err2) 13762 tg3_phy_start(tp); 13763 } 13764 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 13765 tg3_power_down_prepare(tp); 13766 13767 } 13768 13769 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) 13770 { 13771 struct tg3 *tp = netdev_priv(dev); 13772 struct hwtstamp_config stmpconf; 13773 13774 if (!tg3_flag(tp, PTP_CAPABLE)) 13775 return -EOPNOTSUPP; 13776 13777 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf))) 13778 return -EFAULT; 13779 13780 if (stmpconf.flags) 13781 return -EINVAL; 13782 13783 if (stmpconf.tx_type != HWTSTAMP_TX_ON && 13784 stmpconf.tx_type != HWTSTAMP_TX_OFF) 13785 return -ERANGE; 13786 13787 switch (stmpconf.rx_filter) { 13788 case HWTSTAMP_FILTER_NONE: 13789 tp->rxptpctl = 0; 13790 break; 13791 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 13792 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | 13793 TG3_RX_PTP_CTL_ALL_V1_EVENTS; 13794 break; 13795 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 13796 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | 13797 TG3_RX_PTP_CTL_SYNC_EVNT; 13798 break; 13799 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 13800 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | 13801 TG3_RX_PTP_CTL_DELAY_REQ; 13802 break; 13803 case 
HWTSTAMP_FILTER_PTP_V2_EVENT: 13804 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN | 13805 TG3_RX_PTP_CTL_ALL_V2_EVENTS; 13806 break; 13807 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 13808 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | 13809 TG3_RX_PTP_CTL_ALL_V2_EVENTS; 13810 break; 13811 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 13812 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | 13813 TG3_RX_PTP_CTL_ALL_V2_EVENTS; 13814 break; 13815 case HWTSTAMP_FILTER_PTP_V2_SYNC: 13816 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN | 13817 TG3_RX_PTP_CTL_SYNC_EVNT; 13818 break; 13819 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 13820 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | 13821 TG3_RX_PTP_CTL_SYNC_EVNT; 13822 break; 13823 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 13824 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | 13825 TG3_RX_PTP_CTL_SYNC_EVNT; 13826 break; 13827 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 13828 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN | 13829 TG3_RX_PTP_CTL_DELAY_REQ; 13830 break; 13831 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 13832 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | 13833 TG3_RX_PTP_CTL_DELAY_REQ; 13834 break; 13835 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 13836 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | 13837 TG3_RX_PTP_CTL_DELAY_REQ; 13838 break; 13839 default: 13840 return -ERANGE; 13841 } 13842 13843 if (netif_running(dev) && tp->rxptpctl) 13844 tw32(TG3_RX_PTP_CTL, 13845 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK); 13846 13847 if (stmpconf.tx_type == HWTSTAMP_TX_ON) 13848 tg3_flag_set(tp, TX_TSTAMP_EN); 13849 else 13850 tg3_flag_clear(tp, TX_TSTAMP_EN); 13851 13852 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ? 13853 -EFAULT : 0; 13854 } 13855 13856 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) 13857 { 13858 struct tg3 *tp = netdev_priv(dev); 13859 struct hwtstamp_config stmpconf; 13860 13861 if (!tg3_flag(tp, PTP_CAPABLE)) 13862 return -EOPNOTSUPP; 13863 13864 stmpconf.flags = 0; 13865 stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ? 
13866 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF); 13867 13868 switch (tp->rxptpctl) { 13869 case 0: 13870 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE; 13871 break; 13872 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS: 13873 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; 13874 break; 13875 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT: 13876 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC; 13877 break; 13878 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ: 13879 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ; 13880 break; 13881 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS: 13882 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; 13883 break; 13884 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS: 13885 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; 13886 break; 13887 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS: 13888 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; 13889 break; 13890 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT: 13891 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC; 13892 break; 13893 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT: 13894 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC; 13895 break; 13896 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT: 13897 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC; 13898 break; 13899 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ: 13900 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ; 13901 break; 13902 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ: 13903 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ; 13904 break; 13905 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ: 13906 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ; 13907 break; 13908 default: 13909 WARN_ON_ONCE(1); 13910 return -ERANGE; 13911 } 13912 13913 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ? 
13914 -EFAULT : 0; 13915 } 13916 13917 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 13918 { 13919 struct mii_ioctl_data *data = if_mii(ifr); 13920 struct tg3 *tp = netdev_priv(dev); 13921 int err; 13922 13923 if (tg3_flag(tp, USE_PHYLIB)) { 13924 struct phy_device *phydev; 13925 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 13926 return -EAGAIN; 13927 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 13928 return phy_mii_ioctl(phydev, ifr, cmd); 13929 } 13930 13931 switch (cmd) { 13932 case SIOCGMIIPHY: 13933 data->phy_id = tp->phy_addr; 13934 13935 /* fallthru */ 13936 case SIOCGMIIREG: { 13937 u32 mii_regval; 13938 13939 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 13940 break; /* We have no PHY */ 13941 13942 if (!netif_running(dev)) 13943 return -EAGAIN; 13944 13945 spin_lock_bh(&tp->lock); 13946 err = __tg3_readphy(tp, data->phy_id & 0x1f, 13947 data->reg_num & 0x1f, &mii_regval); 13948 spin_unlock_bh(&tp->lock); 13949 13950 data->val_out = mii_regval; 13951 13952 return err; 13953 } 13954 13955 case SIOCSMIIREG: 13956 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 13957 break; /* We have no PHY */ 13958 13959 if (!netif_running(dev)) 13960 return -EAGAIN; 13961 13962 spin_lock_bh(&tp->lock); 13963 err = __tg3_writephy(tp, data->phy_id & 0x1f, 13964 data->reg_num & 0x1f, data->val_in); 13965 spin_unlock_bh(&tp->lock); 13966 13967 return err; 13968 13969 case SIOCSHWTSTAMP: 13970 return tg3_hwtstamp_set(dev, ifr); 13971 13972 case SIOCGHWTSTAMP: 13973 return tg3_hwtstamp_get(dev, ifr); 13974 13975 default: 13976 /* do nothing */ 13977 break; 13978 } 13979 return -EOPNOTSUPP; 13980 } 13981 13982 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) 13983 { 13984 struct tg3 *tp = netdev_priv(dev); 13985 13986 memcpy(ec, &tp->coal, sizeof(*ec)); 13987 return 0; 13988 } 13989 13990 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) 13991 { 13992 struct tg3 *tp = netdev_priv(dev); 13993 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0; 13994 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0; 13995 13996 if (!tg3_flag(tp, 5705_PLUS)) { 13997 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT; 13998 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT; 13999 max_stat_coal_ticks = MAX_STAT_COAL_TICKS; 14000 min_stat_coal_ticks = MIN_STAT_COAL_TICKS; 14001 } 14002 14003 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) || 14004 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) || 14005 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) || 14006 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) || 14007 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) || 14008 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) || 14009 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) || 14010 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) || 14011 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) || 14012 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks)) 14013 return -EINVAL; 14014 14015 /* No rx interrupts will be generated if both are zero */ 14016 if ((ec->rx_coalesce_usecs == 0) && 14017 (ec->rx_max_coalesced_frames == 0)) 14018 return -EINVAL; 14019 14020 /* No tx interrupts will be generated if both are zero */ 14021 if ((ec->tx_coalesce_usecs == 0) && 14022 (ec->tx_max_coalesced_frames == 0)) 14023 return -EINVAL; 14024 14025 /* Only copy relevant parameters, ignore all others. 
*/ 14026 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs; 14027 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs; 14028 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames; 14029 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames; 14030 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq; 14031 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq; 14032 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq; 14033 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq; 14034 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs; 14035 14036 if (netif_running(dev)) { 14037 tg3_full_lock(tp, 0); 14038 __tg3_set_coalesce(tp, &tp->coal); 14039 tg3_full_unlock(tp); 14040 } 14041 return 0; 14042 } 14043 14044 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata) 14045 { 14046 struct tg3 *tp = netdev_priv(dev); 14047 14048 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { 14049 netdev_warn(tp->dev, "Board does not support EEE!\n"); 14050 return -EOPNOTSUPP; 14051 } 14052 14053 if (edata->advertised != tp->eee.advertised) { 14054 netdev_warn(tp->dev, 14055 "Direct manipulation of EEE advertisement is not supported\n"); 14056 return -EINVAL; 14057 } 14058 14059 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) { 14060 netdev_warn(tp->dev, 14061 "Maximal Tx Lpi timer supported is %#x(u)\n", 14062 TG3_CPMU_DBTMR1_LNKIDLE_MAX); 14063 return -EINVAL; 14064 } 14065 14066 tp->eee = *edata; 14067 14068 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; 14069 tg3_warn_mgmt_link_flap(tp); 14070 14071 if (netif_running(tp->dev)) { 14072 tg3_full_lock(tp, 0); 14073 tg3_setup_eee(tp); 14074 tg3_phy_reset(tp); 14075 tg3_full_unlock(tp); 14076 } 14077 14078 return 0; 14079 } 14080 14081 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata) 14082 { 14083 struct tg3 *tp = netdev_priv(dev); 14084 14085 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { 14086 netdev_warn(tp->dev, 14087 "Board does not support EEE!\n"); 14088 return -EOPNOTSUPP; 14089 } 14090 14091 *edata = tp->eee; 14092 return 0; 14093 } 14094 14095 static const struct ethtool_ops tg3_ethtool_ops = { 14096 .get_settings = tg3_get_settings, 14097 .set_settings = tg3_set_settings, 14098 .get_drvinfo = tg3_get_drvinfo, 14099 .get_regs_len = tg3_get_regs_len, 14100 .get_regs = tg3_get_regs, 14101 .get_wol = tg3_get_wol, 14102 .set_wol = tg3_set_wol, 14103 .get_msglevel = tg3_get_msglevel, 14104 .set_msglevel = tg3_set_msglevel, 14105 .nway_reset = tg3_nway_reset, 14106 .get_link = ethtool_op_get_link, 14107 .get_eeprom_len = tg3_get_eeprom_len, 14108 .get_eeprom = tg3_get_eeprom, 14109 .set_eeprom = tg3_set_eeprom, 14110 .get_ringparam = tg3_get_ringparam, 14111 .set_ringparam = tg3_set_ringparam, 14112 .get_pauseparam = tg3_get_pauseparam, 14113 .set_pauseparam = tg3_set_pauseparam, 14114 .self_test = tg3_self_test, 14115 .get_strings = tg3_get_strings, 14116 .set_phys_id = tg3_set_phys_id, 14117 .get_ethtool_stats = tg3_get_ethtool_stats, 14118 .get_coalesce = tg3_get_coalesce, 14119 .set_coalesce = tg3_set_coalesce, 14120 .get_sset_count = tg3_get_sset_count, 14121 .get_rxnfc = tg3_get_rxnfc, 14122 .get_rxfh_indir_size = tg3_get_rxfh_indir_size, 14123 .get_rxfh = tg3_get_rxfh, 14124 .set_rxfh = tg3_set_rxfh, 14125 .get_channels = tg3_get_channels, 14126 .set_channels = tg3_set_channels, 14127 .get_ts_info = tg3_get_ts_info, 14128 .get_eee = tg3_get_eee, 14129 .set_eee = tg3_set_eee, 14130 }; 14131 14132 static struct rtnl_link_stats64 
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);

	spin_lock_bh(&tp->lock);
	if (!tp->hw_stats) {
		*stats = tp->net_stats_prev;
		spin_unlock_bh(&tp->lock);
		return stats;
	}

	tg3_get_nstats(tp, stats);
	spin_unlock_bh(&tp->lock);

	return stats;
}

static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}

static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tg3_flag(tp, 5780_CLASS)) {
			netdev_update_features(dev);
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else {
			tg3_flag_set(tp, JUMBO_RING_ENABLE);
		}
	} else {
		if (tg3_flag(tp, 5780_CLASS)) {
			tg3_flag_set(tp, TSO_CAPABLE);
			netdev_update_features(dev);
		}
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
	}
}

static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;
	bool reset_phy = false;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_set_mtu(dev, tp, new_mtu);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	/* Reset PHY, otherwise the read DMA engine will be in a mode that
	 * breaks all requests to 256 bytes.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_57766)
		reset_phy = true;

	err = tg3_restart_hw(tp, reset_phy);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}

static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open = tg3_open,
	.ndo_stop = tg3_close,
	.ndo_start_xmit = tg3_start_xmit,
	.ndo_get_stats64 = tg3_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = tg3_set_rx_mode,
	.ndo_set_mac_address = tg3_set_mac_addr,
	.ndo_do_ioctl = tg3_ioctl,
	.ndo_tx_timeout = tg3_tx_timeout,
	.ndo_change_mtu = tg3_change_mtu,
	.ndo_fix_features = tg3_fix_features,
	.ndo_set_features = tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = tg3_poll_controller,
#endif
};

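/* Probe the EEPROM/NVRAM size by reading at offsets that double on each
 * pass (0x10, 0x20, 0x40, ...) until the magic signature from offset 0
 * re-appears, i.e. the addressing has wrapped around.
 */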
static void tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return;

	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}

static void tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* This is confusing. We want to operate on the
			 * 16-bit value at offset 0xf2. The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses. This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU. The 16-bit
			 * byteswap then brings the data to CPU endianness.
			 */
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
			return;
		}
	}
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}

static void tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}

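/* Decode the 5752-style page-size strap in NVRAM_CFG1 into a byte count.
 * The 264- and 528-byte entries correspond to Atmel DataFlash-style parts
 * that use power-of-two-plus-8 page sizes.
 */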
static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
{
	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5752PAGE_SIZE_512:
		tp->nvram_pagesize = 512;
		break;
	case FLASH_5752PAGE_SIZE_1K:
		tp->nvram_pagesize = 1024;
		break;
	case FLASH_5752PAGE_SIZE_2K:
		tp->nvram_pagesize = 2048;
		break;
	case FLASH_5752PAGE_SIZE_4K:
		tp->nvram_pagesize = 4096;
		break;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
		break;
	}
}

static void tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}

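/* Like tg3_get_5752_nvram_info() above, but the 5755 also derives the
 * usable NVRAM size from the vendor strap; when TPM protection is active,
 * a smaller unprotected size is reported instead of the full part size.
 */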
static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}

static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}

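/* The 5761 reports the usable size directly: with TPM protection active,
 * the limit is read back from the NVRAM_ADDR_LOCKOUT register; otherwise
 * it is inferred from the part selected by the vendor strap.
 */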
static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}

static void tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}

static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}

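/* 5717/5719 variant.  Strap values that can cover several densities leave
 * tp->nvram_size at zero here, so tg3_nvram_init() sizes the part
 * afterwards via tg3_get_nvram_size().
 */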
static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_get_nvram_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_get_nvram_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}

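/* 5720/5762 variant.  On the 5762 the vendor straps are first remapped to
 * their 5720 equivalents; a strap that can cover several densities is
 * forced to FLASH_5720VENDOR_ST_45USPT so the size is read from NVRAM
 * location 0xf0 instead.
 */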
static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
			tg3_flag_set(tp, NO_NVRAM);
			return;
		}

		switch (nvmpinstrp) {
		case FLASH_5762_EEPROM_HD:
			nvmpinstrp = FLASH_5720_EEPROM_HD;
			break;
		case FLASH_5762_EEPROM_LD:
			nvmpinstrp = FLASH_5720_EEPROM_LD;
			break;
		case FLASH_5720VENDOR_M_ST_M45PE20:
			/* This pinstrap supports multiple sizes, so force it
			 * to read the actual size from location 0xf0.
			 */
			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
			break;
		}
	}

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 val;

		if (tg3_nvram_read(tp, 0, &val))
			return;

		if (val != TG3_EEPROM_MAGIC &&
		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
			tg3_flag_set(tp, NO_NVRAM);
	}
}

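/* One-time NVRAM bring-up: reset the EEPROM state machine, enable auto
 * SEEPROM access, then (on everything newer than 5700/5701) take the NVRAM
 * lock and run the per-ASIC decode above.  If the decode did not pin down
 * a size, tg3_get_nvram_size() probes for one.
 */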
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void tg3_nvram_init(struct tg3 *tp)
{
	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tw32_f(GRC_EEPROM_ADDR,
	       (EEPROM_ADDR_FSM_RESET |
		(EEPROM_DEFAULT_CLOCK_PERIOD <<
		 EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	       tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
			 tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			 tg3_asic_rev(tp) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
			 tg3_asic_rev(tp) == ASIC_REV_5762)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}

struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

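/* Fallback mapping from PCI subsystem vendor/device IDs to PHY IDs, used
 * when the NVRAM carries no valid signature.  A phy_id of 0 marks a
 * serdes-only board (see tg3_phy_probe()).
 */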
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};

static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}
	return NULL;
}

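/* Pull the bootcode-provided hardware configuration out of NIC shared
 * memory (NIC_SRAM_DATA_*): PHY ID, LED mode, WOL/ASF/APE enables and
 * assorted PHY quirk flags.  On the 5906 the same information comes from
 * the VCPU shadow register instead.
 */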
static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default. */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
		u32 nic_phy_id, ver, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
					  SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);

			if (tg3_flag(tp, 5717_PLUS) ||
			    tg3_asic_rev(tp) == ASIC_REV_5762)
				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
						LED_CTRL_BLINK_RATE_MASK;

			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by
		 * bootcode if bit 18 is set
		 */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
			    !tg3_flag(tp, 57765_PLUS) &&
			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
				tg3_flag_set(tp, ASPM_WORKAROUND);
			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);

		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
	}
done:
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}

static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;

	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
	udelay(10);

	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
		udelay(10);
	}

	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}

static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}

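/* Illustrative worked example for tg3_read_otp_phycfg() below (values are
 * made up): if the first read returns 0xAAAA5555 and the second returns
 * 0x3333CCCC, the merged gphy config is (0x5555 << 16) | 0x3333, i.e.
 * 0x55553333.
 */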
/* Read the gphy configuration from the OTP region of the chip. The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}

static void tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
			adv |= ADVERTISED_1000baseT_Half;
		adv |= ADVERTISED_1000baseT_Full;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_UNKNOWN;
	tp->link_config.duplex = DUPLEX_UNKNOWN;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_UNKNOWN;
	tp->link_config.active_duplex = DUPLEX_UNKNOWN;

	tp->old_link = -1;
}

static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	if (!tg3_flag(tp, ENABLE_ASF) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to the hard-coded table based PHY_ID, and failing that,
		 * the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (p) {
				tp->phy_id = p->phy_id;
			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* For now we saw the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to a BCM4785 and there are
				 * probably more.  Just assume that the phy is
				 * supported when it is connected to a SSB core
				 * for now.
				 */
				return -ENODEV;
			}

			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

		tp->eee.supported = SUPPORTED_100baseT_Full |
				    SUPPORTED_1000baseT_Full;
		tp->eee.advertised = ADVERTISED_100baseT_Full |
				     ADVERTISED_1000baseT_Full;
		tp->eee.eee_enabled = 1;
		tp->eee.tx_lpi_enabled = 1;
		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
	}

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}

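/* Parse the PCI VPD read-only section: on Dell boards (mfr id "1028") the
 * VENDOR0 keyword seeds the firmware version string, and PARTNO supplies
 * the board part number.  Without usable VPD, fall back to a name derived
 * from the ASIC revision and PCI device ID.
 */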
static void tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i = 0;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > vpdlen)
		goto out_not_found;

	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		if (len >= sizeof(tp->fw_ver))
			len = sizeof(tp->fw_ver) - 1;
		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
			 &vpd_data[j]);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}

static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    val != 0)
		return 0;

	return 1;
}

static void tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}

static void tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}

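/* Selfboot format: major/minor/build are packed into one EDH word.  A
 * nonzero build number (1-26) is rendered as an 'a'-'z' suffix, e.g.
 * build 2 of v1.04 prints as "sb v1.04b".
 */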
static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;

	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}

static void tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}

static void tg3_probe_ncsi(struct tg3 *tp)
{
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
		tg3_flag_set(tp, APE_HAS_NCSI);
}

static void tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_flag(tp, APE_HAS_NCSI))
		fwtype = "NCSI";
	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
		fwtype = "SMASH";
	else
		fwtype = "DASH";

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}

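/* 5762 only: read two OTP words through the APE and scan up to seven
 * bytes for the last nonzero one, which is taken as the OTP version
 * number.
 */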
static void tg3_read_otp_ver(struct tg3 *tp)
{
	u32 val, val2;

	if (tg3_asic_rev(tp) != ASIC_REV_5762)
		return;

	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
	    TG3_OTP_MAGIC0_VALID(val)) {
		u64 val64 = (u64) val << 32 | val2;
		u32 ver = 0;
		int i, vlen;

		for (i = 0; i < 7; i++) {
			if ((val64 & 0xff) == 0)
				break;
			ver = val64 & 0xff;
			val64 >>= 8;
		}
		vlen = strlen(tp->fw_ver);
		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
	}
}

static void tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		tg3_read_otp_ver(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);

	if (tg3_flag(tp, ENABLE_ASF)) {
		if (tg3_flag(tp, ENABLE_APE)) {
			tg3_probe_ncsi(tp);
			if (!vpd_vers)
				tg3_read_dash_ver(tp);
		} else if (!vpd_vers) {
			tg3_read_mgmtfw_ver(tp);
		}
	}

	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}

static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}

static const struct pci_device_id tg3_write_reorder_chipsets[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};

static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other.
	 */
	pci_dev_put(peer);

	return peer;
}

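/* Work out the chip revision ID.  It normally lives in the upper bits of
 * TG3PCI_MISC_HOST_CTRL, but newer devices signal "use the product ID
 * register" there instead, in which case the real value is read from one
 * of the per-generation product ID config registers.  The *_PLUS and
 * *_CLASS capability flags are then derived from it.
 */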
16016 */ 16017 tg3_flag_set(tp, CPMU_PRESENT); 16018 16019 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || 16020 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C || 16021 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || 16022 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || 16023 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 || 16024 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 || 16025 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 || 16026 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 || 16027 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 || 16028 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 || 16029 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) 16030 reg = TG3PCI_GEN2_PRODID_ASICREV; 16031 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 || 16032 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 || 16033 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 || 16034 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 || 16035 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 || 16036 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 || 16037 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 || 16038 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 || 16039 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 || 16040 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786) 16041 reg = TG3PCI_GEN15_PRODID_ASICREV; 16042 else 16043 reg = TG3PCI_PRODID_ASICREV; 16044 16045 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id); 16046 } 16047 16048 /* Wrong chip ID in 5752 A0. This code can be removed later 16049 * as A0 is not in production. 16050 */ 16051 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW) 16052 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0; 16053 16054 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0) 16055 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0; 16056 16057 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 16058 tg3_asic_rev(tp) == ASIC_REV_5719 || 16059 tg3_asic_rev(tp) == ASIC_REV_5720) 16060 tg3_flag_set(tp, 5717_PLUS); 16061 16062 if (tg3_asic_rev(tp) == ASIC_REV_57765 || 16063 tg3_asic_rev(tp) == ASIC_REV_57766) 16064 tg3_flag_set(tp, 57765_CLASS); 16065 16066 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) || 16067 tg3_asic_rev(tp) == ASIC_REV_5762) 16068 tg3_flag_set(tp, 57765_PLUS); 16069 16070 /* Intentionally exclude ASIC_REV_5906 */ 16071 if (tg3_asic_rev(tp) == ASIC_REV_5755 || 16072 tg3_asic_rev(tp) == ASIC_REV_5787 || 16073 tg3_asic_rev(tp) == ASIC_REV_5784 || 16074 tg3_asic_rev(tp) == ASIC_REV_5761 || 16075 tg3_asic_rev(tp) == ASIC_REV_5785 || 16076 tg3_asic_rev(tp) == ASIC_REV_57780 || 16077 tg3_flag(tp, 57765_PLUS)) 16078 tg3_flag_set(tp, 5755_PLUS); 16079 16080 if (tg3_asic_rev(tp) == ASIC_REV_5780 || 16081 tg3_asic_rev(tp) == ASIC_REV_5714) 16082 tg3_flag_set(tp, 5780_CLASS); 16083 16084 if (tg3_asic_rev(tp) == ASIC_REV_5750 || 16085 tg3_asic_rev(tp) == ASIC_REV_5752 || 16086 tg3_asic_rev(tp) == ASIC_REV_5906 || 16087 tg3_flag(tp, 5755_PLUS) || 16088 tg3_flag(tp, 5780_CLASS)) 16089 tg3_flag_set(tp, 5750_PLUS); 16090 16091 if (tg3_asic_rev(tp) == ASIC_REV_5705 || 16092 tg3_flag(tp, 5750_PLUS)) 16093 tg3_flag_set(tp, 5705_PLUS); 16094 } 16095 16096 static bool tg3_10_100_only_device(struct tg3 *tp, 16097 const struct pci_device_id *ent) 16098 { 16099 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK; 16100 16101 if ((tg3_asic_rev(tp) == ASIC_REV_5703 && 16102 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) || 16103 (tp->phy_flags & TG3_PHYFLG_IS_FET)) 16104 return true; 16105 16106 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) { 16107 if 
(tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
				return true;
		} else {
			return true;
		}
	}

	return false;
}

static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
{
	u32 misc_ctrl_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err;

	/* Force memory write invalidate off. If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size. The Broadcom driver has this
	 * workaround but turns MWI off all the time, so it never uses
	 * it. This seems to suggest that the workaround is insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* Important! -- Make sure register accesses are byteswapped
	 * correctly. Also, for those chips that require it, make
	 * sure that indirect register accesses are enabled before
	 * the first operation.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			      &misc_ctrl_reg);
	tp->misc_host_ctrl |= (misc_ctrl_reg &
			       MISC_HOST_CTRL_CHIPREV);
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	tg3_detect_asic_rev(tp, misc_ctrl_reg);

	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
	 * we need to disable memory and use configuration cycles
	 * only to access all registers. The 5702/03 chips
	 * can mistakenly decode the special cycles from the
	 * ICH chipsets as memory write cycles, causing corruption
	 * of register and memory space. Only certain ICH bridges
	 * will drive special cycles with non-zero data during the
	 * address phase which can fall within the 5703's address
	 * range. This is not an ICH bug as the PCI spec allows
	 * non-zero address during special cycles. However, only
	 * these ICH bridges are known to drive non-zero addresses
	 * during special cycles.
	 *
	 * Since special cycles do not cross PCI bridges, we only
	 * enable this workaround if the 5703 is on the secondary
	 * bus of these ICH bridges.
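	 * (When one of the bridges listed below is matched, the
	 * ICH_WORKAROUND flag makes this function switch every register
	 * accessor to the indirect config-cycle helpers and unmap the
	 * MMIO BAR entirely.)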
16167 */ 16168 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) || 16169 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) { 16170 static struct tg3_dev_id { 16171 u32 vendor; 16172 u32 device; 16173 u32 rev; 16174 } ich_chipsets[] = { 16175 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8, 16176 PCI_ANY_ID }, 16177 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8, 16178 PCI_ANY_ID }, 16179 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11, 16180 0xa }, 16181 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6, 16182 PCI_ANY_ID }, 16183 { }, 16184 }; 16185 struct tg3_dev_id *pci_id = &ich_chipsets[0]; 16186 struct pci_dev *bridge = NULL; 16187 16188 while (pci_id->vendor != 0) { 16189 bridge = pci_get_device(pci_id->vendor, pci_id->device, 16190 bridge); 16191 if (!bridge) { 16192 pci_id++; 16193 continue; 16194 } 16195 if (pci_id->rev != PCI_ANY_ID) { 16196 if (bridge->revision > pci_id->rev) 16197 continue; 16198 } 16199 if (bridge->subordinate && 16200 (bridge->subordinate->number == 16201 tp->pdev->bus->number)) { 16202 tg3_flag_set(tp, ICH_WORKAROUND); 16203 pci_dev_put(bridge); 16204 break; 16205 } 16206 } 16207 } 16208 16209 if (tg3_asic_rev(tp) == ASIC_REV_5701) { 16210 static struct tg3_dev_id { 16211 u32 vendor; 16212 u32 device; 16213 } bridge_chipsets[] = { 16214 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 }, 16215 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 }, 16216 { }, 16217 }; 16218 struct tg3_dev_id *pci_id = &bridge_chipsets[0]; 16219 struct pci_dev *bridge = NULL; 16220 16221 while (pci_id->vendor != 0) { 16222 bridge = pci_get_device(pci_id->vendor, 16223 pci_id->device, 16224 bridge); 16225 if (!bridge) { 16226 pci_id++; 16227 continue; 16228 } 16229 if (bridge->subordinate && 16230 (bridge->subordinate->number <= 16231 tp->pdev->bus->number) && 16232 (bridge->subordinate->busn_res.end >= 16233 tp->pdev->bus->number)) { 16234 tg3_flag_set(tp, 5701_DMA_BUG); 16235 pci_dev_put(bridge); 16236 break; 16237 } 16238 } 16239 } 16240 16241 /* The EPB bridge inside 5714, 5715, and 5780 cannot support 16242 * DMA addresses > 40-bit. This bridge may have other additional 16243 * 57xx devices behind it in some 4-port NIC designs for example. 16244 * Any tg3 device found behind the bridge will also need the 40-bit 16245 * DMA workaround. 16246 */ 16247 if (tg3_flag(tp, 5780_CLASS)) { 16248 tg3_flag_set(tp, 40BIT_DMA_BUG); 16249 tp->msi_cap = tp->pdev->msi_cap; 16250 } else { 16251 struct pci_dev *bridge = NULL; 16252 16253 do { 16254 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS, 16255 PCI_DEVICE_ID_SERVERWORKS_EPB, 16256 bridge); 16257 if (bridge && bridge->subordinate && 16258 (bridge->subordinate->number <= 16259 tp->pdev->bus->number) && 16260 (bridge->subordinate->busn_res.end >= 16261 tp->pdev->bus->number)) { 16262 tg3_flag_set(tp, 40BIT_DMA_BUG); 16263 pci_dev_put(bridge); 16264 break; 16265 } 16266 } while (bridge); 16267 } 16268 16269 if (tg3_asic_rev(tp) == ASIC_REV_5704 || 16270 tg3_asic_rev(tp) == ASIC_REV_5714) 16271 tp->pdev_peer = tg3_find_peer(tp); 16272 16273 /* Determine TSO capabilities */ 16274 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0) 16275 ; /* Do nothing. HW bug. 
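			  * (No TSO flag at all is set for 5719 A0, so
			  * TSO_CAPABLE stays clear below and the networking
			  * core falls back to software segmentation.)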
*/ 16276 else if (tg3_flag(tp, 57765_PLUS)) 16277 tg3_flag_set(tp, HW_TSO_3); 16278 else if (tg3_flag(tp, 5755_PLUS) || 16279 tg3_asic_rev(tp) == ASIC_REV_5906) 16280 tg3_flag_set(tp, HW_TSO_2); 16281 else if (tg3_flag(tp, 5750_PLUS)) { 16282 tg3_flag_set(tp, HW_TSO_1); 16283 tg3_flag_set(tp, TSO_BUG); 16284 if (tg3_asic_rev(tp) == ASIC_REV_5750 && 16285 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2) 16286 tg3_flag_clear(tp, TSO_BUG); 16287 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 && 16288 tg3_asic_rev(tp) != ASIC_REV_5701 && 16289 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) { 16290 tg3_flag_set(tp, FW_TSO); 16291 tg3_flag_set(tp, TSO_BUG); 16292 if (tg3_asic_rev(tp) == ASIC_REV_5705) 16293 tp->fw_needed = FIRMWARE_TG3TSO5; 16294 else 16295 tp->fw_needed = FIRMWARE_TG3TSO; 16296 } 16297 16298 /* Selectively allow TSO based on operating conditions */ 16299 if (tg3_flag(tp, HW_TSO_1) || 16300 tg3_flag(tp, HW_TSO_2) || 16301 tg3_flag(tp, HW_TSO_3) || 16302 tg3_flag(tp, FW_TSO)) { 16303 /* For firmware TSO, assume ASF is disabled. 16304 * We'll disable TSO later if we discover ASF 16305 * is enabled in tg3_get_eeprom_hw_cfg(). 16306 */ 16307 tg3_flag_set(tp, TSO_CAPABLE); 16308 } else { 16309 tg3_flag_clear(tp, TSO_CAPABLE); 16310 tg3_flag_clear(tp, TSO_BUG); 16311 tp->fw_needed = NULL; 16312 } 16313 16314 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) 16315 tp->fw_needed = FIRMWARE_TG3; 16316 16317 if (tg3_asic_rev(tp) == ASIC_REV_57766) 16318 tp->fw_needed = FIRMWARE_TG357766; 16319 16320 tp->irq_max = 1; 16321 16322 if (tg3_flag(tp, 5750_PLUS)) { 16323 tg3_flag_set(tp, SUPPORT_MSI); 16324 if (tg3_chip_rev(tp) == CHIPREV_5750_AX || 16325 tg3_chip_rev(tp) == CHIPREV_5750_BX || 16326 (tg3_asic_rev(tp) == ASIC_REV_5714 && 16327 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 && 16328 tp->pdev_peer == tp->pdev)) 16329 tg3_flag_clear(tp, SUPPORT_MSI); 16330 16331 if (tg3_flag(tp, 5755_PLUS) || 16332 tg3_asic_rev(tp) == ASIC_REV_5906) { 16333 tg3_flag_set(tp, 1SHOT_MSI); 16334 } 16335 16336 if (tg3_flag(tp, 57765_PLUS)) { 16337 tg3_flag_set(tp, SUPPORT_MSIX); 16338 tp->irq_max = TG3_IRQ_MAX_VECS; 16339 } 16340 } 16341 16342 tp->txq_max = 1; 16343 tp->rxq_max = 1; 16344 if (tp->irq_max > 1) { 16345 tp->rxq_max = TG3_RSS_MAX_NUM_QS; 16346 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS); 16347 16348 if (tg3_asic_rev(tp) == ASIC_REV_5719 || 16349 tg3_asic_rev(tp) == ASIC_REV_5720) 16350 tp->txq_max = tp->irq_max - 1; 16351 } 16352 16353 if (tg3_flag(tp, 5755_PLUS) || 16354 tg3_asic_rev(tp) == ASIC_REV_5906) 16355 tg3_flag_set(tp, SHORT_DMA_BUG); 16356 16357 if (tg3_asic_rev(tp) == ASIC_REV_5719) 16358 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K; 16359 16360 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 16361 tg3_asic_rev(tp) == ASIC_REV_5719 || 16362 tg3_asic_rev(tp) == ASIC_REV_5720 || 16363 tg3_asic_rev(tp) == ASIC_REV_5762) 16364 tg3_flag_set(tp, LRG_PROD_RING_CAP); 16365 16366 if (tg3_flag(tp, 57765_PLUS) && 16367 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0) 16368 tg3_flag_set(tp, USE_JUMBO_BDFLAG); 16369 16370 if (!tg3_flag(tp, 5705_PLUS) || 16371 tg3_flag(tp, 5780_CLASS) || 16372 tg3_flag(tp, USE_JUMBO_BDFLAG)) 16373 tg3_flag_set(tp, JUMBO_CAPABLE); 16374 16375 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, 16376 &pci_state_reg); 16377 16378 if (pci_is_pcie(tp->pdev)) { 16379 u16 lnkctl; 16380 16381 tg3_flag_set(tp, PCI_EXPRESS); 16382 16383 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl); 16384 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) { 16385 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 16386 
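				/* Descriptive note: with CLKREQ enabled the
				 * 5906 gives up hardware TSO entirely (both
				 * flags are cleared just below), while the
				 * chips matched next only get the CLKREQ_BUG
				 * workaround flag.
				 */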
				tg3_flag_clear(tp, HW_TSO_2);
				tg3_flag_clear(tp, TSO_CAPABLE);
			}
			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
				tg3_flag_set(tp, CLKREQ_BUG);
		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
			tg3_flag_set(tp, L1PLLPD_EN);
		}
	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		/* BCM5785 devices are effectively PCIe devices, and should
		 * follow PCIe codepaths, but do not have a PCIe capabilities
		 * section.
		 */
		tg3_flag_set(tp, PCI_EXPRESS);
	} else if (!tg3_flag(tp, 5705_PLUS) ||
		   tg3_flag(tp, 5780_CLASS)) {
		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
		if (!tp->pcix_cap) {
			dev_err(&tp->pdev->dev,
				"Cannot find PCI-X capability, aborting\n");
			return -EIO;
		}

		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
			tg3_flag_set(tp, PCIX_MODE);
	}

	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles. We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
	if (pci_dev_present(tg3_write_reorder_chipsets) &&
	    !tg3_flag(tp, PCI_EXPRESS))
		tg3_flag_set(tp, MBOX_WRITE_REORDER);

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
			     &tp->pci_cacheline_sz);
	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
			     &tp->pci_lat_timer);
	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Important! -- It is critical that the PCI-X hw workaround
	 * situation is decided before the first MMIO register access.
	 */
	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to work around a bug.
		 */
		tg3_flag_set(tp, TXD_MBOX_HWBUG);

		/* If we are in PCI-X mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (tg3_flag(tp, PCIX_MODE)) {
			u32 pm_reg;

			tg3_flag_set(tp, PCIX_TARGET_HWBUG);

			/* The chip can have its power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev,
					      tp->pdev->pm_cap + PCI_PM_CTRL,
					      &pm_reg);
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev,
					       tp->pdev->pm_cap + PCI_PM_CTRL,
					       pm_reg);

			/* Also, force SERR#/PERR# in PCI command.
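			 * Presumably so that parity and system errors hit on
			 * the indirect-access path are reported instead of
			 * being dropped silently; the original comment gives
			 * no rationale.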
*/ 16470 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); 16471 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR; 16472 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); 16473 } 16474 } 16475 16476 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0) 16477 tg3_flag_set(tp, PCI_HIGH_SPEED); 16478 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0) 16479 tg3_flag_set(tp, PCI_32BIT); 16480 16481 /* Chip-specific fixup from Broadcom driver */ 16482 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) && 16483 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) { 16484 pci_state_reg |= PCISTATE_RETRY_SAME_DMA; 16485 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg); 16486 } 16487 16488 /* Default fast path register access methods */ 16489 tp->read32 = tg3_read32; 16490 tp->write32 = tg3_write32; 16491 tp->read32_mbox = tg3_read32; 16492 tp->write32_mbox = tg3_write32; 16493 tp->write32_tx_mbox = tg3_write32; 16494 tp->write32_rx_mbox = tg3_write32; 16495 16496 /* Various workaround register access methods */ 16497 if (tg3_flag(tp, PCIX_TARGET_HWBUG)) 16498 tp->write32 = tg3_write_indirect_reg32; 16499 else if (tg3_asic_rev(tp) == ASIC_REV_5701 || 16500 (tg3_flag(tp, PCI_EXPRESS) && 16501 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) { 16502 /* 16503 * Back to back register writes can cause problems on these 16504 * chips, the workaround is to read back all reg writes 16505 * except those to mailbox regs. 16506 * 16507 * See tg3_write_indirect_reg32(). 16508 */ 16509 tp->write32 = tg3_write_flush_reg32; 16510 } 16511 16512 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) { 16513 tp->write32_tx_mbox = tg3_write32_tx_mbox; 16514 if (tg3_flag(tp, MBOX_WRITE_REORDER)) 16515 tp->write32_rx_mbox = tg3_write_flush_reg32; 16516 } 16517 16518 if (tg3_flag(tp, ICH_WORKAROUND)) { 16519 tp->read32 = tg3_read_indirect_reg32; 16520 tp->write32 = tg3_write_indirect_reg32; 16521 tp->read32_mbox = tg3_read_indirect_mbox; 16522 tp->write32_mbox = tg3_write_indirect_mbox; 16523 tp->write32_tx_mbox = tg3_write_indirect_mbox; 16524 tp->write32_rx_mbox = tg3_write_indirect_mbox; 16525 16526 iounmap(tp->regs); 16527 tp->regs = NULL; 16528 16529 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); 16530 pci_cmd &= ~PCI_COMMAND_MEMORY; 16531 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); 16532 } 16533 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 16534 tp->read32_mbox = tg3_read32_mbox_5906; 16535 tp->write32_mbox = tg3_write32_mbox_5906; 16536 tp->write32_tx_mbox = tg3_write32_mbox_5906; 16537 tp->write32_rx_mbox = tg3_write32_mbox_5906; 16538 } 16539 16540 if (tp->write32 == tg3_write_indirect_reg32 || 16541 (tg3_flag(tp, PCIX_MODE) && 16542 (tg3_asic_rev(tp) == ASIC_REV_5700 || 16543 tg3_asic_rev(tp) == ASIC_REV_5701))) 16544 tg3_flag_set(tp, SRAM_USE_CONFIG); 16545 16546 /* The memory arbiter has to be enabled in order for SRAM accesses 16547 * to succeed. Normally on powerup the tg3 chip firmware will make 16548 * sure it is enabled, but other entities such as system netboot 16549 * code might disable it. 
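	 * (The enable bit is simply OR-ed back in below, which is harmless
	 * when the arbiter is already on.)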
16550 */ 16551 val = tr32(MEMARB_MODE); 16552 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); 16553 16554 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3; 16555 if (tg3_asic_rev(tp) == ASIC_REV_5704 || 16556 tg3_flag(tp, 5780_CLASS)) { 16557 if (tg3_flag(tp, PCIX_MODE)) { 16558 pci_read_config_dword(tp->pdev, 16559 tp->pcix_cap + PCI_X_STATUS, 16560 &val); 16561 tp->pci_fn = val & 0x7; 16562 } 16563 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 || 16564 tg3_asic_rev(tp) == ASIC_REV_5719 || 16565 tg3_asic_rev(tp) == ASIC_REV_5720) { 16566 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val); 16567 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG) 16568 val = tr32(TG3_CPMU_STATUS); 16569 16570 if (tg3_asic_rev(tp) == ASIC_REV_5717) 16571 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0; 16572 else 16573 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >> 16574 TG3_CPMU_STATUS_FSHFT_5719; 16575 } 16576 16577 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) { 16578 tp->write32_tx_mbox = tg3_write_flush_reg32; 16579 tp->write32_rx_mbox = tg3_write_flush_reg32; 16580 } 16581 16582 /* Get eeprom hw config before calling tg3_set_power_state(). 16583 * In particular, the TG3_FLAG_IS_NIC flag must be 16584 * determined before calling tg3_set_power_state() so that 16585 * we know whether or not to switch out of Vaux power. 16586 * When the flag is set, it means that GPIO1 is used for eeprom 16587 * write protect and also implies that it is a LOM where GPIOs 16588 * are not used to switch power. 16589 */ 16590 tg3_get_eeprom_hw_cfg(tp); 16591 16592 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) { 16593 tg3_flag_clear(tp, TSO_CAPABLE); 16594 tg3_flag_clear(tp, TSO_BUG); 16595 tp->fw_needed = NULL; 16596 } 16597 16598 if (tg3_flag(tp, ENABLE_APE)) { 16599 /* Allow reads and writes to the 16600 * APE register and memory space. 16601 */ 16602 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR | 16603 PCISTATE_ALLOW_APE_SHMEM_WR | 16604 PCISTATE_ALLOW_APE_PSPACE_WR; 16605 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, 16606 pci_state_reg); 16607 16608 tg3_ape_lock_init(tp); 16609 } 16610 16611 /* Set up tp->grc_local_ctrl before calling 16612 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high 16613 * will bring 5700's external PHY out of reset. 16614 * It is also used as eeprom write protect on LOMs. 16615 */ 16616 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM; 16617 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 16618 tg3_flag(tp, EEPROM_WRITE_PROT)) 16619 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | 16620 GRC_LCLCTRL_GPIO_OUTPUT1); 16621 /* Unused GPIO3 must be driven as output on 5752 because there 16622 * are no pull-up resistors on unused GPIO pins. 16623 */ 16624 else if (tg3_asic_rev(tp) == ASIC_REV_5752) 16625 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; 16626 16627 if (tg3_asic_rev(tp) == ASIC_REV_5755 || 16628 tg3_asic_rev(tp) == ASIC_REV_57780 || 16629 tg3_flag(tp, 57765_CLASS)) 16630 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; 16631 16632 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || 16633 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { 16634 /* Turn off the debug UART. */ 16635 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; 16636 if (tg3_flag(tp, IS_NIC)) 16637 /* Keep VMain power. 
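			 * by driving GPIO0 high, in line with the
			 * tg3_pwrsrc_switch_to_vmain() call made a little
			 * further down.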
*/ 16638 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | 16639 GRC_LCLCTRL_GPIO_OUTPUT0; 16640 } 16641 16642 if (tg3_asic_rev(tp) == ASIC_REV_5762) 16643 tp->grc_local_ctrl |= 16644 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL; 16645 16646 /* Switch out of Vaux if it is a NIC */ 16647 tg3_pwrsrc_switch_to_vmain(tp); 16648 16649 /* Derive initial jumbo mode from MTU assigned in 16650 * ether_setup() via the alloc_etherdev() call 16651 */ 16652 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS)) 16653 tg3_flag_set(tp, JUMBO_RING_ENABLE); 16654 16655 /* Determine WakeOnLan speed to use. */ 16656 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 16657 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || 16658 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 || 16659 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) { 16660 tg3_flag_clear(tp, WOL_SPEED_100MB); 16661 } else { 16662 tg3_flag_set(tp, WOL_SPEED_100MB); 16663 } 16664 16665 if (tg3_asic_rev(tp) == ASIC_REV_5906) 16666 tp->phy_flags |= TG3_PHYFLG_IS_FET; 16667 16668 /* A few boards don't want Ethernet@WireSpeed phy feature */ 16669 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 16670 (tg3_asic_rev(tp) == ASIC_REV_5705 && 16671 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) && 16672 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) || 16673 (tp->phy_flags & TG3_PHYFLG_IS_FET) || 16674 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 16675 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED; 16676 16677 if (tg3_chip_rev(tp) == CHIPREV_5703_AX || 16678 tg3_chip_rev(tp) == CHIPREV_5704_AX) 16679 tp->phy_flags |= TG3_PHYFLG_ADC_BUG; 16680 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) 16681 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG; 16682 16683 if (tg3_flag(tp, 5705_PLUS) && 16684 !(tp->phy_flags & TG3_PHYFLG_IS_FET) && 16685 tg3_asic_rev(tp) != ASIC_REV_5785 && 16686 tg3_asic_rev(tp) != ASIC_REV_57780 && 16687 !tg3_flag(tp, 57765_PLUS)) { 16688 if (tg3_asic_rev(tp) == ASIC_REV_5755 || 16689 tg3_asic_rev(tp) == ASIC_REV_5787 || 16690 tg3_asic_rev(tp) == ASIC_REV_5784 || 16691 tg3_asic_rev(tp) == ASIC_REV_5761) { 16692 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 && 16693 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722) 16694 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG; 16695 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M) 16696 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM; 16697 } else 16698 tp->phy_flags |= TG3_PHYFLG_BER_BUG; 16699 } 16700 16701 if (tg3_asic_rev(tp) == ASIC_REV_5784 && 16702 tg3_chip_rev(tp) != CHIPREV_5784_AX) { 16703 tp->phy_otp = tg3_read_otp_phycfg(tp); 16704 if (tp->phy_otp == 0) 16705 tp->phy_otp = TG3_OTP_DEFAULT; 16706 } 16707 16708 if (tg3_flag(tp, CPMU_PRESENT)) 16709 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST; 16710 else 16711 tp->mi_mode = MAC_MI_MODE_BASE; 16712 16713 tp->coalesce_mode = 0; 16714 if (tg3_chip_rev(tp) != CHIPREV_5700_AX && 16715 tg3_chip_rev(tp) != CHIPREV_5700_BX) 16716 tp->coalesce_mode |= HOSTCC_MODE_32BYTE; 16717 16718 /* Set these bits to enable statistics workaround. */ 16719 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 16720 tg3_asic_rev(tp) == ASIC_REV_5762 || 16721 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || 16722 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) { 16723 tp->coalesce_mode |= HOSTCC_MODE_ATTN; 16724 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN; 16725 } 16726 16727 if (tg3_asic_rev(tp) == ASIC_REV_5785 || 16728 tg3_asic_rev(tp) == ASIC_REV_57780) 16729 tg3_flag_set(tp, USE_PHYLIB); 16730 16731 err = tg3_mdio_init(tp); 16732 if (err) 16733 return err; 16734 16735 /* Initialize data/descriptor byte/word swapping. 
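	 * Only the swap/stackup bits read back from GRC_MODE survive the
	 * masking below; on 5720/5762 the B2HRX/HTX2B bits are preserved
	 * as well.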
*/ 16736 val = tr32(GRC_MODE); 16737 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 16738 tg3_asic_rev(tp) == ASIC_REV_5762) 16739 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA | 16740 GRC_MODE_WORD_SWAP_B2HRX_DATA | 16741 GRC_MODE_B2HRX_ENABLE | 16742 GRC_MODE_HTX2B_ENABLE | 16743 GRC_MODE_HOST_STACKUP); 16744 else 16745 val &= GRC_MODE_HOST_STACKUP; 16746 16747 tw32(GRC_MODE, val | tp->grc_mode); 16748 16749 tg3_switch_clocks(tp); 16750 16751 /* Clear this out for sanity. */ 16752 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); 16753 16754 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */ 16755 tw32(TG3PCI_REG_BASE_ADDR, 0); 16756 16757 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, 16758 &pci_state_reg); 16759 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 && 16760 !tg3_flag(tp, PCIX_TARGET_HWBUG)) { 16761 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || 16762 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 || 16763 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 || 16764 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) { 16765 void __iomem *sram_base; 16766 16767 /* Write some dummy words into the SRAM status block 16768 * area, see if it reads back correctly. If the return 16769 * value is bad, force enable the PCIX workaround. 16770 */ 16771 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK; 16772 16773 writel(0x00000000, sram_base); 16774 writel(0x00000000, sram_base + 4); 16775 writel(0xffffffff, sram_base + 4); 16776 if (readl(sram_base) != 0x00000000) 16777 tg3_flag_set(tp, PCIX_TARGET_HWBUG); 16778 } 16779 } 16780 16781 udelay(50); 16782 tg3_nvram_init(tp); 16783 16784 /* If the device has an NVRAM, no need to load patch firmware */ 16785 if (tg3_asic_rev(tp) == ASIC_REV_57766 && 16786 !tg3_flag(tp, NO_NVRAM)) 16787 tp->fw_needed = NULL; 16788 16789 grc_misc_cfg = tr32(GRC_MISC_CFG); 16790 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK; 16791 16792 if (tg3_asic_rev(tp) == ASIC_REV_5705 && 16793 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 || 16794 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M)) 16795 tg3_flag_set(tp, IS_5788); 16796 16797 if (!tg3_flag(tp, IS_5788) && 16798 tg3_asic_rev(tp) != ASIC_REV_5700) 16799 tg3_flag_set(tp, TAGGED_STATUS); 16800 if (tg3_flag(tp, TAGGED_STATUS)) { 16801 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD | 16802 HOSTCC_MODE_CLRTICK_TXBD); 16803 16804 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS; 16805 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, 16806 tp->misc_host_ctrl); 16807 } 16808 16809 /* Preserve the APE MAC_MODE bits */ 16810 if (tg3_flag(tp, ENABLE_APE)) 16811 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; 16812 else 16813 tp->mac_mode = 0; 16814 16815 if (tg3_10_100_only_device(tp, ent)) 16816 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY; 16817 16818 err = tg3_phy_probe(tp); 16819 if (err) { 16820 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err); 16821 /* ... but do not return immediately ... */ 16822 tg3_mdio_fini(tp); 16823 } 16824 16825 tg3_read_vpd(tp); 16826 tg3_read_fw_ver(tp); 16827 16828 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 16829 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT; 16830 } else { 16831 if (tg3_asic_rev(tp) == ASIC_REV_5700) 16832 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT; 16833 else 16834 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT; 16835 } 16836 16837 /* 5700 {AX,BX} chips have a broken status block link 16838 * change bit implementation, so we must use the 16839 * status register in those cases. 
16840 */ 16841 if (tg3_asic_rev(tp) == ASIC_REV_5700) 16842 tg3_flag_set(tp, USE_LINKCHG_REG); 16843 else 16844 tg3_flag_clear(tp, USE_LINKCHG_REG); 16845 16846 /* The led_ctrl is set during tg3_phy_probe, here we might 16847 * have to force the link status polling mechanism based 16848 * upon subsystem IDs. 16849 */ 16850 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL && 16851 tg3_asic_rev(tp) == ASIC_REV_5701 && 16852 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { 16853 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT; 16854 tg3_flag_set(tp, USE_LINKCHG_REG); 16855 } 16856 16857 /* For all SERDES we poll the MAC status register. */ 16858 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 16859 tg3_flag_set(tp, POLL_SERDES); 16860 else 16861 tg3_flag_clear(tp, POLL_SERDES); 16862 16863 if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF)) 16864 tg3_flag_set(tp, POLL_CPMU_LINK); 16865 16866 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN; 16867 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD; 16868 if (tg3_asic_rev(tp) == ASIC_REV_5701 && 16869 tg3_flag(tp, PCIX_MODE)) { 16870 tp->rx_offset = NET_SKB_PAD; 16871 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 16872 tp->rx_copy_thresh = ~(u16)0; 16873 #endif 16874 } 16875 16876 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1; 16877 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1; 16878 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1; 16879 16880 tp->rx_std_max_post = tp->rx_std_ring_mask + 1; 16881 16882 /* Increment the rx prod index on the rx std ring by at most 16883 * 8 for these chips to workaround hw errata. 16884 */ 16885 if (tg3_asic_rev(tp) == ASIC_REV_5750 || 16886 tg3_asic_rev(tp) == ASIC_REV_5752 || 16887 tg3_asic_rev(tp) == ASIC_REV_5755) 16888 tp->rx_std_max_post = 8; 16889 16890 if (tg3_flag(tp, ASPM_WORKAROUND)) 16891 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) & 16892 PCIE_PWR_MGMT_L1_THRESH_MSK; 16893 16894 return err; 16895 } 16896 16897 #ifdef CONFIG_SPARC 16898 static int tg3_get_macaddr_sparc(struct tg3 *tp) 16899 { 16900 struct net_device *dev = tp->dev; 16901 struct pci_dev *pdev = tp->pdev; 16902 struct device_node *dp = pci_device_to_OF_node(pdev); 16903 const unsigned char *addr; 16904 int len; 16905 16906 addr = of_get_property(dp, "local-mac-address", &len); 16907 if (addr && len == ETH_ALEN) { 16908 memcpy(dev->dev_addr, addr, ETH_ALEN); 16909 return 0; 16910 } 16911 return -ENODEV; 16912 } 16913 16914 static int tg3_get_default_macaddr_sparc(struct tg3 *tp) 16915 { 16916 struct net_device *dev = tp->dev; 16917 16918 memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN); 16919 return 0; 16920 } 16921 #endif 16922 16923 static int tg3_get_device_address(struct tg3 *tp) 16924 { 16925 struct net_device *dev = tp->dev; 16926 u32 hi, lo, mac_offset; 16927 int addr_ok = 0; 16928 int err; 16929 16930 #ifdef CONFIG_SPARC 16931 if (!tg3_get_macaddr_sparc(tp)) 16932 return 0; 16933 #endif 16934 16935 if (tg3_flag(tp, IS_SSB_CORE)) { 16936 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]); 16937 if (!err && is_valid_ether_addr(&dev->dev_addr[0])) 16938 return 0; 16939 } 16940 16941 mac_offset = 0x7c; 16942 if (tg3_asic_rev(tp) == ASIC_REV_5704 || 16943 tg3_flag(tp, 5780_CLASS)) { 16944 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) 16945 mac_offset = 0xcc; 16946 if (tg3_nvram_lock(tp)) 16947 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET); 16948 else 16949 tg3_nvram_unlock(tp); 16950 } else if (tg3_flag(tp, 5717_PLUS)) { 16951 if (tp->pci_fn & 1) 16952 mac_offset = 0xcc; 16953 if (tp->pci_fn > 1) 16954 mac_offset += 0x18c; 
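		/* Worked example (derived from the assignments above; not in
		 * the original source): per-function NVRAM MAC offsets are
		 * fn0 -> 0x7c, fn1 -> 0xcc, fn2 -> 0x7c + 0x18c = 0x208 and
		 * fn3 -> 0xcc + 0x18c = 0x258.
		 */
#if 0
		/* Equivalent table form of the logic above -- an
		 * illustrative sketch only, never compiled.  tp->pci_fn is
		 * already masked to 0-3 when it is derived from devfn.
		 */
		static const u32 tg3_5717_mac_offsets[4] = {
			0x7c, 0xcc, 0x208, 0x258
		};
		mac_offset = tg3_5717_mac_offsets[tp->pci_fn];
#endif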
16955 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) 16956 mac_offset = 0x10; 16957 16958 /* First try to get it from MAC address mailbox. */ 16959 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi); 16960 if ((hi >> 16) == 0x484b) { 16961 dev->dev_addr[0] = (hi >> 8) & 0xff; 16962 dev->dev_addr[1] = (hi >> 0) & 0xff; 16963 16964 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo); 16965 dev->dev_addr[2] = (lo >> 24) & 0xff; 16966 dev->dev_addr[3] = (lo >> 16) & 0xff; 16967 dev->dev_addr[4] = (lo >> 8) & 0xff; 16968 dev->dev_addr[5] = (lo >> 0) & 0xff; 16969 16970 /* Some old bootcode may report a 0 MAC address in SRAM */ 16971 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]); 16972 } 16973 if (!addr_ok) { 16974 /* Next, try NVRAM. */ 16975 if (!tg3_flag(tp, NO_NVRAM) && 16976 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) && 16977 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) { 16978 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2); 16979 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo)); 16980 } 16981 /* Finally just fetch it out of the MAC control regs. */ 16982 else { 16983 hi = tr32(MAC_ADDR_0_HIGH); 16984 lo = tr32(MAC_ADDR_0_LOW); 16985 16986 dev->dev_addr[5] = lo & 0xff; 16987 dev->dev_addr[4] = (lo >> 8) & 0xff; 16988 dev->dev_addr[3] = (lo >> 16) & 0xff; 16989 dev->dev_addr[2] = (lo >> 24) & 0xff; 16990 dev->dev_addr[1] = hi & 0xff; 16991 dev->dev_addr[0] = (hi >> 8) & 0xff; 16992 } 16993 } 16994 16995 if (!is_valid_ether_addr(&dev->dev_addr[0])) { 16996 #ifdef CONFIG_SPARC 16997 if (!tg3_get_default_macaddr_sparc(tp)) 16998 return 0; 16999 #endif 17000 return -EINVAL; 17001 } 17002 return 0; 17003 } 17004 17005 #define BOUNDARY_SINGLE_CACHELINE 1 17006 #define BOUNDARY_MULTI_CACHELINE 2 17007 17008 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val) 17009 { 17010 int cacheline_size; 17011 u8 byte; 17012 int goal; 17013 17014 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte); 17015 if (byte == 0) 17016 cacheline_size = 1024; 17017 else 17018 cacheline_size = (int) byte * 4; 17019 17020 /* On 5703 and later chips, the boundary bits have no 17021 * effect. 17022 */ 17023 if (tg3_asic_rev(tp) != ASIC_REV_5700 && 17024 tg3_asic_rev(tp) != ASIC_REV_5701 && 17025 !tg3_flag(tp, PCI_EXPRESS)) 17026 goto out; 17027 17028 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC) 17029 goal = BOUNDARY_MULTI_CACHELINE; 17030 #else 17031 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA) 17032 goal = BOUNDARY_SINGLE_CACHELINE; 17033 #else 17034 goal = 0; 17035 #endif 17036 #endif 17037 17038 if (tg3_flag(tp, 57765_PLUS)) { 17039 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT; 17040 goto out; 17041 } 17042 17043 if (!goal) 17044 goto out; 17045 17046 /* PCI controllers on most RISC systems tend to disconnect 17047 * when a device tries to burst across a cache-line boundary. 17048 * Therefore, letting tg3 do so just wastes PCI bandwidth. 17049 * 17050 * Unfortunately, for PCI-E there are only limited 17051 * write-side controls for this, and thus for reads 17052 * we will still get the disconnects. We'll also waste 17053 * these PCI cycles for both read and write for chips 17054 * other than 5700 and 5701 which do not implement the 17055 * boundary bits. 
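 * (This is also why the 57765_PLUS branch above simply disables cache
 * alignment when no boundary goal is in effect.)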
17056 */ 17057 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) { 17058 switch (cacheline_size) { 17059 case 16: 17060 case 32: 17061 case 64: 17062 case 128: 17063 if (goal == BOUNDARY_SINGLE_CACHELINE) { 17064 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX | 17065 DMA_RWCTRL_WRITE_BNDRY_128_PCIX); 17066 } else { 17067 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX | 17068 DMA_RWCTRL_WRITE_BNDRY_384_PCIX); 17069 } 17070 break; 17071 17072 case 256: 17073 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX | 17074 DMA_RWCTRL_WRITE_BNDRY_256_PCIX); 17075 break; 17076 17077 default: 17078 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX | 17079 DMA_RWCTRL_WRITE_BNDRY_384_PCIX); 17080 break; 17081 } 17082 } else if (tg3_flag(tp, PCI_EXPRESS)) { 17083 switch (cacheline_size) { 17084 case 16: 17085 case 32: 17086 case 64: 17087 if (goal == BOUNDARY_SINGLE_CACHELINE) { 17088 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE; 17089 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE; 17090 break; 17091 } 17092 /* fallthrough */ 17093 case 128: 17094 default: 17095 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE; 17096 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE; 17097 break; 17098 } 17099 } else { 17100 switch (cacheline_size) { 17101 case 16: 17102 if (goal == BOUNDARY_SINGLE_CACHELINE) { 17103 val |= (DMA_RWCTRL_READ_BNDRY_16 | 17104 DMA_RWCTRL_WRITE_BNDRY_16); 17105 break; 17106 } 17107 /* fallthrough */ 17108 case 32: 17109 if (goal == BOUNDARY_SINGLE_CACHELINE) { 17110 val |= (DMA_RWCTRL_READ_BNDRY_32 | 17111 DMA_RWCTRL_WRITE_BNDRY_32); 17112 break; 17113 } 17114 /* fallthrough */ 17115 case 64: 17116 if (goal == BOUNDARY_SINGLE_CACHELINE) { 17117 val |= (DMA_RWCTRL_READ_BNDRY_64 | 17118 DMA_RWCTRL_WRITE_BNDRY_64); 17119 break; 17120 } 17121 /* fallthrough */ 17122 case 128: 17123 if (goal == BOUNDARY_SINGLE_CACHELINE) { 17124 val |= (DMA_RWCTRL_READ_BNDRY_128 | 17125 DMA_RWCTRL_WRITE_BNDRY_128); 17126 break; 17127 } 17128 /* fallthrough */ 17129 case 256: 17130 val |= (DMA_RWCTRL_READ_BNDRY_256 | 17131 DMA_RWCTRL_WRITE_BNDRY_256); 17132 break; 17133 case 512: 17134 val |= (DMA_RWCTRL_READ_BNDRY_512 | 17135 DMA_RWCTRL_WRITE_BNDRY_512); 17136 break; 17137 case 1024: 17138 default: 17139 val |= (DMA_RWCTRL_READ_BNDRY_1024 | 17140 DMA_RWCTRL_WRITE_BNDRY_1024); 17141 break; 17142 } 17143 } 17144 17145 out: 17146 return val; 17147 } 17148 17149 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, 17150 int size, bool to_device) 17151 { 17152 struct tg3_internal_buffer_desc test_desc; 17153 u32 sram_dma_descs; 17154 int i, ret; 17155 17156 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE; 17157 17158 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0); 17159 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0); 17160 tw32(RDMAC_STATUS, 0); 17161 tw32(WDMAC_STATUS, 0); 17162 17163 tw32(BUFMGR_MODE, 0); 17164 tw32(FTQ_RESET, 0); 17165 17166 test_desc.addr_hi = ((u64) buf_dma) >> 32; 17167 test_desc.addr_lo = buf_dma & 0xffffffff; 17168 test_desc.nic_mbuf = 0x00002100; 17169 test_desc.len = size; 17170 17171 /* 17172 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz 17173 * the *second* time the tg3 driver was getting loaded after an 17174 * initial scan. 17175 * 17176 * Broadcom tells me: 17177 * ...the DMA engine is connected to the GRC block and a DMA 17178 * reset may affect the GRC block in some unpredictable way... 17179 * The behavior of resets to individual blocks has not been tested. 17180 * 17181 * Broadcom noted the GRC reset will also reset all sub-components. 
17182 */ 17183 if (to_device) { 17184 test_desc.cqid_sqid = (13 << 8) | 2; 17185 17186 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE); 17187 udelay(40); 17188 } else { 17189 test_desc.cqid_sqid = (16 << 8) | 7; 17190 17191 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE); 17192 udelay(40); 17193 } 17194 test_desc.flags = 0x00000005; 17195 17196 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) { 17197 u32 val; 17198 17199 val = *(((u32 *)&test_desc) + i); 17200 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 17201 sram_dma_descs + (i * sizeof(u32))); 17202 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); 17203 } 17204 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); 17205 17206 if (to_device) 17207 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs); 17208 else 17209 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs); 17210 17211 ret = -ENODEV; 17212 for (i = 0; i < 40; i++) { 17213 u32 val; 17214 17215 if (to_device) 17216 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ); 17217 else 17218 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ); 17219 if ((val & 0xffff) == sram_dma_descs) { 17220 ret = 0; 17221 break; 17222 } 17223 17224 udelay(100); 17225 } 17226 17227 return ret; 17228 } 17229 17230 #define TEST_BUFFER_SIZE 0x2000 17231 17232 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = { 17233 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) }, 17234 { }, 17235 }; 17236 17237 static int tg3_test_dma(struct tg3 *tp) 17238 { 17239 dma_addr_t buf_dma; 17240 u32 *buf, saved_dma_rwctrl; 17241 int ret = 0; 17242 17243 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, 17244 &buf_dma, GFP_KERNEL); 17245 if (!buf) { 17246 ret = -ENOMEM; 17247 goto out_nofree; 17248 } 17249 17250 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) | 17251 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT)); 17252 17253 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl); 17254 17255 if (tg3_flag(tp, 57765_PLUS)) 17256 goto out; 17257 17258 if (tg3_flag(tp, PCI_EXPRESS)) { 17259 /* DMA read watermark not used on PCIE */ 17260 tp->dma_rwctrl |= 0x00180000; 17261 } else if (!tg3_flag(tp, PCIX_MODE)) { 17262 if (tg3_asic_rev(tp) == ASIC_REV_5705 || 17263 tg3_asic_rev(tp) == ASIC_REV_5750) 17264 tp->dma_rwctrl |= 0x003f0000; 17265 else 17266 tp->dma_rwctrl |= 0x003f000f; 17267 } else { 17268 if (tg3_asic_rev(tp) == ASIC_REV_5703 || 17269 tg3_asic_rev(tp) == ASIC_REV_5704) { 17270 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f); 17271 u32 read_water = 0x7; 17272 17273 /* If the 5704 is behind the EPB bridge, we can 17274 * do the less restrictive ONE_DMA workaround for 17275 * better performance. 
17276 */ 17277 if (tg3_flag(tp, 40BIT_DMA_BUG) && 17278 tg3_asic_rev(tp) == ASIC_REV_5704) 17279 tp->dma_rwctrl |= 0x8000; 17280 else if (ccval == 0x6 || ccval == 0x7) 17281 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA; 17282 17283 if (tg3_asic_rev(tp) == ASIC_REV_5703) 17284 read_water = 4; 17285 /* Set bit 23 to enable PCIX hw bug fix */ 17286 tp->dma_rwctrl |= 17287 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) | 17288 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) | 17289 (1 << 23); 17290 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) { 17291 /* 5780 always in PCIX mode */ 17292 tp->dma_rwctrl |= 0x00144000; 17293 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) { 17294 /* 5714 always in PCIX mode */ 17295 tp->dma_rwctrl |= 0x00148000; 17296 } else { 17297 tp->dma_rwctrl |= 0x001b000f; 17298 } 17299 } 17300 if (tg3_flag(tp, ONE_DMA_AT_ONCE)) 17301 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA; 17302 17303 if (tg3_asic_rev(tp) == ASIC_REV_5703 || 17304 tg3_asic_rev(tp) == ASIC_REV_5704) 17305 tp->dma_rwctrl &= 0xfffffff0; 17306 17307 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 17308 tg3_asic_rev(tp) == ASIC_REV_5701) { 17309 /* Remove this if it causes problems for some boards. */ 17310 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT; 17311 17312 /* On 5700/5701 chips, we need to set this bit. 17313 * Otherwise the chip will issue cacheline transactions 17314 * to streamable DMA memory with not all the byte 17315 * enables turned on. This is an error on several 17316 * RISC PCI controllers, in particular sparc64. 17317 * 17318 * On 5703/5704 chips, this bit has been reassigned 17319 * a different meaning. In particular, it is used 17320 * on those chips to enable a PCI-X workaround. 17321 */ 17322 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE; 17323 } 17324 17325 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 17326 17327 17328 if (tg3_asic_rev(tp) != ASIC_REV_5700 && 17329 tg3_asic_rev(tp) != ASIC_REV_5701) 17330 goto out; 17331 17332 /* It is best to perform DMA test with maximum write burst size 17333 * to expose the 5700/5701 write DMA bug. 17334 */ 17335 saved_dma_rwctrl = tp->dma_rwctrl; 17336 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; 17337 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 17338 17339 while (1) { 17340 u32 *p = buf, i; 17341 17342 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) 17343 p[i] = i; 17344 17345 /* Send the buffer to the chip. */ 17346 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true); 17347 if (ret) { 17348 dev_err(&tp->pdev->dev, 17349 "%s: Buffer write failed. err = %d\n", 17350 __func__, ret); 17351 break; 17352 } 17353 17354 /* Now read it back. */ 17355 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false); 17356 if (ret) { 17357 dev_err(&tp->pdev->dev, "%s: Buffer read failed. " 17358 "err = %d\n", __func__, ret); 17359 break; 17360 } 17361 17362 /* Verify it. */ 17363 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) { 17364 if (p[i] == i) 17365 continue; 17366 17367 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != 17368 DMA_RWCTRL_WRITE_BNDRY_16) { 17369 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; 17370 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; 17371 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 17372 break; 17373 } else { 17374 dev_err(&tp->pdev->dev, 17375 "%s: Buffer corrupted on read back! " 17376 "(%d != %d)\n", __func__, p[i], i); 17377 ret = -ENODEV; 17378 goto out; 17379 } 17380 } 17381 17382 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) { 17383 /* Success. 
*/ 17384 ret = 0; 17385 break; 17386 } 17387 } 17388 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != 17389 DMA_RWCTRL_WRITE_BNDRY_16) { 17390 /* DMA test passed without adjusting DMA boundary, 17391 * now look for chipsets that are known to expose the 17392 * DMA bug without failing the test. 17393 */ 17394 if (pci_dev_present(tg3_dma_wait_state_chipsets)) { 17395 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; 17396 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; 17397 } else { 17398 /* Safe to use the calculated DMA boundary. */ 17399 tp->dma_rwctrl = saved_dma_rwctrl; 17400 } 17401 17402 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 17403 } 17404 17405 out: 17406 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma); 17407 out_nofree: 17408 return ret; 17409 } 17410 17411 static void tg3_init_bufmgr_config(struct tg3 *tp) 17412 { 17413 if (tg3_flag(tp, 57765_PLUS)) { 17414 tp->bufmgr_config.mbuf_read_dma_low_water = 17415 DEFAULT_MB_RDMA_LOW_WATER_5705; 17416 tp->bufmgr_config.mbuf_mac_rx_low_water = 17417 DEFAULT_MB_MACRX_LOW_WATER_57765; 17418 tp->bufmgr_config.mbuf_high_water = 17419 DEFAULT_MB_HIGH_WATER_57765; 17420 17421 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = 17422 DEFAULT_MB_RDMA_LOW_WATER_5705; 17423 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = 17424 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765; 17425 tp->bufmgr_config.mbuf_high_water_jumbo = 17426 DEFAULT_MB_HIGH_WATER_JUMBO_57765; 17427 } else if (tg3_flag(tp, 5705_PLUS)) { 17428 tp->bufmgr_config.mbuf_read_dma_low_water = 17429 DEFAULT_MB_RDMA_LOW_WATER_5705; 17430 tp->bufmgr_config.mbuf_mac_rx_low_water = 17431 DEFAULT_MB_MACRX_LOW_WATER_5705; 17432 tp->bufmgr_config.mbuf_high_water = 17433 DEFAULT_MB_HIGH_WATER_5705; 17434 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 17435 tp->bufmgr_config.mbuf_mac_rx_low_water = 17436 DEFAULT_MB_MACRX_LOW_WATER_5906; 17437 tp->bufmgr_config.mbuf_high_water = 17438 DEFAULT_MB_HIGH_WATER_5906; 17439 } 17440 17441 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = 17442 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780; 17443 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = 17444 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780; 17445 tp->bufmgr_config.mbuf_high_water_jumbo = 17446 DEFAULT_MB_HIGH_WATER_JUMBO_5780; 17447 } else { 17448 tp->bufmgr_config.mbuf_read_dma_low_water = 17449 DEFAULT_MB_RDMA_LOW_WATER; 17450 tp->bufmgr_config.mbuf_mac_rx_low_water = 17451 DEFAULT_MB_MACRX_LOW_WATER; 17452 tp->bufmgr_config.mbuf_high_water = 17453 DEFAULT_MB_HIGH_WATER; 17454 17455 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = 17456 DEFAULT_MB_RDMA_LOW_WATER_JUMBO; 17457 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = 17458 DEFAULT_MB_MACRX_LOW_WATER_JUMBO; 17459 tp->bufmgr_config.mbuf_high_water_jumbo = 17460 DEFAULT_MB_HIGH_WATER_JUMBO; 17461 } 17462 17463 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER; 17464 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER; 17465 } 17466 17467 static char *tg3_phy_string(struct tg3 *tp) 17468 { 17469 switch (tp->phy_id & TG3_PHY_ID_MASK) { 17470 case TG3_PHY_ID_BCM5400: return "5400"; 17471 case TG3_PHY_ID_BCM5401: return "5401"; 17472 case TG3_PHY_ID_BCM5411: return "5411"; 17473 case TG3_PHY_ID_BCM5701: return "5701"; 17474 case TG3_PHY_ID_BCM5703: return "5703"; 17475 case TG3_PHY_ID_BCM5704: return "5704"; 17476 case TG3_PHY_ID_BCM5705: return "5705"; 17477 case TG3_PHY_ID_BCM5750: return "5750"; 17478 case TG3_PHY_ID_BCM5752: return "5752"; 17479 case TG3_PHY_ID_BCM5714: return "5714"; 17480 case TG3_PHY_ID_BCM5780: return "5780"; 17481 case 
TG3_PHY_ID_BCM5755: return "5755"; 17482 case TG3_PHY_ID_BCM5787: return "5787"; 17483 case TG3_PHY_ID_BCM5784: return "5784"; 17484 case TG3_PHY_ID_BCM5756: return "5722/5756"; 17485 case TG3_PHY_ID_BCM5906: return "5906"; 17486 case TG3_PHY_ID_BCM5761: return "5761"; 17487 case TG3_PHY_ID_BCM5718C: return "5718C"; 17488 case TG3_PHY_ID_BCM5718S: return "5718S"; 17489 case TG3_PHY_ID_BCM57765: return "57765"; 17490 case TG3_PHY_ID_BCM5719C: return "5719C"; 17491 case TG3_PHY_ID_BCM5720C: return "5720C"; 17492 case TG3_PHY_ID_BCM5762: return "5762C"; 17493 case TG3_PHY_ID_BCM8002: return "8002/serdes"; 17494 case 0: return "serdes"; 17495 default: return "unknown"; 17496 } 17497 } 17498 17499 static char *tg3_bus_string(struct tg3 *tp, char *str) 17500 { 17501 if (tg3_flag(tp, PCI_EXPRESS)) { 17502 strcpy(str, "PCI Express"); 17503 return str; 17504 } else if (tg3_flag(tp, PCIX_MODE)) { 17505 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f; 17506 17507 strcpy(str, "PCIX:"); 17508 17509 if ((clock_ctrl == 7) || 17510 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) == 17511 GRC_MISC_CFG_BOARD_ID_5704CIOBE)) 17512 strcat(str, "133MHz"); 17513 else if (clock_ctrl == 0) 17514 strcat(str, "33MHz"); 17515 else if (clock_ctrl == 2) 17516 strcat(str, "50MHz"); 17517 else if (clock_ctrl == 4) 17518 strcat(str, "66MHz"); 17519 else if (clock_ctrl == 6) 17520 strcat(str, "100MHz"); 17521 } else { 17522 strcpy(str, "PCI:"); 17523 if (tg3_flag(tp, PCI_HIGH_SPEED)) 17524 strcat(str, "66MHz"); 17525 else 17526 strcat(str, "33MHz"); 17527 } 17528 if (tg3_flag(tp, PCI_32BIT)) 17529 strcat(str, ":32-bit"); 17530 else 17531 strcat(str, ":64-bit"); 17532 return str; 17533 } 17534 17535 static void tg3_init_coal(struct tg3 *tp) 17536 { 17537 struct ethtool_coalesce *ec = &tp->coal; 17538 17539 memset(ec, 0, sizeof(*ec)); 17540 ec->cmd = ETHTOOL_GCOALESCE; 17541 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS; 17542 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS; 17543 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES; 17544 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES; 17545 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT; 17546 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT; 17547 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT; 17548 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT; 17549 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS; 17550 17551 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD | 17552 HOSTCC_MODE_CLRTICK_TXBD)) { 17553 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS; 17554 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS; 17555 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS; 17556 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS; 17557 } 17558 17559 if (tg3_flag(tp, 5705_PLUS)) { 17560 ec->rx_coalesce_usecs_irq = 0; 17561 ec->tx_coalesce_usecs_irq = 0; 17562 ec->stats_block_coalesce_usecs = 0; 17563 } 17564 } 17565 17566 static int tg3_init_one(struct pci_dev *pdev, 17567 const struct pci_device_id *ent) 17568 { 17569 struct net_device *dev; 17570 struct tg3 *tp; 17571 int i, err; 17572 u32 sndmbx, rcvmbx, intmbx; 17573 char str[40]; 17574 u64 dma_mask, persist_dma_mask; 17575 netdev_features_t features = 0; 17576 17577 printk_once(KERN_INFO "%s\n", version); 17578 17579 err = pci_enable_device(pdev); 17580 if (err) { 17581 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 17582 return err; 17583 } 17584 17585 err = pci_request_regions(pdev, DRV_MODULE_NAME); 17586 if (err) { 17587 dev_err(&pdev->dev, "Cannot obtain PCI resources, 
aborting\n"); 17588 goto err_out_disable_pdev; 17589 } 17590 17591 pci_set_master(pdev); 17592 17593 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS); 17594 if (!dev) { 17595 err = -ENOMEM; 17596 goto err_out_free_res; 17597 } 17598 17599 SET_NETDEV_DEV(dev, &pdev->dev); 17600 17601 tp = netdev_priv(dev); 17602 tp->pdev = pdev; 17603 tp->dev = dev; 17604 tp->rx_mode = TG3_DEF_RX_MODE; 17605 tp->tx_mode = TG3_DEF_TX_MODE; 17606 tp->irq_sync = 1; 17607 tp->pcierr_recovery = false; 17608 17609 if (tg3_debug > 0) 17610 tp->msg_enable = tg3_debug; 17611 else 17612 tp->msg_enable = TG3_DEF_MSG_ENABLE; 17613 17614 if (pdev_is_ssb_gige_core(pdev)) { 17615 tg3_flag_set(tp, IS_SSB_CORE); 17616 if (ssb_gige_must_flush_posted_writes(pdev)) 17617 tg3_flag_set(tp, FLUSH_POSTED_WRITES); 17618 if (ssb_gige_one_dma_at_once(pdev)) 17619 tg3_flag_set(tp, ONE_DMA_AT_ONCE); 17620 if (ssb_gige_have_roboswitch(pdev)) { 17621 tg3_flag_set(tp, USE_PHYLIB); 17622 tg3_flag_set(tp, ROBOSWITCH); 17623 } 17624 if (ssb_gige_is_rgmii(pdev)) 17625 tg3_flag_set(tp, RGMII_MODE); 17626 } 17627 17628 /* The word/byte swap controls here control register access byte 17629 * swapping. DMA data byte swapping is controlled in the GRC_MODE 17630 * setting below. 17631 */ 17632 tp->misc_host_ctrl = 17633 MISC_HOST_CTRL_MASK_PCI_INT | 17634 MISC_HOST_CTRL_WORD_SWAP | 17635 MISC_HOST_CTRL_INDIR_ACCESS | 17636 MISC_HOST_CTRL_PCISTATE_RW; 17637 17638 /* The NONFRM (non-frame) byte/word swap controls take effect 17639 * on descriptor entries, anything which isn't packet data. 17640 * 17641 * The StrongARM chips on the board (one for tx, one for rx) 17642 * are running in big-endian mode. 17643 */ 17644 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA | 17645 GRC_MODE_WSWAP_NONFRM_DATA); 17646 #ifdef __BIG_ENDIAN 17647 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA; 17648 #endif 17649 spin_lock_init(&tp->lock); 17650 spin_lock_init(&tp->indirect_lock); 17651 INIT_WORK(&tp->reset_task, tg3_reset_task); 17652 17653 tp->regs = pci_ioremap_bar(pdev, BAR_0); 17654 if (!tp->regs) { 17655 dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); 17656 err = -ENOMEM; 17657 goto err_out_free_dev; 17658 } 17659 17660 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || 17661 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E || 17662 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S || 17663 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE || 17664 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || 17665 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C || 17666 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || 17667 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || 17668 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 || 17669 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 || 17670 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 || 17671 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 || 17672 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 || 17673 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 || 17674 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) { 17675 tg3_flag_set(tp, ENABLE_APE); 17676 tp->aperegs = pci_ioremap_bar(pdev, BAR_2); 17677 if (!tp->aperegs) { 17678 dev_err(&pdev->dev, 17679 "Cannot map APE registers, aborting\n"); 17680 err = -ENOMEM; 17681 goto err_out_iounmap; 17682 } 17683 } 17684 17685 tp->rx_pending = TG3_DEF_RX_RING_PENDING; 17686 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING; 17687 17688 dev->ethtool_ops = &tg3_ethtool_ops; 17689 dev->watchdog_timeo = TG3_TX_TIMEOUT; 17690 dev->netdev_ops = &tg3_netdev_ops; 17691 
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp, ent);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
		    tg3_asic_rev(tp) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
			 NETIF_F_HW_VLAN_CTAG_RX;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
	 * loopback for the remaining devices.
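	 * (Only MAC loopback is exposed for now, via the NETIF_F_LOOPBACK
	 * feature bit added just below.)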

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow
	 * INT-PHY loopback for the remaining devices.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;
	dev->priv_flags |= IFF_UNICAST_FLT;

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we set
		 * up above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}
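
	/*
	 * Worked example of the mailbox arithmetic above (a sketch,
	 * with offsets relative to the initial intmbx/rcvmbx/sndmbx
	 * values, not authoritative register documentation):
	 *
	 *	vec 0: int_mbox +0x00, consmbox +0x00, prodmbox +0x00
	 *	vec 1: int_mbox +0x08, consmbox +0x00, prodmbox +0x00
	 *	vec 2: int_mbox +0x10, consmbox +0x08, prodmbox stepped
	 *	       by -0x4 or +0xc depending on bit 2 of the running
	 *	       sndmbx address
	 *
	 * Vector 1 reuses vector 0's rx/tx mailboxes because vector 0
	 * only handles link interrupts, as the loop comment notes.
	 */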

	/*
	 * Reset the chip in case the UNDI or EFI driver did not shut it
	 * down cleanly.  The DMA self test will enable WDMAC and we'll
	 * see (spurious) pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tg3_full_lock(tp, 0);
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, PTP_CAPABLE);

	tg3_timer_init(tp);

	tg3_carrier_off(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	if (tg3_flag(tp, PTP_CAPABLE)) {
		tg3_ptp_init(tp);
		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
						   &tp->pdev->dev);
		if (IS_ERR(tp->ptp_clock))
			tp->ptp_clock = NULL;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tg3_chip_rev_id(tp),
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
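
	/*
	 * The first netdev_info() above prints the probe banner.  With
	 * purely hypothetical placeholder values (not real output), a
	 * rendered line would look roughly like:
	 *
	 *	tg3 0000:01:00.0 eth0: Tigon3 [partno(BCM95719) rev
	 *	5719000] (PCI Express) MAC address 00:10:18:00:00:00
	 */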

	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	return err;
}

static void tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		tg3_ptp_fini(tp);

		release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
	}
}

#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, true);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

unlock:
	rtnl_unlock();
	return err;
}

static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp,
			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

unlock:
	rtnl_unlock();
	return err;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
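
/*
 * Rough expansion of SIMPLE_DEV_PM_OPS() above (details vary by kernel
 * version): it declares a struct dev_pm_ops whose system-sleep
 * callbacks all route to the two functions supplied, approximately
 *
 *	static const struct dev_pm_ops tg3_pm_ops = {
 *		.suspend  = tg3_suspend,  .resume  = tg3_resume,
 *		.freeze   = tg3_suspend,  .thaw    = tg3_resume,
 *		.poweroff = tg3_suspend,  .restore = tg3_resume,
 *	};
 *
 * with the fields left NULL when CONFIG_PM_SLEEP is not set.
 */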

static void tg3_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (netif_running(dev))
		dev_close(dev);

	if (system_state == SYSTEM_POWER_OFF)
		tg3_power_down(tp);

	rtnl_unlock();
}

/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	/* We needn't recover from a permanent error */
	if (state == pci_channel_io_frozen)
		tp->pcierr_recovery = true;

	/* We probably don't have netdev yet */
	if (!netdev || !netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure) {
		if (netdev) {
			tg3_napi_enable(tp);
			dev_close(netdev);
		}
		err = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_disable_device(pdev);
	}

	rtnl_unlock();

	return err;
}

/**
 * tg3_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netdev || !netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
		tg3_napi_enable(tp);
		dev_close(netdev);
	}
	rtnl_unlock();

	return rc;
}
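
/*
 * For context: on a recoverable PCI error the AER core invokes the
 * handlers in tg3_err_handler below in roughly this order (see
 * Documentation/PCI/pci-error-recovery.txt):
 *
 *	tg3_io_error_detected()  -> returns PCI_ERS_RESULT_NEED_RESET
 *	  (the core then resets the slot/bus)
 *	tg3_io_slot_reset()      -> returns PCI_ERS_RESULT_RECOVERED
 *	tg3_io_resume()          -> traffic may flow again
 */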

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	tg3_phy_start(tp);

done:
	tp->pcierr_recovery = false;
	rtnl_unlock();
}

static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= &tg3_pm_ops,
	.shutdown	= tg3_shutdown,
};

module_pci_driver(tg3_driver);
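
/*
 * module_pci_driver() above expands to the standard registration
 * boilerplate, roughly:
 *
 *	static int __init tg3_driver_init(void)
 *	{
 *		return pci_register_driver(&tg3_driver);
 *	}
 *	module_init(tg3_driver_init);
 *
 *	static void __exit tg3_driver_exit(void)
 *	{
 *		pci_unregister_driver(&tg3_driver);
 *	}
 *	module_exit(tg3_driver_exit);
 */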