/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2016 Broadcom Corporation.
 * Copyright (C) 2016-2017 Broadcom Limited.
 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 * refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2016 Broadcom Corporation.
 *	Copyright (C) 2016-2017 Broadcom Ltd.
 *	Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 *	refers to Broadcom Inc. and/or its subsidiaries.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/crc32poly.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)		\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)		\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)

#define DRV_MODULE_NAME		"tg3"
/* DO NOT UPDATE TG3_*_NUM defines */
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		137

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			ETH_ZLEN
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
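
/* Example (illustrative, not part of the driver): because
 * TG3_TX_RING_SIZE is a power of two, NEXT_TX() wraps with a single
 * AND instead of a hardware modulo:
 *
 *	u32 idx = 511;
 *	idx = NEXT_TX(idx);	// (512 & 511) == 0, wraps to ring start
 *
 * This is exactly the '% foo' -> '& (foo - 1)' rewrite the comment
 * above describes.
 */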
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode. The driver
 * works around this bug by double copying the packet. This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient. For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path. Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif
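
/* Sketch (illustrative, simplified from the actual rx path): the
 * threshold picks between copying a small frame into a fresh skb and
 * handing the mapped DMA buffer itself up the stack:
 *
 *	if (len > TG3_RX_COPY_THRESH(tp))
 *		...unmap the buffer and build the skb around it...
 *	else
 *		...memcpy() the frame into a small skb, recycle buffer...
 *
 * Per the comment above, the 5701 PCIX dword-alignment workaround is
 * folded into this same length check, forcing the copy path there.
 */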
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
"tx_excessive_collisions" }, 399 { "tx_late_collisions" }, 400 { "tx_collide_2times" }, 401 { "tx_collide_3times" }, 402 { "tx_collide_4times" }, 403 { "tx_collide_5times" }, 404 { "tx_collide_6times" }, 405 { "tx_collide_7times" }, 406 { "tx_collide_8times" }, 407 { "tx_collide_9times" }, 408 { "tx_collide_10times" }, 409 { "tx_collide_11times" }, 410 { "tx_collide_12times" }, 411 { "tx_collide_13times" }, 412 { "tx_collide_14times" }, 413 { "tx_collide_15times" }, 414 { "tx_ucast_packets" }, 415 { "tx_mcast_packets" }, 416 { "tx_bcast_packets" }, 417 { "tx_carrier_sense_errors" }, 418 { "tx_discards" }, 419 { "tx_errors" }, 420 421 { "dma_writeq_full" }, 422 { "dma_write_prioq_full" }, 423 { "rxbds_empty" }, 424 { "rx_discards" }, 425 { "rx_errors" }, 426 { "rx_threshold_hit" }, 427 428 { "dma_readq_full" }, 429 { "dma_read_prioq_full" }, 430 { "tx_comp_queue_full" }, 431 432 { "ring_set_send_prod_index" }, 433 { "ring_status_update" }, 434 { "nic_irqs" }, 435 { "nic_avoided_irqs" }, 436 { "nic_tx_threshold_hit" }, 437 438 { "mbuf_lwm_thresh_hit" }, 439 }; 440 441 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys) 442 #define TG3_NVRAM_TEST 0 443 #define TG3_LINK_TEST 1 444 #define TG3_REGISTER_TEST 2 445 #define TG3_MEMORY_TEST 3 446 #define TG3_MAC_LOOPB_TEST 4 447 #define TG3_PHY_LOOPB_TEST 5 448 #define TG3_EXT_LOOPB_TEST 6 449 #define TG3_INTERRUPT_TEST 7 450 451 452 static const struct { 453 const char string[ETH_GSTRING_LEN]; 454 } ethtool_test_keys[] = { 455 [TG3_NVRAM_TEST] = { "nvram test (online) " }, 456 [TG3_LINK_TEST] = { "link test (online) " }, 457 [TG3_REGISTER_TEST] = { "register test (offline)" }, 458 [TG3_MEMORY_TEST] = { "memory test (offline)" }, 459 [TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" }, 460 [TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" }, 461 [TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" }, 462 [TG3_INTERRUPT_TEST] = { "interrupt test (offline)" }, 463 }; 464 465 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys) 466 467 468 static void tg3_write32(struct tg3 *tp, u32 off, u32 val) 469 { 470 writel(val, tp->regs + off); 471 } 472 473 static u32 tg3_read32(struct tg3 *tp, u32 off) 474 { 475 return readl(tp->regs + off); 476 } 477 478 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val) 479 { 480 writel(val, tp->aperegs + off); 481 } 482 483 static u32 tg3_ape_read32(struct tg3 *tp, u32 off) 484 { 485 return readl(tp->aperegs + off); 486 } 487 488 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val) 489 { 490 unsigned long flags; 491 492 spin_lock_irqsave(&tp->indirect_lock, flags); 493 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); 494 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val); 495 spin_unlock_irqrestore(&tp->indirect_lock, flags); 496 } 497 498 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val) 499 { 500 writel(val, tp->regs + off); 501 readl(tp->regs + off); 502 } 503 504 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off) 505 { 506 unsigned long flags; 507 u32 val; 508 509 spin_lock_irqsave(&tp->indirect_lock, flags); 510 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); 511 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val); 512 spin_unlock_irqrestore(&tp->indirect_lock, flags); 513 return val; 514 } 515 516 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val) 517 { 518 unsigned long flags; 519 520 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) { 521 pci_write_config_dword(tp->pdev, 
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
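
/* Example (illustrative): callers that toggle GPIO power use the
 * tw32_wait_f() wrapper (defined below) so the posted write is flushed
 * and the hardware gets time to settle before the next access:
 *
 *	tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
 *		    TG3_GRC_LCLCTL_PWRSW_DELAY);
 */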
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver doesn't hold any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
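
/* Typical usage (illustrative): APE lock/unlock calls bracket accesses
 * to resources shared with the APE management firmware, e.g.:
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return -EBUSY;
 *	...access the shared resource...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *
 * tg3_ape_event_lock() below is one such caller.
 */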
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
#endif
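
/* tg3_ape_scratchpad_read() above shows the full driver-event protocol:
 * tg3_ape_event_lock() returns with TG3_APE_LOCK_MEM held once no event
 * is pending, the request (scratchpad offset + length) is written into
 * the shared message buffer, the lock is dropped, and APE_EVENT_1 rings
 * the APE doorbell.  The reply is then polled for and copied out of the
 * message buffer 32 bits at a time.
 */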
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 20 milliseconds for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 20000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_send_ape_heartbeat(struct tg3 *tp,
				   unsigned long interval)
{
	/* Check if hb interval has exceeded */
	if (!tg3_flag(tp, ENABLE_APE) ||
	    time_before(jiffies, tp->ape_hb_jiffies + interval))
		return;

	tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
	tp->ape_hb_jiffies = jiffies;
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
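
/* Interrupt mailbox convention (as used above): writing 0x00000001 to a
 * vector's int_mbox masks that interrupt, while writing last_tag << 24
 * acks the status-block work consumed so far and unmasks it; on
 * 1SHOT_MSI hardware the unmask is written twice.  Illustrative
 * re-enable of a single vector:
 *
 *	tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
 */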
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}
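
/* MDIO frame construction (as used above): the PHY and register numbers
 * are packed into MAC_MI_COM along with a read or write command plus
 * MI_COM_START, then MI_COM_BUSY is polled in 10 usec steps (up to
 * PHY_BUSY_LOOPS) until the serial transaction completes.  Illustrative
 * caller, since tg3_readphy() returns 0 on success:
 *
 *	u32 val;
 *	if (!tg3_readphy(tp, MII_BMSR, &val))
 *		...val holds the PHY's basic status register...
 */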
static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
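
/* The cl45 helpers above reach clause-45 MMD registers through the
 * clause-22 indirection: the devad is written to MII_TG3_MMD_CTRL, the
 * register address to MII_TG3_MMD_ADDRESS, and after switching
 * MII_TG3_MMD_CTRL to no-increment data mode, the same
 * MII_TG3_MMD_ADDRESS register transfers the data word.
 */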
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}
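
/* tg3_mdio_read()/tg3_mdio_write() adapt the accessors above to the
 * phylib mii_bus interface, taking tp->lock and mapping any failure to
 * -EIO.  Once the bus is registered (see tg3_mdio_init() below), phylib
 * can reach the PHY generically, e.g. (illustrative):
 *
 *	int bmsr = mdiobus_read(tp->mdio_bus, tp->phy_addr, MII_BMSR);
 */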
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		fallthrough;
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}
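
/* Driver->firmware event handshake (implemented above): the driver must
 * see GRC_RX_CPU_DRIVER_EVENT clear (firmware acked the previous event)
 * before posting a new command.  tg3_wait_for_event_ack() budgets
 * TG3_FW_EVENT_TIMEOUT_USEC from last_event_jiffies and polls the bit
 * in 8 usec steps rather than always sleeping the full timeout.
 */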
/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}
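
/* The UMP link report above follows the firmware mailbox command
 * sequence: wait for the previous ack, write the command code and
 * payload length to NIC_SRAM_FW_CMD_MBOX / NIC_SRAM_FW_CMD_LEN_MBOX,
 * deposit the data words in NIC_SRAM_FW_CMD_DATA_MBOX, then raise the
 * event via tg3_generate_fw_event().  The four data words pack
 * BMCR/BMSR, ADVERTISE/LPA, CTRL1000/STAT1000 and PHYADDR, 16 bits
 * each.
 */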
/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			if (pci_channel_offline(tp->pdev))
				return -ENODEV;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		if (pci_channel_offline(tp->pdev)) {
			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
				tg3_flag_set(tp, NO_FWARE_REPORTED);
				netdev_info(tp->dev, "No firmware running\n");
			}

			break;
		}

		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}

	tp->link_up = netif_carrier_ok(tp->dev);
}

static u32 tg3_decode_flowctrl_1000T(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_PAUSE_CAP) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_PAUSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_PAUSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u32 tg3_decode_flowctrl_1000X(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_1000XPAUSE) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_1000XPSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_1000XPSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
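
/* Pause resolution for 1000BASE-X (implemented above), following the
 * standard 802.3 pause resolution rules: symmetric pause on both sides
 * enables flow control in both directions; with only asymmetric pause
 * in common, the direction follows whichever side also advertised the
 * symmetric bit:
 *
 *	lcladv		rmtadv		resolved
 *	PAUSE		PAUSE		TX | RX
 *	PAUSE|ASYM	ASYM		RX
 *	ASYM		PAUSE|ASYM	TX
 */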
"enabled" : "disabled"); 1892 1893 tg3_ump_link_report(tp); 1894 } 1895 1896 tp->link_up = netif_carrier_ok(tp->dev); 1897 } 1898 1899 static u32 tg3_decode_flowctrl_1000T(u32 adv) 1900 { 1901 u32 flowctrl = 0; 1902 1903 if (adv & ADVERTISE_PAUSE_CAP) { 1904 flowctrl |= FLOW_CTRL_RX; 1905 if (!(adv & ADVERTISE_PAUSE_ASYM)) 1906 flowctrl |= FLOW_CTRL_TX; 1907 } else if (adv & ADVERTISE_PAUSE_ASYM) 1908 flowctrl |= FLOW_CTRL_TX; 1909 1910 return flowctrl; 1911 } 1912 1913 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl) 1914 { 1915 u16 miireg; 1916 1917 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX)) 1918 miireg = ADVERTISE_1000XPAUSE; 1919 else if (flow_ctrl & FLOW_CTRL_TX) 1920 miireg = ADVERTISE_1000XPSE_ASYM; 1921 else if (flow_ctrl & FLOW_CTRL_RX) 1922 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM; 1923 else 1924 miireg = 0; 1925 1926 return miireg; 1927 } 1928 1929 static u32 tg3_decode_flowctrl_1000X(u32 adv) 1930 { 1931 u32 flowctrl = 0; 1932 1933 if (adv & ADVERTISE_1000XPAUSE) { 1934 flowctrl |= FLOW_CTRL_RX; 1935 if (!(adv & ADVERTISE_1000XPSE_ASYM)) 1936 flowctrl |= FLOW_CTRL_TX; 1937 } else if (adv & ADVERTISE_1000XPSE_ASYM) 1938 flowctrl |= FLOW_CTRL_TX; 1939 1940 return flowctrl; 1941 } 1942 1943 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv) 1944 { 1945 u8 cap = 0; 1946 1947 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) { 1948 cap = FLOW_CTRL_TX | FLOW_CTRL_RX; 1949 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) { 1950 if (lcladv & ADVERTISE_1000XPAUSE) 1951 cap = FLOW_CTRL_RX; 1952 if (rmtadv & ADVERTISE_1000XPAUSE) 1953 cap = FLOW_CTRL_TX; 1954 } 1955 1956 return cap; 1957 } 1958 1959 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv) 1960 { 1961 u8 autoneg; 1962 u8 flowctrl = 0; 1963 u32 old_rx_mode = tp->rx_mode; 1964 u32 old_tx_mode = tp->tx_mode; 1965 1966 if (tg3_flag(tp, USE_PHYLIB)) 1967 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg; 1968 else 1969 autoneg = tp->link_config.autoneg; 1970 1971 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) { 1972 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) 1973 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv); 1974 else 1975 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv); 1976 } else 1977 flowctrl = tp->link_config.flowctrl; 1978 1979 tp->link_config.active_flowctrl = flowctrl; 1980 1981 if (flowctrl & FLOW_CTRL_RX) 1982 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE; 1983 else 1984 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE; 1985 1986 if (old_rx_mode != tp->rx_mode) 1987 tw32_f(MAC_RX_MODE, tp->rx_mode); 1988 1989 if (flowctrl & FLOW_CTRL_TX) 1990 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE; 1991 else 1992 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE; 1993 1994 if (old_tx_mode != tp->tx_mode) 1995 tw32_f(MAC_TX_MODE, tp->tx_mode); 1996 } 1997 1998 static void tg3_adjust_link(struct net_device *dev) 1999 { 2000 u8 oldflowctrl, linkmesg = 0; 2001 u32 mac_mode, lcl_adv, rmt_adv; 2002 struct tg3 *tp = netdev_priv(dev); 2003 struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 2004 2005 spin_lock_bh(&tp->lock); 2006 2007 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK | 2008 MAC_MODE_HALF_DUPLEX); 2009 2010 oldflowctrl = tp->link_config.active_flowctrl; 2011 2012 if (phydev->link) { 2013 lcl_adv = 0; 2014 rmt_adv = 0; 2015 2016 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10) 2017 mac_mode |= MAC_MODE_PORT_MODE_MII; 2018 else if (phydev->speed == SPEED_1000 || 2019 tg3_asic_rev(tp) != ASIC_REV_5785) 2020 
mac_mode |= MAC_MODE_PORT_MODE_GMII; 2021 else 2022 mac_mode |= MAC_MODE_PORT_MODE_MII; 2023 2024 if (phydev->duplex == DUPLEX_HALF) 2025 mac_mode |= MAC_MODE_HALF_DUPLEX; 2026 else { 2027 lcl_adv = mii_advertise_flowctrl( 2028 tp->link_config.flowctrl); 2029 2030 if (phydev->pause) 2031 rmt_adv = LPA_PAUSE_CAP; 2032 if (phydev->asym_pause) 2033 rmt_adv |= LPA_PAUSE_ASYM; 2034 } 2035 2036 tg3_setup_flow_control(tp, lcl_adv, rmt_adv); 2037 } else 2038 mac_mode |= MAC_MODE_PORT_MODE_GMII; 2039 2040 if (mac_mode != tp->mac_mode) { 2041 tp->mac_mode = mac_mode; 2042 tw32_f(MAC_MODE, tp->mac_mode); 2043 udelay(40); 2044 } 2045 2046 if (tg3_asic_rev(tp) == ASIC_REV_5785) { 2047 if (phydev->speed == SPEED_10) 2048 tw32(MAC_MI_STAT, 2049 MAC_MI_STAT_10MBPS_MODE | 2050 MAC_MI_STAT_LNKSTAT_ATTN_ENAB); 2051 else 2052 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); 2053 } 2054 2055 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF) 2056 tw32(MAC_TX_LENGTHS, 2057 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | 2058 (6 << TX_LENGTHS_IPG_SHIFT) | 2059 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT))); 2060 else 2061 tw32(MAC_TX_LENGTHS, 2062 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | 2063 (6 << TX_LENGTHS_IPG_SHIFT) | 2064 (32 << TX_LENGTHS_SLOT_TIME_SHIFT))); 2065 2066 if (phydev->link != tp->old_link || 2067 phydev->speed != tp->link_config.active_speed || 2068 phydev->duplex != tp->link_config.active_duplex || 2069 oldflowctrl != tp->link_config.active_flowctrl) 2070 linkmesg = 1; 2071 2072 tp->old_link = phydev->link; 2073 tp->link_config.active_speed = phydev->speed; 2074 tp->link_config.active_duplex = phydev->duplex; 2075 2076 spin_unlock_bh(&tp->lock); 2077 2078 if (linkmesg) 2079 tg3_link_report(tp); 2080 } 2081 2082 static int tg3_phy_init(struct tg3 *tp) 2083 { 2084 struct phy_device *phydev; 2085 2086 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) 2087 return 0; 2088 2089 /* Bring the PHY back to a known state. */ 2090 tg3_bmcr_reset(tp); 2091 2092 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 2093 2094 /* Attach the MAC to the PHY. */ 2095 phydev = phy_connect(tp->dev, phydev_name(phydev), 2096 tg3_adjust_link, phydev->interface); 2097 if (IS_ERR(phydev)) { 2098 dev_err(&tp->pdev->dev, "Could not attach to PHY\n"); 2099 return PTR_ERR(phydev); 2100 } 2101 2102 /* Mask with MAC supported features. 
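 * GMII/RGMII ports may run at up to 1000 Mbps unless the PHY is
 * flagged 10/100-only; MII ports are capped at 100 Mbps. Asymmetric
 * pause is supported either way; other interface modes are rejected.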
*/ 2103 switch (phydev->interface) { 2104 case PHY_INTERFACE_MODE_GMII: 2105 case PHY_INTERFACE_MODE_RGMII: 2106 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 2107 phy_set_max_speed(phydev, SPEED_1000); 2108 phy_support_asym_pause(phydev); 2109 break; 2110 } 2111 fallthrough; 2112 case PHY_INTERFACE_MODE_MII: 2113 phy_set_max_speed(phydev, SPEED_100); 2114 phy_support_asym_pause(phydev); 2115 break; 2116 default: 2117 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); 2118 return -EINVAL; 2119 } 2120 2121 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED; 2122 2123 phy_attached_info(phydev); 2124 2125 return 0; 2126 } 2127 2128 static void tg3_phy_start(struct tg3 *tp) 2129 { 2130 struct phy_device *phydev; 2131 2132 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 2133 return; 2134 2135 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 2136 2137 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { 2138 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; 2139 phydev->speed = tp->link_config.speed; 2140 phydev->duplex = tp->link_config.duplex; 2141 phydev->autoneg = tp->link_config.autoneg; 2142 ethtool_convert_legacy_u32_to_link_mode( 2143 phydev->advertising, tp->link_config.advertising); 2144 } 2145 2146 phy_start(phydev); 2147 2148 phy_start_aneg(phydev); 2149 } 2150 2151 static void tg3_phy_stop(struct tg3 *tp) 2152 { 2153 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 2154 return; 2155 2156 phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); 2157 } 2158 2159 static void tg3_phy_fini(struct tg3 *tp) 2160 { 2161 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { 2162 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); 2163 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED; 2164 } 2165 } 2166 2167 static int tg3_phy_set_extloopbk(struct tg3 *tp) 2168 { 2169 int err; 2170 u32 val; 2171 2172 if (tp->phy_flags & TG3_PHYFLG_IS_FET) 2173 return 0; 2174 2175 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { 2176 /* Cannot do read-modify-write on 5401 */ 2177 err = tg3_phy_auxctl_write(tp, 2178 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 2179 MII_TG3_AUXCTL_ACTL_EXTLOOPBK | 2180 0x4c20); 2181 goto done; 2182 } 2183 2184 err = tg3_phy_auxctl_read(tp, 2185 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val); 2186 if (err) 2187 return err; 2188 2189 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK; 2190 err = tg3_phy_auxctl_write(tp, 2191 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val); 2192 2193 done: 2194 return err; 2195 } 2196 2197 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable) 2198 { 2199 u32 phytest; 2200 2201 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) { 2202 u32 phy; 2203 2204 tg3_writephy(tp, MII_TG3_FET_TEST, 2205 phytest | MII_TG3_FET_SHADOW_EN); 2206 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) { 2207 if (enable) 2208 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD; 2209 else 2210 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD; 2211 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy); 2212 } 2213 tg3_writephy(tp, MII_TG3_FET_TEST, phytest); 2214 } 2215 } 2216 2217 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable) 2218 { 2219 u32 reg; 2220 2221 if (!tg3_flag(tp, 5705_PLUS) || 2222 (tg3_flag(tp, 5717_PLUS) && 2223 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))) 2224 return; 2225 2226 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 2227 tg3_phy_fet_toggle_apd(tp, enable); 2228 return; 2229 } 2230 2231 reg = MII_TG3_MISC_SHDW_SCR5_LPED | 2232 MII_TG3_MISC_SHDW_SCR5_DLPTLM | 2233 MII_TG3_MISC_SHDW_SCR5_SDTL | 2234 MII_TG3_MISC_SHDW_SCR5_C125OE; 2235 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable) 2236 reg |= 
MII_TG3_MISC_SHDW_SCR5_DLLAPD; 2237 2238 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg); 2239 2240 2241 reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS; 2242 if (enable) 2243 reg |= MII_TG3_MISC_SHDW_APD_ENABLE; 2244 2245 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg); 2246 } 2247 2248 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable) 2249 { 2250 u32 phy; 2251 2252 if (!tg3_flag(tp, 5705_PLUS) || 2253 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 2254 return; 2255 2256 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 2257 u32 ephy; 2258 2259 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) { 2260 u32 reg = MII_TG3_FET_SHDW_MISCCTRL; 2261 2262 tg3_writephy(tp, MII_TG3_FET_TEST, 2263 ephy | MII_TG3_FET_SHADOW_EN); 2264 if (!tg3_readphy(tp, reg, &phy)) { 2265 if (enable) 2266 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX; 2267 else 2268 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX; 2269 tg3_writephy(tp, reg, phy); 2270 } 2271 tg3_writephy(tp, MII_TG3_FET_TEST, ephy); 2272 } 2273 } else { 2274 int ret; 2275 2276 ret = tg3_phy_auxctl_read(tp, 2277 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy); 2278 if (!ret) { 2279 if (enable) 2280 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX; 2281 else 2282 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX; 2283 tg3_phy_auxctl_write(tp, 2284 MII_TG3_AUXCTL_SHDWSEL_MISC, phy); 2285 } 2286 } 2287 } 2288 2289 static void tg3_phy_set_wirespeed(struct tg3 *tp) 2290 { 2291 int ret; 2292 u32 val; 2293 2294 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) 2295 return; 2296 2297 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val); 2298 if (!ret) 2299 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, 2300 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN); 2301 } 2302 2303 static void tg3_phy_apply_otp(struct tg3 *tp) 2304 { 2305 u32 otp, phy; 2306 2307 if (!tp->phy_otp) 2308 return; 2309 2310 otp = tp->phy_otp; 2311 2312 if (tg3_phy_toggle_auxctl_smdsp(tp, true)) 2313 return; 2314 2315 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT); 2316 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT; 2317 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy); 2318 2319 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) | 2320 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT); 2321 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy); 2322 2323 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT); 2324 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ; 2325 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy); 2326 2327 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT); 2328 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy); 2329 2330 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT); 2331 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy); 2332 2333 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) | 2334 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT); 2335 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy); 2336 2337 tg3_phy_toggle_auxctl_smdsp(tp, false); 2338 } 2339 2340 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee) 2341 { 2342 u32 val; 2343 struct ethtool_eee *dest = &tp->eee; 2344 2345 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) 2346 return; 2347 2348 if (eee) 2349 dest = eee; 2350 2351 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val)) 2352 return; 2353 2354 /* Pull eee_active */ 2355 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T || 2356 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) { 2357 dest->eee_active = 1; 2358 } else 2359 dest->eee_active = 0; 2360 2361 /* Pull lp advertised settings */ 2362 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val)) 
2363 return; 2364 dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val); 2365 2366 /* Pull advertised and eee_enabled settings */ 2367 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val)) 2368 return; 2369 dest->eee_enabled = !!val; 2370 dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val); 2371 2372 /* Pull tx_lpi_enabled */ 2373 val = tr32(TG3_CPMU_EEE_MODE); 2374 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX); 2375 2376 /* Pull lpi timer value */ 2377 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff; 2378 } 2379 2380 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up) 2381 { 2382 u32 val; 2383 2384 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) 2385 return; 2386 2387 tp->setlpicnt = 0; 2388 2389 if (tp->link_config.autoneg == AUTONEG_ENABLE && 2390 current_link_up && 2391 tp->link_config.active_duplex == DUPLEX_FULL && 2392 (tp->link_config.active_speed == SPEED_100 || 2393 tp->link_config.active_speed == SPEED_1000)) { 2394 u32 eeectl; 2395 2396 if (tp->link_config.active_speed == SPEED_1000) 2397 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US; 2398 else 2399 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US; 2400 2401 tw32(TG3_CPMU_EEE_CTRL, eeectl); 2402 2403 tg3_eee_pull_config(tp, NULL); 2404 if (tp->eee.eee_active) 2405 tp->setlpicnt = 2; 2406 } 2407 2408 if (!tp->setlpicnt) { 2409 if (current_link_up && 2410 !tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2411 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000); 2412 tg3_phy_toggle_auxctl_smdsp(tp, false); 2413 } 2414 2415 val = tr32(TG3_CPMU_EEE_MODE); 2416 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE); 2417 } 2418 } 2419 2420 static void tg3_phy_eee_enable(struct tg3 *tp) 2421 { 2422 u32 val; 2423 2424 if (tp->link_config.active_speed == SPEED_1000 && 2425 (tg3_asic_rev(tp) == ASIC_REV_5717 || 2426 tg3_asic_rev(tp) == ASIC_REV_5719 || 2427 tg3_flag(tp, 57765_CLASS)) && 2428 !tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2429 val = MII_TG3_DSP_TAP26_ALNOKO | 2430 MII_TG3_DSP_TAP26_RMRXSTO; 2431 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); 2432 tg3_phy_toggle_auxctl_smdsp(tp, false); 2433 } 2434 2435 val = tr32(TG3_CPMU_EEE_MODE); 2436 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE); 2437 } 2438 2439 static int tg3_wait_macro_done(struct tg3 *tp) 2440 { 2441 int limit = 100; 2442 2443 while (limit--) { 2444 u32 tmp32; 2445 2446 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) { 2447 if ((tmp32 & 0x1000) == 0) 2448 break; 2449 } 2450 } 2451 if (limit < 0) 2452 return -EBUSY; 2453 2454 return 0; 2455 } 2456 2457 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp) 2458 { 2459 static const u32 test_pat[4][6] = { 2460 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 }, 2461 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 }, 2462 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 }, 2463 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 } 2464 }; 2465 int chan; 2466 2467 for (chan = 0; chan < 4; chan++) { 2468 int i; 2469 2470 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 2471 (chan * 0x2000) | 0x0200); 2472 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002); 2473 2474 for (i = 0; i < 6; i++) 2475 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 2476 test_pat[chan][i]); 2477 2478 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202); 2479 if (tg3_wait_macro_done(tp)) { 2480 *resetp = 1; 2481 return -EBUSY; 2482 } 2483 2484 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 2485 (chan * 0x2000) | 0x0200); 2486 tg3_writephy(tp, 
MII_TG3_DSP_CONTROL, 0x0082); 2487 if (tg3_wait_macro_done(tp)) { 2488 *resetp = 1; 2489 return -EBUSY; 2490 } 2491 2492 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802); 2493 if (tg3_wait_macro_done(tp)) { 2494 *resetp = 1; 2495 return -EBUSY; 2496 } 2497 2498 for (i = 0; i < 6; i += 2) { 2499 u32 low, high; 2500 2501 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) || 2502 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) || 2503 tg3_wait_macro_done(tp)) { 2504 *resetp = 1; 2505 return -EBUSY; 2506 } 2507 low &= 0x7fff; 2508 high &= 0x000f; 2509 if (low != test_pat[chan][i] || 2510 high != test_pat[chan][i+1]) { 2511 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b); 2512 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001); 2513 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005); 2514 2515 return -EBUSY; 2516 } 2517 } 2518 } 2519 2520 return 0; 2521 } 2522 2523 static int tg3_phy_reset_chanpat(struct tg3 *tp) 2524 { 2525 int chan; 2526 2527 for (chan = 0; chan < 4; chan++) { 2528 int i; 2529 2530 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 2531 (chan * 0x2000) | 0x0200); 2532 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002); 2533 for (i = 0; i < 6; i++) 2534 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000); 2535 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202); 2536 if (tg3_wait_macro_done(tp)) 2537 return -EBUSY; 2538 } 2539 2540 return 0; 2541 } 2542 2543 static int tg3_phy_reset_5703_4_5(struct tg3 *tp) 2544 { 2545 u32 reg32, phy9_orig; 2546 int retries, do_phy_reset, err; 2547 2548 retries = 10; 2549 do_phy_reset = 1; 2550 do { 2551 if (do_phy_reset) { 2552 err = tg3_bmcr_reset(tp); 2553 if (err) 2554 return err; 2555 do_phy_reset = 0; 2556 } 2557 2558 /* Disable transmitter and interrupt. */ 2559 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) 2560 continue; 2561 2562 reg32 |= 0x3000; 2563 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32); 2564 2565 /* Set full-duplex, 1000 Mbps. */ 2566 tg3_writephy(tp, MII_BMCR, 2567 BMCR_FULLDPLX | BMCR_SPEED1000); 2568 2569 /* Set to master mode. */ 2570 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig)) 2571 continue; 2572 2573 tg3_writephy(tp, MII_CTRL1000, 2574 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER); 2575 2576 err = tg3_phy_toggle_auxctl_smdsp(tp, true); 2577 if (err) 2578 return err; 2579 2580 /* Block the PHY control access. */ 2581 tg3_phydsp_write(tp, 0x8005, 0x0800); 2582 2583 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset); 2584 if (!err) 2585 break; 2586 } while (--retries); 2587 2588 err = tg3_phy_reset_chanpat(tp); 2589 if (err) 2590 return err; 2591 2592 tg3_phydsp_write(tp, 0x8005, 0x0000); 2593 2594 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200); 2595 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000); 2596 2597 tg3_phy_toggle_auxctl_smdsp(tp, false); 2598 2599 tg3_writephy(tp, MII_CTRL1000, phy9_orig); 2600 2601 err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32); 2602 if (err) 2603 return err; 2604 2605 reg32 &= ~0x3000; 2606 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32); 2607 2608 return 0; 2609 } 2610 2611 static void tg3_carrier_off(struct tg3 *tp) 2612 { 2613 netif_carrier_off(tp->dev); 2614 tp->link_up = false; 2615 } 2616 2617 static void tg3_warn_mgmt_link_flap(struct tg3 *tp) 2618 { 2619 if (tg3_flag(tp, ENABLE_ASF)) 2620 netdev_warn(tp->dev, 2621 "Management side-band traffic will be interrupted during phy settings change\n"); 2622 } 2623 2624 /* This will reset the tigon3 PHY. 
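 * Roughly: take the link down, run the 5703/4/5 workaround reset when
 * applicable, then reapply OTP, APD, DSP fixups and jumbo settings.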
2626 */ 2627 static int tg3_phy_reset(struct tg3 *tp) 2628 { 2629 u32 val, cpmuctrl; 2630 int err; 2631 2632 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 2633 val = tr32(GRC_MISC_CFG); 2634 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ); 2635 udelay(40); 2636 } 2637 err = tg3_readphy(tp, MII_BMSR, &val); 2638 err |= tg3_readphy(tp, MII_BMSR, &val); 2639 if (err != 0) 2640 return -EBUSY; 2641 2642 if (netif_running(tp->dev) && tp->link_up) { 2643 netif_carrier_off(tp->dev); 2644 tg3_link_report(tp); 2645 } 2646 2647 if (tg3_asic_rev(tp) == ASIC_REV_5703 || 2648 tg3_asic_rev(tp) == ASIC_REV_5704 || 2649 tg3_asic_rev(tp) == ASIC_REV_5705) { 2650 err = tg3_phy_reset_5703_4_5(tp); 2651 if (err) 2652 return err; 2653 goto out; 2654 } 2655 2656 cpmuctrl = 0; 2657 if (tg3_asic_rev(tp) == ASIC_REV_5784 && 2658 tg3_chip_rev(tp) != CHIPREV_5784_AX) { 2659 cpmuctrl = tr32(TG3_CPMU_CTRL); 2660 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) 2661 tw32(TG3_CPMU_CTRL, 2662 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY); 2663 } 2664 2665 err = tg3_bmcr_reset(tp); 2666 if (err) 2667 return err; 2668 2669 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) { 2670 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz; 2671 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val); 2672 2673 tw32(TG3_CPMU_CTRL, cpmuctrl); 2674 } 2675 2676 if (tg3_chip_rev(tp) == CHIPREV_5784_AX || 2677 tg3_chip_rev(tp) == CHIPREV_5761_AX) { 2678 val = tr32(TG3_CPMU_LSPD_1000MB_CLK); 2679 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) == 2680 CPMU_LSPD_1000MB_MACCLK_12_5) { 2681 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK; 2682 udelay(40); 2683 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val); 2684 } 2685 } 2686 2687 if (tg3_flag(tp, 5717_PLUS) && 2688 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) 2689 return 0; 2690 2691 tg3_phy_apply_otp(tp); 2692 2693 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) 2694 tg3_phy_toggle_apd(tp, true); 2695 else 2696 tg3_phy_toggle_apd(tp, false); 2697 2698 out: 2699 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) && 2700 !tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2701 tg3_phydsp_write(tp, 0x201f, 0x2aaa); 2702 tg3_phydsp_write(tp, 0x000a, 0x0323); 2703 tg3_phy_toggle_auxctl_smdsp(tp, false); 2704 } 2705 2706 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) { 2707 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); 2708 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); 2709 } 2710 2711 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) { 2712 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2713 tg3_phydsp_write(tp, 0x000a, 0x310b); 2714 tg3_phydsp_write(tp, 0x201f, 0x9506); 2715 tg3_phydsp_write(tp, 0x401f, 0x14e2); 2716 tg3_phy_toggle_auxctl_smdsp(tp, false); 2717 } 2718 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) { 2719 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2720 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); 2721 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) { 2722 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b); 2723 tg3_writephy(tp, MII_TG3_TEST1, 2724 MII_TG3_TEST1_TRIM_EN | 0x4); 2725 } else 2726 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b); 2727 2728 tg3_phy_toggle_auxctl_smdsp(tp, false); 2729 } 2730 } 2731 2732 /* Set Extended packet length bit (bit 14) on all chips that */ 2733 /* support jumbo frames */ 2734 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { 2735 /* Cannot do read-modify-write on 5401 */ 2736 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20); 2737 } else if (tg3_flag(tp, JUMBO_CAPABLE)) { 2738 /* Set bit 14 with read-modify-write to preserve other bits */ 2739 err = tg3_phy_auxctl_read(tp, 2740 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val); 2741 
if (!err) 2742 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 2743 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN); 2744 } 2745 2746 /* Set phy register 0x10 bit 0 to high fifo elasticity to support 2747 * jumbo frames transmission. 2748 */ 2749 if (tg3_flag(tp, JUMBO_CAPABLE)) { 2750 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val)) 2751 tg3_writephy(tp, MII_TG3_EXT_CTRL, 2752 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC); 2753 } 2754 2755 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 2756 /* adjust output voltage */ 2757 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12); 2758 } 2759 2760 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0) 2761 tg3_phydsp_write(tp, 0xffb, 0x4000); 2762 2763 tg3_phy_toggle_automdix(tp, true); 2764 tg3_phy_set_wirespeed(tp); 2765 return 0; 2766 } 2767 2768 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001 2769 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002 2770 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \ 2771 TG3_GPIO_MSG_NEED_VAUX) 2772 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \ 2773 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \ 2774 (TG3_GPIO_MSG_DRVR_PRES << 4) | \ 2775 (TG3_GPIO_MSG_DRVR_PRES << 8) | \ 2776 (TG3_GPIO_MSG_DRVR_PRES << 12)) 2777 2778 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \ 2779 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \ 2780 (TG3_GPIO_MSG_NEED_VAUX << 4) | \ 2781 (TG3_GPIO_MSG_NEED_VAUX << 8) | \ 2782 (TG3_GPIO_MSG_NEED_VAUX << 12)) 2783 2784 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat) 2785 { 2786 u32 status, shift; 2787 2788 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 2789 tg3_asic_rev(tp) == ASIC_REV_5719) 2790 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG); 2791 else 2792 status = tr32(TG3_CPMU_DRV_STATUS); 2793 2794 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn; 2795 status &= ~(TG3_GPIO_MSG_MASK << shift); 2796 status |= (newstat << shift); 2797 2798 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 2799 tg3_asic_rev(tp) == ASIC_REV_5719) 2800 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status); 2801 else 2802 tw32(TG3_CPMU_DRV_STATUS, status); 2803 2804 return status >> TG3_APE_GPIO_MSG_SHIFT; 2805 } 2806 2807 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp) 2808 { 2809 if (!tg3_flag(tp, IS_NIC)) 2810 return 0; 2811 2812 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 2813 tg3_asic_rev(tp) == ASIC_REV_5719 || 2814 tg3_asic_rev(tp) == ASIC_REV_5720) { 2815 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO)) 2816 return -EIO; 2817 2818 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES); 2819 2820 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 2821 TG3_GRC_LCLCTL_PWRSW_DELAY); 2822 2823 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO); 2824 } else { 2825 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 2826 TG3_GRC_LCLCTL_PWRSW_DELAY); 2827 } 2828 2829 return 0; 2830 } 2831 2832 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp) 2833 { 2834 u32 grc_local_ctrl; 2835 2836 if (!tg3_flag(tp, IS_NIC) || 2837 tg3_asic_rev(tp) == ASIC_REV_5700 || 2838 tg3_asic_rev(tp) == ASIC_REV_5701) 2839 return; 2840 2841 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1; 2842 2843 tw32_wait_f(GRC_LOCAL_CTRL, 2844 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1, 2845 TG3_GRC_LCLCTL_PWRSW_DELAY); 2846 2847 tw32_wait_f(GRC_LOCAL_CTRL, 2848 grc_local_ctrl, 2849 TG3_GRC_LCLCTL_PWRSW_DELAY); 2850 2851 tw32_wait_f(GRC_LOCAL_CTRL, 2852 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1, 2853 TG3_GRC_LCLCTL_PWRSW_DELAY); 2854 } 2855 2856 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp) 2857 { 2858 if (!tg3_flag(tp, IS_NIC)) 2859 return; 2860 2861 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 2862 tg3_asic_rev(tp) == 
ASIC_REV_5701) { 2863 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | 2864 (GRC_LCLCTRL_GPIO_OE0 | 2865 GRC_LCLCTRL_GPIO_OE1 | 2866 GRC_LCLCTRL_GPIO_OE2 | 2867 GRC_LCLCTRL_GPIO_OUTPUT0 | 2868 GRC_LCLCTRL_GPIO_OUTPUT1), 2869 TG3_GRC_LCLCTL_PWRSW_DELAY); 2870 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || 2871 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { 2872 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */ 2873 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 | 2874 GRC_LCLCTRL_GPIO_OE1 | 2875 GRC_LCLCTRL_GPIO_OE2 | 2876 GRC_LCLCTRL_GPIO_OUTPUT0 | 2877 GRC_LCLCTRL_GPIO_OUTPUT1 | 2878 tp->grc_local_ctrl; 2879 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 2880 TG3_GRC_LCLCTL_PWRSW_DELAY); 2881 2882 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2; 2883 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 2884 TG3_GRC_LCLCTL_PWRSW_DELAY); 2885 2886 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0; 2887 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 2888 TG3_GRC_LCLCTL_PWRSW_DELAY); 2889 } else { 2890 u32 no_gpio2; 2891 u32 grc_local_ctrl = 0; 2892 2893 /* Workaround to prevent overdrawing Amps. */ 2894 if (tg3_asic_rev(tp) == ASIC_REV_5714) { 2895 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; 2896 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | 2897 grc_local_ctrl, 2898 TG3_GRC_LCLCTL_PWRSW_DELAY); 2899 } 2900 2901 /* On 5753 and variants, GPIO2 cannot be used. */ 2902 no_gpio2 = tp->nic_sram_data_cfg & 2903 NIC_SRAM_DATA_CFG_NO_GPIO2; 2904 2905 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | 2906 GRC_LCLCTRL_GPIO_OE1 | 2907 GRC_LCLCTRL_GPIO_OE2 | 2908 GRC_LCLCTRL_GPIO_OUTPUT1 | 2909 GRC_LCLCTRL_GPIO_OUTPUT2; 2910 if (no_gpio2) { 2911 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 | 2912 GRC_LCLCTRL_GPIO_OUTPUT2); 2913 } 2914 tw32_wait_f(GRC_LOCAL_CTRL, 2915 tp->grc_local_ctrl | grc_local_ctrl, 2916 TG3_GRC_LCLCTL_PWRSW_DELAY); 2917 2918 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0; 2919 2920 tw32_wait_f(GRC_LOCAL_CTRL, 2921 tp->grc_local_ctrl | grc_local_ctrl, 2922 TG3_GRC_LCLCTL_PWRSW_DELAY); 2923 2924 if (!no_gpio2) { 2925 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2; 2926 tw32_wait_f(GRC_LOCAL_CTRL, 2927 tp->grc_local_ctrl | grc_local_ctrl, 2928 TG3_GRC_LCLCTL_PWRSW_DELAY); 2929 } 2930 } 2931 } 2932 2933 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable) 2934 { 2935 u32 msg = 0; 2936 2937 /* Serialize power state transitions */ 2938 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO)) 2939 return; 2940 2941 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable) 2942 msg = TG3_GPIO_MSG_NEED_VAUX; 2943 2944 msg = tg3_set_function_status(tp, msg); 2945 2946 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK) 2947 goto done; 2948 2949 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK) 2950 tg3_pwrsrc_switch_to_vaux(tp); 2951 else 2952 tg3_pwrsrc_die_with_vmain(tp); 2953 2954 done: 2955 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO); 2956 } 2957 2958 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol) 2959 { 2960 bool need_vaux = false; 2961 2962 /* The GPIOs do something completely different on 57765. */ 2963 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS)) 2964 return; 2965 2966 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 2967 tg3_asic_rev(tp) == ASIC_REV_5719 || 2968 tg3_asic_rev(tp) == ASIC_REV_5720) { 2969 tg3_frob_aux_power_5717(tp, include_wol ? 
2970 tg3_flag(tp, WOL_ENABLE) != 0 : 0); 2971 return; 2972 } 2973 2974 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) { 2975 struct net_device *dev_peer; 2976 2977 dev_peer = pci_get_drvdata(tp->pdev_peer); 2978 2979 /* remove_one() may have been run on the peer. */ 2980 if (dev_peer) { 2981 struct tg3 *tp_peer = netdev_priv(dev_peer); 2982 2983 if (tg3_flag(tp_peer, INIT_COMPLETE)) 2984 return; 2985 2986 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) || 2987 tg3_flag(tp_peer, ENABLE_ASF)) 2988 need_vaux = true; 2989 } 2990 } 2991 2992 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) || 2993 tg3_flag(tp, ENABLE_ASF)) 2994 need_vaux = true; 2995 2996 if (need_vaux) 2997 tg3_pwrsrc_switch_to_vaux(tp); 2998 else 2999 tg3_pwrsrc_die_with_vmain(tp); 3000 } 3001 3002 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed) 3003 { 3004 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2) 3005 return 1; 3006 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) { 3007 if (speed != SPEED_10) 3008 return 1; 3009 } else if (speed == SPEED_10) 3010 return 1; 3011 3012 return 0; 3013 } 3014 3015 static bool tg3_phy_power_bug(struct tg3 *tp) 3016 { 3017 switch (tg3_asic_rev(tp)) { 3018 case ASIC_REV_5700: 3019 case ASIC_REV_5704: 3020 return true; 3021 case ASIC_REV_5780: 3022 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 3023 return true; 3024 return false; 3025 case ASIC_REV_5717: 3026 if (!tp->pci_fn) 3027 return true; 3028 return false; 3029 case ASIC_REV_5719: 3030 case ASIC_REV_5720: 3031 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 3032 !tp->pci_fn) 3033 return true; 3034 return false; 3035 } 3036 3037 return false; 3038 } 3039 3040 static bool tg3_phy_led_bug(struct tg3 *tp) 3041 { 3042 switch (tg3_asic_rev(tp)) { 3043 case ASIC_REV_5719: 3044 case ASIC_REV_5720: 3045 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && 3046 !tp->pci_fn) 3047 return true; 3048 return false; 3049 } 3050 3051 return false; 3052 } 3053 3054 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) 3055 { 3056 u32 val; 3057 3058 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) 3059 return; 3060 3061 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 3062 if (tg3_asic_rev(tp) == ASIC_REV_5704) { 3063 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL); 3064 u32 serdes_cfg = tr32(MAC_SERDES_CFG); 3065 3066 sg_dig_ctrl |= 3067 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET; 3068 tw32(SG_DIG_CTRL, sg_dig_ctrl); 3069 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15)); 3070 } 3071 return; 3072 } 3073 3074 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 3075 tg3_bmcr_reset(tp); 3076 val = tr32(GRC_MISC_CFG); 3077 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ); 3078 udelay(40); 3079 return; 3080 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 3081 u32 phytest; 3082 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) { 3083 u32 phy; 3084 3085 tg3_writephy(tp, MII_ADVERTISE, 0); 3086 tg3_writephy(tp, MII_BMCR, 3087 BMCR_ANENABLE | BMCR_ANRESTART); 3088 3089 tg3_writephy(tp, MII_TG3_FET_TEST, 3090 phytest | MII_TG3_FET_SHADOW_EN); 3091 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) { 3092 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD; 3093 tg3_writephy(tp, 3094 MII_TG3_FET_SHDW_AUXMODE4, 3095 phy); 3096 } 3097 tg3_writephy(tp, MII_TG3_FET_TEST, phytest); 3098 } 3099 return; 3100 } else if (do_low_power) { 3101 if (!tg3_phy_led_bug(tp)) 3102 tg3_writephy(tp, MII_TG3_EXT_CTRL, 3103 MII_TG3_EXT_CTRL_FORCE_LED_OFF); 3104 3105 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR | 3106 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE | 3107 MII_TG3_AUXCTL_PCTL_VREG_11V; 3108 
tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val); 3109 } 3110 3111 /* The PHY should not be powered down on some chips because 3112 * of bugs. 3113 */ 3114 if (tg3_phy_power_bug(tp)) 3115 return; 3116 3117 if (tg3_chip_rev(tp) == CHIPREV_5784_AX || 3118 tg3_chip_rev(tp) == CHIPREV_5761_AX) { 3119 val = tr32(TG3_CPMU_LSPD_1000MB_CLK); 3120 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK; 3121 val |= CPMU_LSPD_1000MB_MACCLK_12_5; 3122 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val); 3123 } 3124 3125 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN); 3126 } 3127 3128 /* tp->lock is held. */ 3129 static int tg3_nvram_lock(struct tg3 *tp) 3130 { 3131 if (tg3_flag(tp, NVRAM)) { 3132 int i; 3133 3134 if (tp->nvram_lock_cnt == 0) { 3135 tw32(NVRAM_SWARB, SWARB_REQ_SET1); 3136 for (i = 0; i < 8000; i++) { 3137 if (tr32(NVRAM_SWARB) & SWARB_GNT1) 3138 break; 3139 udelay(20); 3140 } 3141 if (i == 8000) { 3142 tw32(NVRAM_SWARB, SWARB_REQ_CLR1); 3143 return -ENODEV; 3144 } 3145 } 3146 tp->nvram_lock_cnt++; 3147 } 3148 return 0; 3149 } 3150 3151 /* tp->lock is held. */ 3152 static void tg3_nvram_unlock(struct tg3 *tp) 3153 { 3154 if (tg3_flag(tp, NVRAM)) { 3155 if (tp->nvram_lock_cnt > 0) 3156 tp->nvram_lock_cnt--; 3157 if (tp->nvram_lock_cnt == 0) 3158 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1); 3159 } 3160 } 3161 3162 /* tp->lock is held. */ 3163 static void tg3_enable_nvram_access(struct tg3 *tp) 3164 { 3165 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) { 3166 u32 nvaccess = tr32(NVRAM_ACCESS); 3167 3168 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE); 3169 } 3170 } 3171 3172 /* tp->lock is held. */ 3173 static void tg3_disable_nvram_access(struct tg3 *tp) 3174 { 3175 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) { 3176 u32 nvaccess = tr32(NVRAM_ACCESS); 3177 3178 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE); 3179 } 3180 } 3181 3182 static int tg3_nvram_read_using_eeprom(struct tg3 *tp, 3183 u32 offset, u32 *val) 3184 { 3185 u32 tmp; 3186 int i; 3187 3188 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0) 3189 return -EINVAL; 3190 3191 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK | 3192 EEPROM_ADDR_DEVID_MASK | 3193 EEPROM_ADDR_READ); 3194 tw32(GRC_EEPROM_ADDR, 3195 tmp | 3196 (0 << EEPROM_ADDR_DEVID_SHIFT) | 3197 ((offset << EEPROM_ADDR_ADDR_SHIFT) & 3198 EEPROM_ADDR_ADDR_MASK) | 3199 EEPROM_ADDR_READ | EEPROM_ADDR_START); 3200 3201 for (i = 0; i < 1000; i++) { 3202 tmp = tr32(GRC_EEPROM_ADDR); 3203 3204 if (tmp & EEPROM_ADDR_COMPLETE) 3205 break; 3206 msleep(1); 3207 } 3208 if (!(tmp & EEPROM_ADDR_COMPLETE)) 3209 return -EBUSY; 3210 3211 tmp = tr32(GRC_EEPROM_DATA); 3212 3213 /* 3214 * The data will always be opposite the native endian 3215 * format. Perform a blind byteswap to compensate. 
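 * (e.g. a value read back as 0x78563412 becomes 0x12345678 after the
 * swab32() below, regardless of host endianness).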
3216 */ 3217 *val = swab32(tmp); 3218 3219 return 0; 3220 } 3221 3222 #define NVRAM_CMD_TIMEOUT 10000 3223 3224 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd) 3225 { 3226 int i; 3227 3228 tw32(NVRAM_CMD, nvram_cmd); 3229 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) { 3230 usleep_range(10, 40); 3231 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) { 3232 udelay(10); 3233 break; 3234 } 3235 } 3236 3237 if (i == NVRAM_CMD_TIMEOUT) 3238 return -EBUSY; 3239 3240 return 0; 3241 } 3242 3243 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr) 3244 { 3245 if (tg3_flag(tp, NVRAM) && 3246 tg3_flag(tp, NVRAM_BUFFERED) && 3247 tg3_flag(tp, FLASH) && 3248 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) && 3249 (tp->nvram_jedecnum == JEDEC_ATMEL)) 3250 3251 addr = ((addr / tp->nvram_pagesize) << 3252 ATMEL_AT45DB0X1B_PAGE_POS) + 3253 (addr % tp->nvram_pagesize); 3254 3255 return addr; 3256 } 3257 3258 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr) 3259 { 3260 if (tg3_flag(tp, NVRAM) && 3261 tg3_flag(tp, NVRAM_BUFFERED) && 3262 tg3_flag(tp, FLASH) && 3263 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) && 3264 (tp->nvram_jedecnum == JEDEC_ATMEL)) 3265 3266 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) * 3267 tp->nvram_pagesize) + 3268 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1)); 3269 3270 return addr; 3271 } 3272 3273 /* NOTE: Data read in from NVRAM is byteswapped according to 3274 * the byteswapping settings for all other register accesses. 3275 * tg3 devices are BE devices, so on a BE machine, the data 3276 * returned will be exactly as it is seen in NVRAM. On a LE 3277 * machine, the 32-bit value will be byteswapped. 3278 */ 3279 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val) 3280 { 3281 int ret; 3282 3283 if (!tg3_flag(tp, NVRAM)) 3284 return tg3_nvram_read_using_eeprom(tp, offset, val); 3285 3286 offset = tg3_nvram_phys_addr(tp, offset); 3287 3288 if (offset > NVRAM_ADDR_MSK) 3289 return -EINVAL; 3290 3291 ret = tg3_nvram_lock(tp); 3292 if (ret) 3293 return ret; 3294 3295 tg3_enable_nvram_access(tp); 3296 3297 tw32(NVRAM_ADDR, offset); 3298 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO | 3299 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE); 3300 3301 if (ret == 0) 3302 *val = tr32(NVRAM_RDDATA); 3303 3304 tg3_disable_nvram_access(tp); 3305 3306 tg3_nvram_unlock(tp); 3307 3308 return ret; 3309 } 3310 3311 /* Ensures NVRAM data is in bytestream format. */ 3312 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val) 3313 { 3314 u32 v; 3315 int res = tg3_nvram_read(tp, offset, &v); 3316 if (!res) 3317 *val = cpu_to_be32(v); 3318 return res; 3319 } 3320 3321 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp, 3322 u32 offset, u32 len, u8 *buf) 3323 { 3324 int i, j, rc = 0; 3325 u32 val; 3326 3327 for (i = 0; i < len; i += 4) { 3328 u32 addr; 3329 __be32 data; 3330 3331 addr = offset + i; 3332 3333 memcpy(&data, buf + i, 4); 3334 3335 /* 3336 * The SEEPROM interface expects the data to always be opposite 3337 * the native endian format. We accomplish this by reversing 3338 * all the operations that would have been performed on the 3339 * data from a call to tg3_nvram_read_be32(). 
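 * That is, undo the read path's cpu_to_be32() with be32_to_cpu() and
 * then reapply its blind swab32() before writing GRC_EEPROM_DATA.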
3340 */ 3341 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data))); 3342 3343 val = tr32(GRC_EEPROM_ADDR); 3344 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE); 3345 3346 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK | 3347 EEPROM_ADDR_READ); 3348 tw32(GRC_EEPROM_ADDR, val | 3349 (0 << EEPROM_ADDR_DEVID_SHIFT) | 3350 (addr & EEPROM_ADDR_ADDR_MASK) | 3351 EEPROM_ADDR_START | 3352 EEPROM_ADDR_WRITE); 3353 3354 for (j = 0; j < 1000; j++) { 3355 val = tr32(GRC_EEPROM_ADDR); 3356 3357 if (val & EEPROM_ADDR_COMPLETE) 3358 break; 3359 msleep(1); 3360 } 3361 if (!(val & EEPROM_ADDR_COMPLETE)) { 3362 rc = -EBUSY; 3363 break; 3364 } 3365 } 3366 3367 return rc; 3368 } 3369 3370 /* offset and length are dword aligned */ 3371 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len, 3372 u8 *buf) 3373 { 3374 int ret = 0; 3375 u32 pagesize = tp->nvram_pagesize; 3376 u32 pagemask = pagesize - 1; 3377 u32 nvram_cmd; 3378 u8 *tmp; 3379 3380 tmp = kmalloc(pagesize, GFP_KERNEL); 3381 if (tmp == NULL) 3382 return -ENOMEM; 3383 3384 while (len) { 3385 int j; 3386 u32 phy_addr, page_off, size; 3387 3388 phy_addr = offset & ~pagemask; 3389 3390 for (j = 0; j < pagesize; j += 4) { 3391 ret = tg3_nvram_read_be32(tp, phy_addr + j, 3392 (__be32 *) (tmp + j)); 3393 if (ret) 3394 break; 3395 } 3396 if (ret) 3397 break; 3398 3399 page_off = offset & pagemask; 3400 size = pagesize; 3401 if (len < size) 3402 size = len; 3403 3404 len -= size; 3405 3406 memcpy(tmp + page_off, buf, size); 3407 3408 offset = offset + (pagesize - page_off); 3409 3410 tg3_enable_nvram_access(tp); 3411 3412 /* 3413 * Before we can erase the flash page, we need 3414 * to issue a special "write enable" command. 3415 */ 3416 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; 3417 3418 if (tg3_nvram_exec_cmd(tp, nvram_cmd)) 3419 break; 3420 3421 /* Erase the target page */ 3422 tw32(NVRAM_ADDR, phy_addr); 3423 3424 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR | 3425 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE; 3426 3427 if (tg3_nvram_exec_cmd(tp, nvram_cmd)) 3428 break; 3429 3430 /* Issue another write enable to start the write. 
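 * (unbuffered flash needs WREN before the page erase above as well as
 * before the program cycle that follows).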
*/ 3431 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; 3432 3433 if (tg3_nvram_exec_cmd(tp, nvram_cmd)) 3434 break; 3435 3436 for (j = 0; j < pagesize; j += 4) { 3437 __be32 data; 3438 3439 data = *((__be32 *) (tmp + j)); 3440 3441 tw32(NVRAM_WRDATA, be32_to_cpu(data)); 3442 3443 tw32(NVRAM_ADDR, phy_addr + j); 3444 3445 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | 3446 NVRAM_CMD_WR; 3447 3448 if (j == 0) 3449 nvram_cmd |= NVRAM_CMD_FIRST; 3450 else if (j == (pagesize - 4)) 3451 nvram_cmd |= NVRAM_CMD_LAST; 3452 3453 ret = tg3_nvram_exec_cmd(tp, nvram_cmd); 3454 if (ret) 3455 break; 3456 } 3457 if (ret) 3458 break; 3459 } 3460 3461 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE; 3462 tg3_nvram_exec_cmd(tp, nvram_cmd); 3463 3464 kfree(tmp); 3465 3466 return ret; 3467 } 3468 3469 /* offset and length are dword aligned */ 3470 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len, 3471 u8 *buf) 3472 { 3473 int i, ret = 0; 3474 3475 for (i = 0; i < len; i += 4, offset += 4) { 3476 u32 page_off, phy_addr, nvram_cmd; 3477 __be32 data; 3478 3479 memcpy(&data, buf + i, 4); 3480 tw32(NVRAM_WRDATA, be32_to_cpu(data)); 3481 3482 page_off = offset % tp->nvram_pagesize; 3483 3484 phy_addr = tg3_nvram_phys_addr(tp, offset); 3485 3486 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR; 3487 3488 if (page_off == 0 || i == 0) 3489 nvram_cmd |= NVRAM_CMD_FIRST; 3490 if (page_off == (tp->nvram_pagesize - 4)) 3491 nvram_cmd |= NVRAM_CMD_LAST; 3492 3493 if (i == (len - 4)) 3494 nvram_cmd |= NVRAM_CMD_LAST; 3495 3496 if ((nvram_cmd & NVRAM_CMD_FIRST) || 3497 !tg3_flag(tp, FLASH) || 3498 !tg3_flag(tp, 57765_PLUS)) 3499 tw32(NVRAM_ADDR, phy_addr); 3500 3501 if (tg3_asic_rev(tp) != ASIC_REV_5752 && 3502 !tg3_flag(tp, 5755_PLUS) && 3503 (tp->nvram_jedecnum == JEDEC_ST) && 3504 (nvram_cmd & NVRAM_CMD_FIRST)) { 3505 u32 cmd; 3506 3507 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; 3508 ret = tg3_nvram_exec_cmd(tp, cmd); 3509 if (ret) 3510 break; 3511 } 3512 if (!tg3_flag(tp, FLASH)) { 3513 /* We always do complete word writes to eeprom. 
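 * Each 32-bit access is therefore tagged as both FIRST and LAST.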
*/ 3514 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST); 3515 } 3516 3517 ret = tg3_nvram_exec_cmd(tp, nvram_cmd); 3518 if (ret) 3519 break; 3520 } 3521 return ret; 3522 } 3523 3524 /* offset and length are dword aligned */ 3525 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf) 3526 { 3527 int ret; 3528 3529 if (tg3_flag(tp, EEPROM_WRITE_PROT)) { 3530 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl & 3531 ~GRC_LCLCTRL_GPIO_OUTPUT1); 3532 udelay(40); 3533 } 3534 3535 if (!tg3_flag(tp, NVRAM)) { 3536 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf); 3537 } else { 3538 u32 grc_mode; 3539 3540 ret = tg3_nvram_lock(tp); 3541 if (ret) 3542 return ret; 3543 3544 tg3_enable_nvram_access(tp); 3545 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) 3546 tw32(NVRAM_WRITE1, 0x406); 3547 3548 grc_mode = tr32(GRC_MODE); 3549 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE); 3550 3551 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) { 3552 ret = tg3_nvram_write_block_buffered(tp, offset, len, 3553 buf); 3554 } else { 3555 ret = tg3_nvram_write_block_unbuffered(tp, offset, len, 3556 buf); 3557 } 3558 3559 grc_mode = tr32(GRC_MODE); 3560 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE); 3561 3562 tg3_disable_nvram_access(tp); 3563 tg3_nvram_unlock(tp); 3564 } 3565 3566 if (tg3_flag(tp, EEPROM_WRITE_PROT)) { 3567 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 3568 udelay(40); 3569 } 3570 3571 return ret; 3572 } 3573 3574 #define RX_CPU_SCRATCH_BASE 0x30000 3575 #define RX_CPU_SCRATCH_SIZE 0x04000 3576 #define TX_CPU_SCRATCH_BASE 0x34000 3577 #define TX_CPU_SCRATCH_SIZE 0x04000 3578 3579 /* tp->lock is held. */ 3580 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base) 3581 { 3582 int i; 3583 const int iters = 10000; 3584 3585 for (i = 0; i < iters; i++) { 3586 tw32(cpu_base + CPU_STATE, 0xffffffff); 3587 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); 3588 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT) 3589 break; 3590 if (pci_channel_offline(tp->pdev)) 3591 return -EBUSY; 3592 } 3593 3594 return (i == iters) ? -EBUSY : 0; 3595 } 3596 3597 /* tp->lock is held. */ 3598 static int tg3_rxcpu_pause(struct tg3 *tp) 3599 { 3600 int rc = tg3_pause_cpu(tp, RX_CPU_BASE); 3601 3602 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); 3603 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT); 3604 udelay(10); 3605 3606 return rc; 3607 } 3608 3609 /* tp->lock is held. */ 3610 static int tg3_txcpu_pause(struct tg3 *tp) 3611 { 3612 return tg3_pause_cpu(tp, TX_CPU_BASE); 3613 } 3614 3615 /* tp->lock is held. */ 3616 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base) 3617 { 3618 tw32(cpu_base + CPU_STATE, 0xffffffff); 3619 tw32_f(cpu_base + CPU_MODE, 0x00000000); 3620 } 3621 3622 /* tp->lock is held. */ 3623 static void tg3_rxcpu_resume(struct tg3 *tp) 3624 { 3625 tg3_resume_cpu(tp, RX_CPU_BASE); 3626 } 3627 3628 /* tp->lock is held. */ 3629 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base) 3630 { 3631 int rc; 3632 3633 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)); 3634 3635 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 3636 u32 val = tr32(GRC_VCPU_EXT_CTRL); 3637 3638 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU); 3639 return 0; 3640 } 3641 if (cpu_base == RX_CPU_BASE) { 3642 rc = tg3_rxcpu_pause(tp); 3643 } else { 3644 /* 3645 * There is only an Rx CPU for the 5750 derivative in the 3646 * BCM4785. 
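 * With no Tx CPU to halt there, report the pause as an immediate
 * success.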
3647 */ 3648 if (tg3_flag(tp, IS_SSB_CORE)) 3649 return 0; 3650 3651 rc = tg3_txcpu_pause(tp); 3652 } 3653 3654 if (rc) { 3655 netdev_err(tp->dev, "%s timed out, %s CPU\n", 3656 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX"); 3657 return -ENODEV; 3658 } 3659 3660 /* Clear firmware's nvram arbitration. */ 3661 if (tg3_flag(tp, NVRAM)) 3662 tw32(NVRAM_SWARB, SWARB_REQ_CLR0); 3663 return 0; 3664 } 3665 3666 static int tg3_fw_data_len(struct tg3 *tp, 3667 const struct tg3_firmware_hdr *fw_hdr) 3668 { 3669 int fw_len; 3670 3671 /* Non fragmented firmware have one firmware header followed by a 3672 * contiguous chunk of data to be written. The length field in that 3673 * header is not the length of data to be written but the complete 3674 * length of the bss. The data length is determined based on 3675 * tp->fw->size minus headers. 3676 * 3677 * Fragmented firmware have a main header followed by multiple 3678 * fragments. Each fragment is identical to non fragmented firmware 3679 * with a firmware header followed by a contiguous chunk of data. In 3680 * the main header, the length field is unused and set to 0xffffffff. 3681 * In each fragment header the length is the entire size of that 3682 * fragment i.e. fragment data + header length. Data length is 3683 * therefore length field in the header minus TG3_FW_HDR_LEN. 3684 */ 3685 if (tp->fw_len == 0xffffffff) 3686 fw_len = be32_to_cpu(fw_hdr->len); 3687 else 3688 fw_len = tp->fw->size; 3689 3690 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32); 3691 } 3692 3693 /* tp->lock is held. */ 3694 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, 3695 u32 cpu_scratch_base, int cpu_scratch_size, 3696 const struct tg3_firmware_hdr *fw_hdr) 3697 { 3698 int err, i; 3699 void (*write_op)(struct tg3 *, u32, u32); 3700 int total_len = tp->fw->size; 3701 3702 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) { 3703 netdev_err(tp->dev, 3704 "%s: Trying to load TX cpu firmware which is 5705\n", 3705 __func__); 3706 return -EINVAL; 3707 } 3708 3709 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766) 3710 write_op = tg3_write_mem; 3711 else 3712 write_op = tg3_write_indirect_reg32; 3713 3714 if (tg3_asic_rev(tp) != ASIC_REV_57766) { 3715 /* It is possible that bootcode is still loading at this point. 3716 * Get the nvram lock first before halting the cpu. 3717 */ 3718 int lock_err = tg3_nvram_lock(tp); 3719 err = tg3_halt_cpu(tp, cpu_base); 3720 if (!lock_err) 3721 tg3_nvram_unlock(tp); 3722 if (err) 3723 goto out; 3724 3725 for (i = 0; i < cpu_scratch_size; i += sizeof(u32)) 3726 write_op(tp, cpu_scratch_base + i, 0); 3727 tw32(cpu_base + CPU_STATE, 0xffffffff); 3728 tw32(cpu_base + CPU_MODE, 3729 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT); 3730 } else { 3731 /* Subtract additional main header for fragmented firmware and 3732 * advance to the first fragment 3733 */ 3734 total_len -= TG3_FW_HDR_LEN; 3735 fw_hdr++; 3736 } 3737 3738 do { 3739 u32 *fw_data = (u32 *)(fw_hdr + 1); 3740 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++) 3741 write_op(tp, cpu_scratch_base + 3742 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) + 3743 (i * sizeof(u32)), 3744 be32_to_cpu(fw_data[i])); 3745 3746 total_len -= be32_to_cpu(fw_hdr->len); 3747 3748 /* Advance to next fragment */ 3749 fw_hdr = (struct tg3_firmware_hdr *) 3750 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len)); 3751 } while (total_len > 0); 3752 3753 err = 0; 3754 3755 out: 3756 return err; 3757 } 3758 3759 /* tp->lock is held. 
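 * Re-halt the CPU and rewrite the PC until the readback matches,
 * giving up after a few tries.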
*/ 3760 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc) 3761 { 3762 int i; 3763 const int iters = 5; 3764 3765 tw32(cpu_base + CPU_STATE, 0xffffffff); 3766 tw32_f(cpu_base + CPU_PC, pc); 3767 3768 for (i = 0; i < iters; i++) { 3769 if (tr32(cpu_base + CPU_PC) == pc) 3770 break; 3771 tw32(cpu_base + CPU_STATE, 0xffffffff); 3772 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); 3773 tw32_f(cpu_base + CPU_PC, pc); 3774 udelay(1000); 3775 } 3776 3777 return (i == iters) ? -EBUSY : 0; 3778 } 3779 3780 /* tp->lock is held. */ 3781 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp) 3782 { 3783 const struct tg3_firmware_hdr *fw_hdr; 3784 int err; 3785 3786 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; 3787 3788 /* Firmware blob starts with version numbers, followed by 3789 start address and length. We are setting complete length. 3790 length = end_address_of_bss - start_address_of_text. 3791 Remainder is the blob to be loaded contiguously 3792 from start address. */ 3793 3794 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE, 3795 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE, 3796 fw_hdr); 3797 if (err) 3798 return err; 3799 3800 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE, 3801 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE, 3802 fw_hdr); 3803 if (err) 3804 return err; 3805 3806 /* Now startup only the RX cpu. */ 3807 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE, 3808 be32_to_cpu(fw_hdr->base_addr)); 3809 if (err) { 3810 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x " 3811 "should be %08x\n", __func__, 3812 tr32(RX_CPU_BASE + CPU_PC), 3813 be32_to_cpu(fw_hdr->base_addr)); 3814 return -ENODEV; 3815 } 3816 3817 tg3_rxcpu_resume(tp); 3818 3819 return 0; 3820 } 3821 3822 static int tg3_validate_rxcpu_state(struct tg3 *tp) 3823 { 3824 const int iters = 1000; 3825 int i; 3826 u32 val; 3827 3828 /* Wait for boot code to complete initialization and enter service 3829 * loop. It is then safe to download service patches 3830 */ 3831 for (i = 0; i < iters; i++) { 3832 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP) 3833 break; 3834 3835 udelay(10); 3836 } 3837 3838 if (i == iters) { 3839 netdev_err(tp->dev, "Boot code not ready for service patches\n"); 3840 return -EBUSY; 3841 } 3842 3843 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE); 3844 if (val & 0xff) { 3845 netdev_warn(tp->dev, 3846 "Other patches exist. Not downloading EEE patch\n"); 3847 return -EEXIST; 3848 } 3849 3850 return 0; 3851 } 3852 3853 /* tp->lock is held. */ 3854 static void tg3_load_57766_firmware(struct tg3 *tp) 3855 { 3856 struct tg3_firmware_hdr *fw_hdr; 3857 3858 if (!tg3_flag(tp, NO_NVRAM)) 3859 return; 3860 3861 if (tg3_validate_rxcpu_state(tp)) 3862 return; 3863 3864 if (!tp->fw) 3865 return; 3866 3867 /* This firmware blob has a different format than older firmware 3868 * releases as given below. The main difference is we have fragmented 3869 * data to be written to non-contiguous locations. 3870 * 3871 * In the beginning we have a firmware header identical to other 3872 * firmware which consists of version, base addr and length. The length 3873 * here is unused and set to 0xffffffff. 3874 * 3875 * This is followed by a series of firmware fragments which are 3876 * individually identical to previous firmware. i.e. they have the 3877 * firmware header and followed by data for that fragment. The version 3878 * field of the individual fragment header is unused. 
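 *
 * Rough sketch of the blob layout:
 *
 *   main header:     version | base_addr | len == 0xffffffff
 *   fragment header: version (unused) | base_addr | len
 *   fragment data:   (len - TG3_FW_HDR_LEN) bytes for base_addr
 *   ...repeated until tp->fw->size is consumed...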
3879 */ 3880 3881 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; 3882 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR) 3883 return; 3884 3885 if (tg3_rxcpu_pause(tp)) 3886 return; 3887 3888 /* tg3_load_firmware_cpu() will always succeed for the 57766 */ 3889 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr); 3890 3891 tg3_rxcpu_resume(tp); 3892 } 3893 3894 /* tp->lock is held. */ 3895 static int tg3_load_tso_firmware(struct tg3 *tp) 3896 { 3897 const struct tg3_firmware_hdr *fw_hdr; 3898 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size; 3899 int err; 3900 3901 if (!tg3_flag(tp, FW_TSO)) 3902 return 0; 3903 3904 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; 3905 3906 /* Firmware blob starts with version numbers, followed by 3907 start address and length. We are setting complete length. 3908 length = end_address_of_bss - start_address_of_text. 3909 Remainder is the blob to be loaded contiguously 3910 from start address. */ 3911 3912 cpu_scratch_size = tp->fw_len; 3913 3914 if (tg3_asic_rev(tp) == ASIC_REV_5705) { 3915 cpu_base = RX_CPU_BASE; 3916 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705; 3917 } else { 3918 cpu_base = TX_CPU_BASE; 3919 cpu_scratch_base = TX_CPU_SCRATCH_BASE; 3920 cpu_scratch_size = TX_CPU_SCRATCH_SIZE; 3921 } 3922 3923 err = tg3_load_firmware_cpu(tp, cpu_base, 3924 cpu_scratch_base, cpu_scratch_size, 3925 fw_hdr); 3926 if (err) 3927 return err; 3928 3929 /* Now startup the cpu. */ 3930 err = tg3_pause_cpu_and_set_pc(tp, cpu_base, 3931 be32_to_cpu(fw_hdr->base_addr)); 3932 if (err) { 3933 netdev_err(tp->dev, 3934 "%s fails to set CPU PC, is %08x should be %08x\n", 3935 __func__, tr32(cpu_base + CPU_PC), 3936 be32_to_cpu(fw_hdr->base_addr)); 3937 return -ENODEV; 3938 } 3939 3940 tg3_resume_cpu(tp, cpu_base); 3941 return 0; 3942 } 3943 3944 /* tp->lock is held. */ 3945 static void __tg3_set_one_mac_addr(struct tg3 *tp, const u8 *mac_addr, 3946 int index) 3947 { 3948 u32 addr_high, addr_low; 3949 3950 addr_high = ((mac_addr[0] << 8) | mac_addr[1]); 3951 addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) | 3952 (mac_addr[4] << 8) | mac_addr[5]); 3953 3954 if (index < 4) { 3955 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high); 3956 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low); 3957 } else { 3958 index -= 4; 3959 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high); 3960 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low); 3961 } 3962 } 3963 3964 /* tp->lock is held. */ 3965 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1) 3966 { 3967 u32 addr_high; 3968 int i; 3969 3970 for (i = 0; i < 4; i++) { 3971 if (i == 1 && skip_mac_1) 3972 continue; 3973 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i); 3974 } 3975 3976 if (tg3_asic_rev(tp) == ASIC_REV_5703 || 3977 tg3_asic_rev(tp) == ASIC_REV_5704) { 3978 for (i = 4; i < 16; i++) 3979 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i); 3980 } 3981 3982 addr_high = (tp->dev->dev_addr[0] + 3983 tp->dev->dev_addr[1] + 3984 tp->dev->dev_addr[2] + 3985 tp->dev->dev_addr[3] + 3986 tp->dev->dev_addr[4] + 3987 tp->dev->dev_addr[5]) & 3988 TX_BACKOFF_SEED_MASK; 3989 tw32(MAC_TX_BACKOFF_SEED, addr_high); 3990 } 3991 3992 static void tg3_enable_register_access(struct tg3 *tp) 3993 { 3994 /* 3995 * Make sure register accesses (indirect or otherwise) will function 3996 * correctly. 
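 * The cached misc_host_ctrl value is rewritten because PCI config
 * space may have been cleared across a power-state transition.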
3997 */ 3998 pci_write_config_dword(tp->pdev, 3999 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl); 4000 } 4001 4002 static int tg3_power_up(struct tg3 *tp) 4003 { 4004 int err; 4005 4006 tg3_enable_register_access(tp); 4007 4008 err = pci_set_power_state(tp->pdev, PCI_D0); 4009 if (!err) { 4010 /* Switch out of Vaux if it is a NIC */ 4011 tg3_pwrsrc_switch_to_vmain(tp); 4012 } else { 4013 netdev_err(tp->dev, "Transition to D0 failed\n"); 4014 } 4015 4016 return err; 4017 } 4018 4019 static int tg3_setup_phy(struct tg3 *, bool); 4020 4021 static int tg3_power_down_prepare(struct tg3 *tp) 4022 { 4023 u32 misc_host_ctrl; 4024 bool device_should_wake, do_low_power; 4025 4026 tg3_enable_register_access(tp); 4027 4028 /* Restore the CLKREQ setting. */ 4029 if (tg3_flag(tp, CLKREQ_BUG)) 4030 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL, 4031 PCI_EXP_LNKCTL_CLKREQ_EN); 4032 4033 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); 4034 tw32(TG3PCI_MISC_HOST_CTRL, 4035 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT); 4036 4037 device_should_wake = device_may_wakeup(&tp->pdev->dev) && 4038 tg3_flag(tp, WOL_ENABLE); 4039 4040 if (tg3_flag(tp, USE_PHYLIB)) { 4041 do_low_power = false; 4042 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) && 4043 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { 4044 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, }; 4045 struct phy_device *phydev; 4046 u32 phyid; 4047 4048 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 4049 4050 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; 4051 4052 tp->link_config.speed = phydev->speed; 4053 tp->link_config.duplex = phydev->duplex; 4054 tp->link_config.autoneg = phydev->autoneg; 4055 ethtool_convert_link_mode_to_legacy_u32( 4056 &tp->link_config.advertising, 4057 phydev->advertising); 4058 4059 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising); 4060 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, 4061 advertising); 4062 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, 4063 advertising); 4064 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, 4065 advertising); 4066 4067 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) { 4068 if (tg3_flag(tp, WOL_SPEED_100MB)) { 4069 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, 4070 advertising); 4071 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, 4072 advertising); 4073 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, 4074 advertising); 4075 } else { 4076 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, 4077 advertising); 4078 } 4079 } 4080 4081 linkmode_copy(phydev->advertising, advertising); 4082 phy_start_aneg(phydev); 4083 4084 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask; 4085 if (phyid != PHY_ID_BCMAC131) { 4086 phyid &= PHY_BCM_OUI_MASK; 4087 if (phyid == PHY_BCM_OUI_1 || 4088 phyid == PHY_BCM_OUI_2 || 4089 phyid == PHY_BCM_OUI_3) 4090 do_low_power = true; 4091 } 4092 } 4093 } else { 4094 do_low_power = true; 4095 4096 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) 4097 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; 4098 4099 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 4100 tg3_setup_phy(tp, false); 4101 } 4102 4103 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 4104 u32 val; 4105 4106 val = tr32(GRC_VCPU_EXT_CTRL); 4107 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL); 4108 } else if (!tg3_flag(tp, ENABLE_ASF)) { 4109 int i; 4110 u32 val; 4111 4112 for (i = 0; i < 200; i++) { 4113 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val); 4114 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) 4115 break; 4116 msleep(1); 4117 } 4118 } 4119 if (tg3_flag(tp, WOL_CAP)) 
4120 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE | 4121 WOL_DRV_STATE_SHUTDOWN | 4122 WOL_DRV_WOL | 4123 WOL_SET_MAGIC_PKT); 4124 4125 if (device_should_wake) { 4126 u32 mac_mode; 4127 4128 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { 4129 if (do_low_power && 4130 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) { 4131 tg3_phy_auxctl_write(tp, 4132 MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 4133 MII_TG3_AUXCTL_PCTL_WOL_EN | 4134 MII_TG3_AUXCTL_PCTL_100TX_LPWR | 4135 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC); 4136 udelay(40); 4137 } 4138 4139 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 4140 mac_mode = MAC_MODE_PORT_MODE_GMII; 4141 else if (tp->phy_flags & 4142 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) { 4143 if (tp->link_config.active_speed == SPEED_1000) 4144 mac_mode = MAC_MODE_PORT_MODE_GMII; 4145 else 4146 mac_mode = MAC_MODE_PORT_MODE_MII; 4147 } else 4148 mac_mode = MAC_MODE_PORT_MODE_MII; 4149 4150 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY; 4151 if (tg3_asic_rev(tp) == ASIC_REV_5700) { 4152 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ? 4153 SPEED_100 : SPEED_10; 4154 if (tg3_5700_link_polarity(tp, speed)) 4155 mac_mode |= MAC_MODE_LINK_POLARITY; 4156 else 4157 mac_mode &= ~MAC_MODE_LINK_POLARITY; 4158 } 4159 } else { 4160 mac_mode = MAC_MODE_PORT_MODE_TBI; 4161 } 4162 4163 if (!tg3_flag(tp, 5750_PLUS)) 4164 tw32(MAC_LED_CTRL, tp->led_ctrl); 4165 4166 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE; 4167 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) && 4168 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE))) 4169 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL; 4170 4171 if (tg3_flag(tp, ENABLE_APE)) 4172 mac_mode |= MAC_MODE_APE_TX_EN | 4173 MAC_MODE_APE_RX_EN | 4174 MAC_MODE_TDE_ENABLE; 4175 4176 tw32_f(MAC_MODE, mac_mode); 4177 udelay(100); 4178 4179 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE); 4180 udelay(10); 4181 } 4182 4183 if (!tg3_flag(tp, WOL_SPEED_100MB) && 4184 (tg3_asic_rev(tp) == ASIC_REV_5700 || 4185 tg3_asic_rev(tp) == ASIC_REV_5701)) { 4186 u32 base_val; 4187 4188 base_val = tp->pci_clock_ctrl; 4189 base_val |= (CLOCK_CTRL_RXCLK_DISABLE | 4190 CLOCK_CTRL_TXCLK_DISABLE); 4191 4192 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK | 4193 CLOCK_CTRL_PWRDOWN_PLL133, 40); 4194 } else if (tg3_flag(tp, 5780_CLASS) || 4195 tg3_flag(tp, CPMU_PRESENT) || 4196 tg3_asic_rev(tp) == ASIC_REV_5906) { 4197 /* do nothing */ 4198 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) { 4199 u32 newbits1, newbits2; 4200 4201 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 4202 tg3_asic_rev(tp) == ASIC_REV_5701) { 4203 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE | 4204 CLOCK_CTRL_TXCLK_DISABLE | 4205 CLOCK_CTRL_ALTCLK); 4206 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; 4207 } else if (tg3_flag(tp, 5705_PLUS)) { 4208 newbits1 = CLOCK_CTRL_625_CORE; 4209 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK; 4210 } else { 4211 newbits1 = CLOCK_CTRL_ALTCLK; 4212 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; 4213 } 4214 4215 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1, 4216 40); 4217 4218 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2, 4219 40); 4220 4221 if (!tg3_flag(tp, 5705_PLUS)) { 4222 u32 newbits3; 4223 4224 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 4225 tg3_asic_rev(tp) == ASIC_REV_5701) { 4226 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE | 4227 CLOCK_CTRL_TXCLK_DISABLE | 4228 CLOCK_CTRL_44MHZ_CORE); 4229 } else { 4230 newbits3 = CLOCK_CTRL_44MHZ_CORE; 4231 } 4232 4233 tw32_wait_f(TG3PCI_CLOCK_CTRL, 4234 tp->pci_clock_ctrl | newbits3, 40); 4235 } 4236 } 4237 4238 if (!(device_should_wake) && !tg3_flag(tp, 
ENABLE_ASF)) 4239 tg3_power_down_phy(tp, do_low_power); 4240 4241 tg3_frob_aux_power(tp, true); 4242 4243 /* Workaround for unstable PLL clock */ 4244 if ((!tg3_flag(tp, IS_SSB_CORE)) && 4245 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) || 4246 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) { 4247 u32 val = tr32(0x7d00); 4248 4249 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1); 4250 tw32(0x7d00, val); 4251 if (!tg3_flag(tp, ENABLE_ASF)) { 4252 int err; 4253 4254 err = tg3_nvram_lock(tp); 4255 tg3_halt_cpu(tp, RX_CPU_BASE); 4256 if (!err) 4257 tg3_nvram_unlock(tp); 4258 } 4259 } 4260 4261 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN); 4262 4263 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN); 4264 4265 return 0; 4266 } 4267 4268 static void tg3_power_down(struct tg3 *tp) 4269 { 4270 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE)); 4271 pci_set_power_state(tp->pdev, PCI_D3hot); 4272 } 4273 4274 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex) 4275 { 4276 switch (val & MII_TG3_AUX_STAT_SPDMASK) { 4277 case MII_TG3_AUX_STAT_10HALF: 4278 *speed = SPEED_10; 4279 *duplex = DUPLEX_HALF; 4280 break; 4281 4282 case MII_TG3_AUX_STAT_10FULL: 4283 *speed = SPEED_10; 4284 *duplex = DUPLEX_FULL; 4285 break; 4286 4287 case MII_TG3_AUX_STAT_100HALF: 4288 *speed = SPEED_100; 4289 *duplex = DUPLEX_HALF; 4290 break; 4291 4292 case MII_TG3_AUX_STAT_100FULL: 4293 *speed = SPEED_100; 4294 *duplex = DUPLEX_FULL; 4295 break; 4296 4297 case MII_TG3_AUX_STAT_1000HALF: 4298 *speed = SPEED_1000; 4299 *duplex = DUPLEX_HALF; 4300 break; 4301 4302 case MII_TG3_AUX_STAT_1000FULL: 4303 *speed = SPEED_1000; 4304 *duplex = DUPLEX_FULL; 4305 break; 4306 4307 default: 4308 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 4309 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 : 4310 SPEED_10; 4311 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? 
DUPLEX_FULL : 4312 DUPLEX_HALF; 4313 break; 4314 } 4315 *speed = SPEED_UNKNOWN; 4316 *duplex = DUPLEX_UNKNOWN; 4317 break; 4318 } 4319 } 4320 4321 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl) 4322 { 4323 int err = 0; 4324 u32 val, new_adv; 4325 4326 new_adv = ADVERTISE_CSMA; 4327 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL; 4328 new_adv |= mii_advertise_flowctrl(flowctrl); 4329 4330 err = tg3_writephy(tp, MII_ADVERTISE, new_adv); 4331 if (err) 4332 goto done; 4333 4334 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4335 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise); 4336 4337 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || 4338 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) 4339 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER; 4340 4341 err = tg3_writephy(tp, MII_CTRL1000, new_adv); 4342 if (err) 4343 goto done; 4344 } 4345 4346 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) 4347 goto done; 4348 4349 tw32(TG3_CPMU_EEE_MODE, 4350 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE); 4351 4352 err = tg3_phy_toggle_auxctl_smdsp(tp, true); 4353 if (!err) { 4354 u32 err2; 4355 4356 val = 0; 4357 /* Advertise 100-BaseTX EEE ability */ 4358 if (advertise & ADVERTISED_100baseT_Full) 4359 val |= MDIO_AN_EEE_ADV_100TX; 4360 /* Advertise 1000-BaseT EEE ability */ 4361 if (advertise & ADVERTISED_1000baseT_Full) 4362 val |= MDIO_AN_EEE_ADV_1000T; 4363 4364 if (!tp->eee.eee_enabled) { 4365 val = 0; 4366 tp->eee.advertised = 0; 4367 } else { 4368 tp->eee.advertised = advertise & 4369 (ADVERTISED_100baseT_Full | 4370 ADVERTISED_1000baseT_Full); 4371 } 4372 4373 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val); 4374 if (err) 4375 val = 0; 4376 4377 switch (tg3_asic_rev(tp)) { 4378 case ASIC_REV_5717: 4379 case ASIC_REV_57765: 4380 case ASIC_REV_57766: 4381 case ASIC_REV_5719: 4382 /* If we advertised any eee advertisements above... 
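 * (and EEE is enabled and the MDIO_AN_EEE_ADV write succeeded),
 * swap in the TAP26 work-around bits below; writing 0 clears
 * them again.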
*/ 4383 if (val) 4384 val = MII_TG3_DSP_TAP26_ALNOKO | 4385 MII_TG3_DSP_TAP26_RMRXSTO | 4386 MII_TG3_DSP_TAP26_OPCSINPT; 4387 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); 4388 fallthrough; 4389 case ASIC_REV_5720: 4390 case ASIC_REV_5762: 4391 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val)) 4392 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val | 4393 MII_TG3_DSP_CH34TP2_HIBW01); 4394 } 4395 4396 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false); 4397 if (!err) 4398 err = err2; 4399 } 4400 4401 done: 4402 return err; 4403 } 4404 4405 static void tg3_phy_copper_begin(struct tg3 *tp) 4406 { 4407 if (tp->link_config.autoneg == AUTONEG_ENABLE || 4408 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { 4409 u32 adv, fc; 4410 4411 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) && 4412 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) { 4413 adv = ADVERTISED_10baseT_Half | 4414 ADVERTISED_10baseT_Full; 4415 if (tg3_flag(tp, WOL_SPEED_100MB)) 4416 adv |= ADVERTISED_100baseT_Half | 4417 ADVERTISED_100baseT_Full; 4418 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) { 4419 if (!(tp->phy_flags & 4420 TG3_PHYFLG_DISABLE_1G_HD_ADV)) 4421 adv |= ADVERTISED_1000baseT_Half; 4422 adv |= ADVERTISED_1000baseT_Full; 4423 } 4424 4425 fc = FLOW_CTRL_TX | FLOW_CTRL_RX; 4426 } else { 4427 adv = tp->link_config.advertising; 4428 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) 4429 adv &= ~(ADVERTISED_1000baseT_Half | 4430 ADVERTISED_1000baseT_Full); 4431 4432 fc = tp->link_config.flowctrl; 4433 } 4434 4435 tg3_phy_autoneg_cfg(tp, adv, fc); 4436 4437 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) && 4438 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) { 4439 /* Normally during power down we want to autonegotiate 4440 * the lowest possible speed for WOL. However, to avoid 4441 * link flap, we leave it untouched. 4442 */ 4443 return; 4444 } 4445 4446 tg3_writephy(tp, MII_BMCR, 4447 BMCR_ANENABLE | BMCR_ANRESTART); 4448 } else { 4449 int i; 4450 u32 bmcr, orig_bmcr; 4451 4452 tp->link_config.active_speed = tp->link_config.speed; 4453 tp->link_config.active_duplex = tp->link_config.duplex; 4454 4455 if (tg3_asic_rev(tp) == ASIC_REV_5714) { 4456 /* With autoneg disabled, 5715 only links up when the 4457 * advertisement register has the configured speed 4458 * enabled. 
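 * (the 5715 shares ASIC_REV_5714, hence this branch), so
 * advertise all speeds.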
4459 */ 4460 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL); 4461 } 4462 4463 bmcr = 0; 4464 switch (tp->link_config.speed) { 4465 default: 4466 case SPEED_10: 4467 break; 4468 4469 case SPEED_100: 4470 bmcr |= BMCR_SPEED100; 4471 break; 4472 4473 case SPEED_1000: 4474 bmcr |= BMCR_SPEED1000; 4475 break; 4476 } 4477 4478 if (tp->link_config.duplex == DUPLEX_FULL) 4479 bmcr |= BMCR_FULLDPLX; 4480 4481 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) && 4482 (bmcr != orig_bmcr)) { 4483 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK); 4484 for (i = 0; i < 1500; i++) { 4485 u32 tmp; 4486 4487 udelay(10); 4488 if (tg3_readphy(tp, MII_BMSR, &tmp) || 4489 tg3_readphy(tp, MII_BMSR, &tmp)) 4490 continue; 4491 if (!(tmp & BMSR_LSTATUS)) { 4492 udelay(40); 4493 break; 4494 } 4495 } 4496 tg3_writephy(tp, MII_BMCR, bmcr); 4497 udelay(40); 4498 } 4499 } 4500 } 4501 4502 static int tg3_phy_pull_config(struct tg3 *tp) 4503 { 4504 int err; 4505 u32 val; 4506 4507 err = tg3_readphy(tp, MII_BMCR, &val); 4508 if (err) 4509 goto done; 4510 4511 if (!(val & BMCR_ANENABLE)) { 4512 tp->link_config.autoneg = AUTONEG_DISABLE; 4513 tp->link_config.advertising = 0; 4514 tg3_flag_clear(tp, PAUSE_AUTONEG); 4515 4516 err = -EIO; 4517 4518 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) { 4519 case 0: 4520 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) 4521 goto done; 4522 4523 tp->link_config.speed = SPEED_10; 4524 break; 4525 case BMCR_SPEED100: 4526 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) 4527 goto done; 4528 4529 tp->link_config.speed = SPEED_100; 4530 break; 4531 case BMCR_SPEED1000: 4532 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4533 tp->link_config.speed = SPEED_1000; 4534 break; 4535 } 4536 fallthrough; 4537 default: 4538 goto done; 4539 } 4540 4541 if (val & BMCR_FULLDPLX) 4542 tp->link_config.duplex = DUPLEX_FULL; 4543 else 4544 tp->link_config.duplex = DUPLEX_HALF; 4545 4546 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX; 4547 4548 err = 0; 4549 goto done; 4550 } 4551 4552 tp->link_config.autoneg = AUTONEG_ENABLE; 4553 tp->link_config.advertising = ADVERTISED_Autoneg; 4554 tg3_flag_set(tp, PAUSE_AUTONEG); 4555 4556 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { 4557 u32 adv; 4558 4559 err = tg3_readphy(tp, MII_ADVERTISE, &val); 4560 if (err) 4561 goto done; 4562 4563 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL); 4564 tp->link_config.advertising |= adv | ADVERTISED_TP; 4565 4566 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val); 4567 } else { 4568 tp->link_config.advertising |= ADVERTISED_FIBRE; 4569 } 4570 4571 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4572 u32 adv; 4573 4574 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { 4575 err = tg3_readphy(tp, MII_CTRL1000, &val); 4576 if (err) 4577 goto done; 4578 4579 adv = mii_ctrl1000_to_ethtool_adv_t(val); 4580 } else { 4581 err = tg3_readphy(tp, MII_ADVERTISE, &val); 4582 if (err) 4583 goto done; 4584 4585 adv = tg3_decode_flowctrl_1000X(val); 4586 tp->link_config.flowctrl = adv; 4587 4588 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL); 4589 adv = mii_adv_to_ethtool_adv_x(val); 4590 } 4591 4592 tp->link_config.advertising |= adv; 4593 } 4594 4595 done: 4596 return err; 4597 } 4598 4599 static int tg3_init_5401phy_dsp(struct tg3 *tp) 4600 { 4601 int err; 4602 4603 /* Turn off tap power management. 
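 * The DSP writes below are opaque, fixed BCM5401 tuning values.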
*/ 4604 /* Set Extended packet length bit */ 4605 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20); 4606 4607 err |= tg3_phydsp_write(tp, 0x0012, 0x1804); 4608 err |= tg3_phydsp_write(tp, 0x0013, 0x1204); 4609 err |= tg3_phydsp_write(tp, 0x8006, 0x0132); 4610 err |= tg3_phydsp_write(tp, 0x8006, 0x0232); 4611 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20); 4612 4613 udelay(40); 4614 4615 return err; 4616 } 4617 4618 static bool tg3_phy_eee_config_ok(struct tg3 *tp) 4619 { 4620 struct ethtool_eee eee; 4621 4622 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) 4623 return true; 4624 4625 tg3_eee_pull_config(tp, &eee); 4626 4627 if (tp->eee.eee_enabled) { 4628 if (tp->eee.advertised != eee.advertised || 4629 tp->eee.tx_lpi_timer != eee.tx_lpi_timer || 4630 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled) 4631 return false; 4632 } else { 4633 /* EEE is disabled but we're advertising */ 4634 if (eee.advertised) 4635 return false; 4636 } 4637 4638 return true; 4639 } 4640 4641 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv) 4642 { 4643 u32 advmsk, tgtadv, advertising; 4644 4645 advertising = tp->link_config.advertising; 4646 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL; 4647 4648 advmsk = ADVERTISE_ALL; 4649 if (tp->link_config.active_duplex == DUPLEX_FULL) { 4650 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl); 4651 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 4652 } 4653 4654 if (tg3_readphy(tp, MII_ADVERTISE, lcladv)) 4655 return false; 4656 4657 if ((*lcladv & advmsk) != tgtadv) 4658 return false; 4659 4660 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4661 u32 tg3_ctrl; 4662 4663 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising); 4664 4665 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl)) 4666 return false; 4667 4668 if (tgtadv && 4669 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || 4670 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) { 4671 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER; 4672 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL | 4673 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER); 4674 } else { 4675 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL); 4676 } 4677 4678 if (tg3_ctrl != tgtadv) 4679 return false; 4680 } 4681 4682 return true; 4683 } 4684 4685 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv) 4686 { 4687 u32 lpeth = 0; 4688 4689 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4690 u32 val; 4691 4692 if (tg3_readphy(tp, MII_STAT1000, &val)) 4693 return false; 4694 4695 lpeth = mii_stat1000_to_ethtool_lpa_t(val); 4696 } 4697 4698 if (tg3_readphy(tp, MII_LPA, rmtadv)) 4699 return false; 4700 4701 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv); 4702 tp->link_config.rmt_adv = lpeth; 4703 4704 return true; 4705 } 4706 4707 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up) 4708 { 4709 if (curr_link_up != tp->link_up) { 4710 if (curr_link_up) { 4711 netif_carrier_on(tp->dev); 4712 } else { 4713 netif_carrier_off(tp->dev); 4714 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 4715 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 4716 } 4717 4718 tg3_link_report(tp); 4719 return true; 4720 } 4721 4722 return false; 4723 } 4724 4725 static void tg3_clear_mac_status(struct tg3 *tp) 4726 { 4727 tw32(MAC_EVENT, 0); 4728 4729 tw32_f(MAC_STATUS, 4730 MAC_STATUS_SYNC_CHANGED | 4731 MAC_STATUS_CFG_CHANGED | 4732 MAC_STATUS_MI_COMPLETION | 4733 MAC_STATUS_LNKSTATE_CHANGED); 4734 udelay(40); 4735 } 4736 4737 static void tg3_setup_eee(struct tg3 *tp) 4738 { 4739 u32 val; 4740 4741 
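	/* Select the CPMU link-idle detect sources (PCIe NL0 and
	 * UART idle) before enabling EEE.
	 */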
val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 | 4742 TG3_CPMU_EEE_LNKIDL_UART_IDL; 4743 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) 4744 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT; 4745 4746 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val); 4747 4748 tw32_f(TG3_CPMU_EEE_CTRL, 4749 TG3_CPMU_EEE_CTRL_EXIT_20_1_US); 4750 4751 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET | 4752 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) | 4753 TG3_CPMU_EEEMD_LPI_IN_RX | 4754 TG3_CPMU_EEEMD_EEE_ENABLE; 4755 4756 if (tg3_asic_rev(tp) != ASIC_REV_5717) 4757 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN; 4758 4759 if (tg3_flag(tp, ENABLE_APE)) 4760 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN; 4761 4762 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0); 4763 4764 tw32_f(TG3_CPMU_EEE_DBTMR1, 4765 TG3_CPMU_DBTMR1_PCIEXIT_2047US | 4766 (tp->eee.tx_lpi_timer & 0xffff)); 4767 4768 tw32_f(TG3_CPMU_EEE_DBTMR2, 4769 TG3_CPMU_DBTMR2_APE_TX_2047US | 4770 TG3_CPMU_DBTMR2_TXIDXEQ_2047US); 4771 } 4772 4773 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset) 4774 { 4775 bool current_link_up; 4776 u32 bmsr, val; 4777 u32 lcl_adv, rmt_adv; 4778 u32 current_speed; 4779 u8 current_duplex; 4780 int i, err; 4781 4782 tg3_clear_mac_status(tp); 4783 4784 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { 4785 tw32_f(MAC_MI_MODE, 4786 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); 4787 udelay(80); 4788 } 4789 4790 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0); 4791 4792 /* Some third-party PHYs need to be reset on link going 4793 * down. 4794 */ 4795 if ((tg3_asic_rev(tp) == ASIC_REV_5703 || 4796 tg3_asic_rev(tp) == ASIC_REV_5704 || 4797 tg3_asic_rev(tp) == ASIC_REV_5705) && 4798 tp->link_up) { 4799 tg3_readphy(tp, MII_BMSR, &bmsr); 4800 if (!tg3_readphy(tp, MII_BMSR, &bmsr) && 4801 !(bmsr & BMSR_LSTATUS)) 4802 force_reset = true; 4803 } 4804 if (force_reset) 4805 tg3_phy_reset(tp); 4806 4807 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { 4808 tg3_readphy(tp, MII_BMSR, &bmsr); 4809 if (tg3_readphy(tp, MII_BMSR, &bmsr) || 4810 !tg3_flag(tp, INIT_COMPLETE)) 4811 bmsr = 0; 4812 4813 if (!(bmsr & BMSR_LSTATUS)) { 4814 err = tg3_init_5401phy_dsp(tp); 4815 if (err) 4816 return err; 4817 4818 tg3_readphy(tp, MII_BMSR, &bmsr); 4819 for (i = 0; i < 1000; i++) { 4820 udelay(10); 4821 if (!tg3_readphy(tp, MII_BMSR, &bmsr) && 4822 (bmsr & BMSR_LSTATUS)) { 4823 udelay(40); 4824 break; 4825 } 4826 } 4827 4828 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) == 4829 TG3_PHY_REV_BCM5401_B0 && 4830 !(bmsr & BMSR_LSTATUS) && 4831 tp->link_config.active_speed == SPEED_1000) { 4832 err = tg3_phy_reset(tp); 4833 if (!err) 4834 err = tg3_init_5401phy_dsp(tp); 4835 if (err) 4836 return err; 4837 } 4838 } 4839 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || 4840 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) { 4841 /* 5701 {A0,B0} CRC bug workaround */ 4842 tg3_writephy(tp, 0x15, 0x0a75); 4843 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68); 4844 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); 4845 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68); 4846 } 4847 4848 /* Clear pending interrupts... 
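 * by reading the latched interrupt status register twice.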
*/ 4849 tg3_readphy(tp, MII_TG3_ISTAT, &val); 4850 tg3_readphy(tp, MII_TG3_ISTAT, &val); 4851 4852 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) 4853 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG); 4854 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) 4855 tg3_writephy(tp, MII_TG3_IMASK, ~0); 4856 4857 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 4858 tg3_asic_rev(tp) == ASIC_REV_5701) { 4859 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1) 4860 tg3_writephy(tp, MII_TG3_EXT_CTRL, 4861 MII_TG3_EXT_CTRL_LNK3_LED_MODE); 4862 else 4863 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0); 4864 } 4865 4866 current_link_up = false; 4867 current_speed = SPEED_UNKNOWN; 4868 current_duplex = DUPLEX_UNKNOWN; 4869 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE; 4870 tp->link_config.rmt_adv = 0; 4871 4872 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) { 4873 err = tg3_phy_auxctl_read(tp, 4874 MII_TG3_AUXCTL_SHDWSEL_MISCTEST, 4875 &val); 4876 if (!err && !(val & (1 << 10))) { 4877 tg3_phy_auxctl_write(tp, 4878 MII_TG3_AUXCTL_SHDWSEL_MISCTEST, 4879 val | (1 << 10)); 4880 goto relink; 4881 } 4882 } 4883 4884 bmsr = 0; 4885 for (i = 0; i < 100; i++) { 4886 tg3_readphy(tp, MII_BMSR, &bmsr); 4887 if (!tg3_readphy(tp, MII_BMSR, &bmsr) && 4888 (bmsr & BMSR_LSTATUS)) 4889 break; 4890 udelay(40); 4891 } 4892 4893 if (bmsr & BMSR_LSTATUS) { 4894 u32 aux_stat, bmcr; 4895 4896 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat); 4897 for (i = 0; i < 2000; i++) { 4898 udelay(10); 4899 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) && 4900 aux_stat) 4901 break; 4902 } 4903 4904 tg3_aux_stat_to_speed_duplex(tp, aux_stat, 4905 &current_speed, 4906 &current_duplex); 4907 4908 bmcr = 0; 4909 for (i = 0; i < 200; i++) { 4910 tg3_readphy(tp, MII_BMCR, &bmcr); 4911 if (tg3_readphy(tp, MII_BMCR, &bmcr)) 4912 continue; 4913 if (bmcr && bmcr != 0x7fff) 4914 break; 4915 udelay(10); 4916 } 4917 4918 lcl_adv = 0; 4919 rmt_adv = 0; 4920 4921 tp->link_config.active_speed = current_speed; 4922 tp->link_config.active_duplex = current_duplex; 4923 4924 if (tp->link_config.autoneg == AUTONEG_ENABLE) { 4925 bool eee_config_ok = tg3_phy_eee_config_ok(tp); 4926 4927 if ((bmcr & BMCR_ANENABLE) && 4928 eee_config_ok && 4929 tg3_phy_copper_an_config_ok(tp, &lcl_adv) && 4930 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv)) 4931 current_link_up = true; 4932 4933 /* EEE settings changes take effect only after a phy 4934 * reset. If we have skipped a reset due to Link Flap 4935 * Avoidance being enabled, do it now.
4936 */ 4937 if (!eee_config_ok && 4938 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && 4939 !force_reset) { 4940 tg3_setup_eee(tp); 4941 tg3_phy_reset(tp); 4942 } 4943 } else { 4944 if (!(bmcr & BMCR_ANENABLE) && 4945 tp->link_config.speed == current_speed && 4946 tp->link_config.duplex == current_duplex) { 4947 current_link_up = true; 4948 } 4949 } 4950 4951 if (current_link_up && 4952 tp->link_config.active_duplex == DUPLEX_FULL) { 4953 u32 reg, bit; 4954 4955 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 4956 reg = MII_TG3_FET_GEN_STAT; 4957 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT; 4958 } else { 4959 reg = MII_TG3_EXT_STAT; 4960 bit = MII_TG3_EXT_STAT_MDIX; 4961 } 4962 4963 if (!tg3_readphy(tp, reg, &val) && (val & bit)) 4964 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE; 4965 4966 tg3_setup_flow_control(tp, lcl_adv, rmt_adv); 4967 } 4968 } 4969 4970 relink: 4971 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { 4972 tg3_phy_copper_begin(tp); 4973 4974 if (tg3_flag(tp, ROBOSWITCH)) { 4975 current_link_up = true; 4976 /* FIXME: when BCM5325 switch is used use 100 MBit/s */ 4977 current_speed = SPEED_1000; 4978 current_duplex = DUPLEX_FULL; 4979 tp->link_config.active_speed = current_speed; 4980 tp->link_config.active_duplex = current_duplex; 4981 } 4982 4983 tg3_readphy(tp, MII_BMSR, &bmsr); 4984 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) || 4985 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)) 4986 current_link_up = true; 4987 } 4988 4989 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK; 4990 if (current_link_up) { 4991 if (tp->link_config.active_speed == SPEED_100 || 4992 tp->link_config.active_speed == SPEED_10) 4993 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 4994 else 4995 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 4996 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) 4997 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 4998 else 4999 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 5000 5001 /* In order for the 5750 core in BCM4785 chip to work properly 5002 * in RGMII mode, the Led Control Register must be set up. 5003 */ 5004 if (tg3_flag(tp, RGMII_MODE)) { 5005 u32 led_ctrl = tr32(MAC_LED_CTRL); 5006 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON); 5007 5008 if (tp->link_config.active_speed == SPEED_10) 5009 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE; 5010 else if (tp->link_config.active_speed == SPEED_100) 5011 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE | 5012 LED_CTRL_100MBPS_ON); 5013 else if (tp->link_config.active_speed == SPEED_1000) 5014 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE | 5015 LED_CTRL_1000MBPS_ON); 5016 5017 tw32(MAC_LED_CTRL, led_ctrl); 5018 udelay(40); 5019 } 5020 5021 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; 5022 if (tp->link_config.active_duplex == DUPLEX_HALF) 5023 tp->mac_mode |= MAC_MODE_HALF_DUPLEX; 5024 5025 if (tg3_asic_rev(tp) == ASIC_REV_5700) { 5026 if (current_link_up && 5027 tg3_5700_link_polarity(tp, tp->link_config.active_speed)) 5028 tp->mac_mode |= MAC_MODE_LINK_POLARITY; 5029 else 5030 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; 5031 } 5032 5033 /* ??? Without this setting Netgear GA302T PHY does not 5034 * ??? send/receive packets... 5035 */ 5036 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 && 5037 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) { 5038 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL; 5039 tw32_f(MAC_MI_MODE, tp->mi_mode); 5040 udelay(80); 5041 } 5042 5043 tw32_f(MAC_MODE, tp->mac_mode); 5044 udelay(40); 5045 5046 tg3_phy_eee_adjust(tp, current_link_up); 5047 5048 if (tg3_flag(tp, USE_LINKCHG_REG)) { 5049 /* Polled via timer. 
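 * Link changes are detected by the timer reading the link
 * status register, so the MAC link-change event stays masked.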
*/ 5050 tw32_f(MAC_EVENT, 0); 5051 } else { 5052 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); 5053 } 5054 udelay(40); 5055 5056 if (tg3_asic_rev(tp) == ASIC_REV_5700 && 5057 current_link_up && 5058 tp->link_config.active_speed == SPEED_1000 && 5059 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) { 5060 udelay(120); 5061 tw32_f(MAC_STATUS, 5062 (MAC_STATUS_SYNC_CHANGED | 5063 MAC_STATUS_CFG_CHANGED)); 5064 udelay(40); 5065 tg3_write_mem(tp, 5066 NIC_SRAM_FIRMWARE_MBOX, 5067 NIC_SRAM_FIRMWARE_MBOX_MAGIC2); 5068 } 5069 5070 /* Prevent send BD corruption. */ 5071 if (tg3_flag(tp, CLKREQ_BUG)) { 5072 if (tp->link_config.active_speed == SPEED_100 || 5073 tp->link_config.active_speed == SPEED_10) 5074 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL, 5075 PCI_EXP_LNKCTL_CLKREQ_EN); 5076 else 5077 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL, 5078 PCI_EXP_LNKCTL_CLKREQ_EN); 5079 } 5080 5081 tg3_test_and_report_link_chg(tp, current_link_up); 5082 5083 return 0; 5084 } 5085 5086 struct tg3_fiber_aneginfo { 5087 int state; 5088 #define ANEG_STATE_UNKNOWN 0 5089 #define ANEG_STATE_AN_ENABLE 1 5090 #define ANEG_STATE_RESTART_INIT 2 5091 #define ANEG_STATE_RESTART 3 5092 #define ANEG_STATE_DISABLE_LINK_OK 4 5093 #define ANEG_STATE_ABILITY_DETECT_INIT 5 5094 #define ANEG_STATE_ABILITY_DETECT 6 5095 #define ANEG_STATE_ACK_DETECT_INIT 7 5096 #define ANEG_STATE_ACK_DETECT 8 5097 #define ANEG_STATE_COMPLETE_ACK_INIT 9 5098 #define ANEG_STATE_COMPLETE_ACK 10 5099 #define ANEG_STATE_IDLE_DETECT_INIT 11 5100 #define ANEG_STATE_IDLE_DETECT 12 5101 #define ANEG_STATE_LINK_OK 13 5102 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14 5103 #define ANEG_STATE_NEXT_PAGE_WAIT 15 5104 5105 u32 flags; 5106 #define MR_AN_ENABLE 0x00000001 5107 #define MR_RESTART_AN 0x00000002 5108 #define MR_AN_COMPLETE 0x00000004 5109 #define MR_PAGE_RX 0x00000008 5110 #define MR_NP_LOADED 0x00000010 5111 #define MR_TOGGLE_TX 0x00000020 5112 #define MR_LP_ADV_FULL_DUPLEX 0x00000040 5113 #define MR_LP_ADV_HALF_DUPLEX 0x00000080 5114 #define MR_LP_ADV_SYM_PAUSE 0x00000100 5115 #define MR_LP_ADV_ASYM_PAUSE 0x00000200 5116 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400 5117 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800 5118 #define MR_LP_ADV_NEXT_PAGE 0x00001000 5119 #define MR_TOGGLE_RX 0x00002000 5120 #define MR_NP_RX 0x00004000 5121 5122 #define MR_LINK_OK 0x80000000 5123 5124 unsigned long link_time, cur_time; 5125 5126 u32 ability_match_cfg; 5127 int ability_match_count; 5128 5129 char ability_match, idle_match, ack_match; 5130 5131 u32 txconfig, rxconfig; 5132 #define ANEG_CFG_NP 0x00000080 5133 #define ANEG_CFG_ACK 0x00000040 5134 #define ANEG_CFG_RF2 0x00000020 5135 #define ANEG_CFG_RF1 0x00000010 5136 #define ANEG_CFG_PS2 0x00000001 5137 #define ANEG_CFG_PS1 0x00008000 5138 #define ANEG_CFG_HD 0x00004000 5139 #define ANEG_CFG_FD 0x00002000 5140 #define ANEG_CFG_INVAL 0x00001f06 5141 5142 }; 5143 #define ANEG_OK 0 5144 #define ANEG_DONE 1 5145 #define ANEG_TIMER_ENAB 2 5146 #define ANEG_FAILED -1 5147 5148 #define ANEG_STATE_SETTLE_TIME 10000 5149 5150 static int tg3_fiber_aneg_smachine(struct tg3 *tp, 5151 struct tg3_fiber_aneginfo *ap) 5152 { 5153 u16 flowctrl; 5154 unsigned long delta; 5155 u32 rx_cfg_reg; 5156 int ret; 5157 5158 if (ap->state == ANEG_STATE_UNKNOWN) { 5159 ap->rxconfig = 0; 5160 ap->link_time = 0; 5161 ap->cur_time = 0; 5162 ap->ability_match_cfg = 0; 5163 ap->ability_match_count = 0; 5164 ap->ability_match = 0; 5165 ap->idle_match = 0; 5166 ap->ack_match = 0; 5167 } 5168 ap->cur_time++; 5169 5170 if 
(tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) { 5171 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG); 5172 5173 if (rx_cfg_reg != ap->ability_match_cfg) { 5174 ap->ability_match_cfg = rx_cfg_reg; 5175 ap->ability_match = 0; 5176 ap->ability_match_count = 0; 5177 } else { 5178 if (++ap->ability_match_count > 1) { 5179 ap->ability_match = 1; 5180 ap->ability_match_cfg = rx_cfg_reg; 5181 } 5182 } 5183 if (rx_cfg_reg & ANEG_CFG_ACK) 5184 ap->ack_match = 1; 5185 else 5186 ap->ack_match = 0; 5187 5188 ap->idle_match = 0; 5189 } else { 5190 ap->idle_match = 1; 5191 ap->ability_match_cfg = 0; 5192 ap->ability_match_count = 0; 5193 ap->ability_match = 0; 5194 ap->ack_match = 0; 5195 5196 rx_cfg_reg = 0; 5197 } 5198 5199 ap->rxconfig = rx_cfg_reg; 5200 ret = ANEG_OK; 5201 5202 switch (ap->state) { 5203 case ANEG_STATE_UNKNOWN: 5204 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN)) 5205 ap->state = ANEG_STATE_AN_ENABLE; 5206 5207 fallthrough; 5208 case ANEG_STATE_AN_ENABLE: 5209 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX); 5210 if (ap->flags & MR_AN_ENABLE) { 5211 ap->link_time = 0; 5212 ap->cur_time = 0; 5213 ap->ability_match_cfg = 0; 5214 ap->ability_match_count = 0; 5215 ap->ability_match = 0; 5216 ap->idle_match = 0; 5217 ap->ack_match = 0; 5218 5219 ap->state = ANEG_STATE_RESTART_INIT; 5220 } else { 5221 ap->state = ANEG_STATE_DISABLE_LINK_OK; 5222 } 5223 break; 5224 5225 case ANEG_STATE_RESTART_INIT: 5226 ap->link_time = ap->cur_time; 5227 ap->flags &= ~(MR_NP_LOADED); 5228 ap->txconfig = 0; 5229 tw32(MAC_TX_AUTO_NEG, 0); 5230 tp->mac_mode |= MAC_MODE_SEND_CONFIGS; 5231 tw32_f(MAC_MODE, tp->mac_mode); 5232 udelay(40); 5233 5234 ret = ANEG_TIMER_ENAB; 5235 ap->state = ANEG_STATE_RESTART; 5236 5237 fallthrough; 5238 case ANEG_STATE_RESTART: 5239 delta = ap->cur_time - ap->link_time; 5240 if (delta > ANEG_STATE_SETTLE_TIME) 5241 ap->state = ANEG_STATE_ABILITY_DETECT_INIT; 5242 else 5243 ret = ANEG_TIMER_ENAB; 5244 break; 5245 5246 case ANEG_STATE_DISABLE_LINK_OK: 5247 ret = ANEG_DONE; 5248 break; 5249 5250 case ANEG_STATE_ABILITY_DETECT_INIT: 5251 ap->flags &= ~(MR_TOGGLE_TX); 5252 ap->txconfig = ANEG_CFG_FD; 5253 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); 5254 if (flowctrl & ADVERTISE_1000XPAUSE) 5255 ap->txconfig |= ANEG_CFG_PS1; 5256 if (flowctrl & ADVERTISE_1000XPSE_ASYM) 5257 ap->txconfig |= ANEG_CFG_PS2; 5258 tw32(MAC_TX_AUTO_NEG, ap->txconfig); 5259 tp->mac_mode |= MAC_MODE_SEND_CONFIGS; 5260 tw32_f(MAC_MODE, tp->mac_mode); 5261 udelay(40); 5262 5263 ap->state = ANEG_STATE_ABILITY_DETECT; 5264 break; 5265 5266 case ANEG_STATE_ABILITY_DETECT: 5267 if (ap->ability_match != 0 && ap->rxconfig != 0) 5268 ap->state = ANEG_STATE_ACK_DETECT_INIT; 5269 break; 5270 5271 case ANEG_STATE_ACK_DETECT_INIT: 5272 ap->txconfig |= ANEG_CFG_ACK; 5273 tw32(MAC_TX_AUTO_NEG, ap->txconfig); 5274 tp->mac_mode |= MAC_MODE_SEND_CONFIGS; 5275 tw32_f(MAC_MODE, tp->mac_mode); 5276 udelay(40); 5277 5278 ap->state = ANEG_STATE_ACK_DETECT; 5279 5280 fallthrough; 5281 case ANEG_STATE_ACK_DETECT: 5282 if (ap->ack_match != 0) { 5283 if ((ap->rxconfig & ~ANEG_CFG_ACK) == 5284 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) { 5285 ap->state = ANEG_STATE_COMPLETE_ACK_INIT; 5286 } else { 5287 ap->state = ANEG_STATE_AN_ENABLE; 5288 } 5289 } else if (ap->ability_match != 0 && 5290 ap->rxconfig == 0) { 5291 ap->state = ANEG_STATE_AN_ENABLE; 5292 } 5293 break; 5294 5295 case ANEG_STATE_COMPLETE_ACK_INIT: 5296 if (ap->rxconfig & ANEG_CFG_INVAL) { 5297 ret = ANEG_FAILED; 5298 break; 5299 } 5300 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX | 5301 
MR_LP_ADV_HALF_DUPLEX | 5302 MR_LP_ADV_SYM_PAUSE | 5303 MR_LP_ADV_ASYM_PAUSE | 5304 MR_LP_ADV_REMOTE_FAULT1 | 5305 MR_LP_ADV_REMOTE_FAULT2 | 5306 MR_LP_ADV_NEXT_PAGE | 5307 MR_TOGGLE_RX | 5308 MR_NP_RX); 5309 if (ap->rxconfig & ANEG_CFG_FD) 5310 ap->flags |= MR_LP_ADV_FULL_DUPLEX; 5311 if (ap->rxconfig & ANEG_CFG_HD) 5312 ap->flags |= MR_LP_ADV_HALF_DUPLEX; 5313 if (ap->rxconfig & ANEG_CFG_PS1) 5314 ap->flags |= MR_LP_ADV_SYM_PAUSE; 5315 if (ap->rxconfig & ANEG_CFG_PS2) 5316 ap->flags |= MR_LP_ADV_ASYM_PAUSE; 5317 if (ap->rxconfig & ANEG_CFG_RF1) 5318 ap->flags |= MR_LP_ADV_REMOTE_FAULT1; 5319 if (ap->rxconfig & ANEG_CFG_RF2) 5320 ap->flags |= MR_LP_ADV_REMOTE_FAULT2; 5321 if (ap->rxconfig & ANEG_CFG_NP) 5322 ap->flags |= MR_LP_ADV_NEXT_PAGE; 5323 5324 ap->link_time = ap->cur_time; 5325 5326 ap->flags ^= (MR_TOGGLE_TX); 5327 if (ap->rxconfig & 0x0008) 5328 ap->flags |= MR_TOGGLE_RX; 5329 if (ap->rxconfig & ANEG_CFG_NP) 5330 ap->flags |= MR_NP_RX; 5331 ap->flags |= MR_PAGE_RX; 5332 5333 ap->state = ANEG_STATE_COMPLETE_ACK; 5334 ret = ANEG_TIMER_ENAB; 5335 break; 5336 5337 case ANEG_STATE_COMPLETE_ACK: 5338 if (ap->ability_match != 0 && 5339 ap->rxconfig == 0) { 5340 ap->state = ANEG_STATE_AN_ENABLE; 5341 break; 5342 } 5343 delta = ap->cur_time - ap->link_time; 5344 if (delta > ANEG_STATE_SETTLE_TIME) { 5345 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) { 5346 ap->state = ANEG_STATE_IDLE_DETECT_INIT; 5347 } else { 5348 if ((ap->txconfig & ANEG_CFG_NP) == 0 && 5349 !(ap->flags & MR_NP_RX)) { 5350 ap->state = ANEG_STATE_IDLE_DETECT_INIT; 5351 } else { 5352 ret = ANEG_FAILED; 5353 } 5354 } 5355 } 5356 break; 5357 5358 case ANEG_STATE_IDLE_DETECT_INIT: 5359 ap->link_time = ap->cur_time; 5360 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; 5361 tw32_f(MAC_MODE, tp->mac_mode); 5362 udelay(40); 5363 5364 ap->state = ANEG_STATE_IDLE_DETECT; 5365 ret = ANEG_TIMER_ENAB; 5366 break; 5367 5368 case ANEG_STATE_IDLE_DETECT: 5369 if (ap->ability_match != 0 && 5370 ap->rxconfig == 0) { 5371 ap->state = ANEG_STATE_AN_ENABLE; 5372 break; 5373 } 5374 delta = ap->cur_time - ap->link_time; 5375 if (delta > ANEG_STATE_SETTLE_TIME) { 5376 /* XXX another gem from the Broadcom driver :( */ 5377 ap->state = ANEG_STATE_LINK_OK; 5378 } 5379 break; 5380 5381 case ANEG_STATE_LINK_OK: 5382 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK); 5383 ret = ANEG_DONE; 5384 break; 5385 5386 case ANEG_STATE_NEXT_PAGE_WAIT_INIT: 5387 /* ??? unimplemented */ 5388 break; 5389 5390 case ANEG_STATE_NEXT_PAGE_WAIT: 5391 /* ??? 
unimplemented */ 5392 break; 5393 5394 default: 5395 ret = ANEG_FAILED; 5396 break; 5397 } 5398 5399 return ret; 5400 } 5401 5402 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags) 5403 { 5404 int res = 0; 5405 struct tg3_fiber_aneginfo aninfo; 5406 int status = ANEG_FAILED; 5407 unsigned int tick; 5408 u32 tmp; 5409 5410 tw32_f(MAC_TX_AUTO_NEG, 0); 5411 5412 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; 5413 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII); 5414 udelay(40); 5415 5416 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS); 5417 udelay(40); 5418 5419 memset(&aninfo, 0, sizeof(aninfo)); 5420 aninfo.flags |= MR_AN_ENABLE; 5421 aninfo.state = ANEG_STATE_UNKNOWN; 5422 aninfo.cur_time = 0; 5423 tick = 0; 5424 while (++tick < 195000) { 5425 status = tg3_fiber_aneg_smachine(tp, &aninfo); 5426 if (status == ANEG_DONE || status == ANEG_FAILED) 5427 break; 5428 5429 udelay(1); 5430 } 5431 5432 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; 5433 tw32_f(MAC_MODE, tp->mac_mode); 5434 udelay(40); 5435 5436 *txflags = aninfo.txconfig; 5437 *rxflags = aninfo.flags; 5438 5439 if (status == ANEG_DONE && 5440 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK | 5441 MR_LP_ADV_FULL_DUPLEX))) 5442 res = 1; 5443 5444 return res; 5445 } 5446 5447 static void tg3_init_bcm8002(struct tg3 *tp) 5448 { 5449 u32 mac_status = tr32(MAC_STATUS); 5450 int i; 5451 5452 /* Reset when initting first time or we have a link. */ 5453 if (tg3_flag(tp, INIT_COMPLETE) && 5454 !(mac_status & MAC_STATUS_PCS_SYNCED)) 5455 return; 5456 5457 /* Set PLL lock range. */ 5458 tg3_writephy(tp, 0x16, 0x8007); 5459 5460 /* SW reset */ 5461 tg3_writephy(tp, MII_BMCR, BMCR_RESET); 5462 5463 /* Wait for reset to complete. */ 5464 /* XXX schedule_timeout() ... */ 5465 for (i = 0; i < 500; i++) 5466 udelay(10); 5467 5468 /* Config mode; select PMA/Ch 1 regs. */ 5469 tg3_writephy(tp, 0x10, 0x8411); 5470 5471 /* Enable auto-lock and comdet, select txclk for tx. */ 5472 tg3_writephy(tp, 0x11, 0x0a10); 5473 5474 tg3_writephy(tp, 0x18, 0x00a0); 5475 tg3_writephy(tp, 0x16, 0x41ff); 5476 5477 /* Assert and deassert POR. */ 5478 tg3_writephy(tp, 0x13, 0x0400); 5479 udelay(40); 5480 tg3_writephy(tp, 0x13, 0x0000); 5481 5482 tg3_writephy(tp, 0x11, 0x0a50); 5483 udelay(40); 5484 tg3_writephy(tp, 0x11, 0x0a10); 5485 5486 /* Wait for signal to stabilize */ 5487 /* XXX schedule_timeout() ... */ 5488 for (i = 0; i < 15000; i++) 5489 udelay(10); 5490 5491 /* Deselect the channel register so we can read the PHYID 5492 * later. 
5493 */ 5494 tg3_writephy(tp, 0x10, 0x8011); 5495 } 5496 5497 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status) 5498 { 5499 u16 flowctrl; 5500 bool current_link_up; 5501 u32 sg_dig_ctrl, sg_dig_status; 5502 u32 serdes_cfg, expected_sg_dig_ctrl; 5503 int workaround, port_a; 5504 5505 serdes_cfg = 0; 5506 expected_sg_dig_ctrl = 0; 5507 workaround = 0; 5508 port_a = 1; 5509 current_link_up = false; 5510 5511 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 && 5512 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) { 5513 workaround = 1; 5514 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) 5515 port_a = 0; 5516 5517 /* preserve bits 0-11,13,14 for signal pre-emphasis */ 5518 /* preserve bits 20-23 for voltage regulator */ 5519 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff; 5520 } 5521 5522 sg_dig_ctrl = tr32(SG_DIG_CTRL); 5523 5524 if (tp->link_config.autoneg != AUTONEG_ENABLE) { 5525 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) { 5526 if (workaround) { 5527 u32 val = serdes_cfg; 5528 5529 if (port_a) 5530 val |= 0xc010000; 5531 else 5532 val |= 0x4010000; 5533 tw32_f(MAC_SERDES_CFG, val); 5534 } 5535 5536 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP); 5537 } 5538 if (mac_status & MAC_STATUS_PCS_SYNCED) { 5539 tg3_setup_flow_control(tp, 0, 0); 5540 current_link_up = true; 5541 } 5542 goto out; 5543 } 5544 5545 /* Want auto-negotiation. */ 5546 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP; 5547 5548 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); 5549 if (flowctrl & ADVERTISE_1000XPAUSE) 5550 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP; 5551 if (flowctrl & ADVERTISE_1000XPSE_ASYM) 5552 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE; 5553 5554 if (sg_dig_ctrl != expected_sg_dig_ctrl) { 5555 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) && 5556 tp->serdes_counter && 5557 ((mac_status & (MAC_STATUS_PCS_SYNCED | 5558 MAC_STATUS_RCVD_CFG)) == 5559 MAC_STATUS_PCS_SYNCED)) { 5560 tp->serdes_counter--; 5561 current_link_up = true; 5562 goto out; 5563 } 5564 restart_autoneg: 5565 if (workaround) 5566 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000); 5567 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET); 5568 udelay(5); 5569 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl); 5570 5571 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; 5572 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5573 } else if (mac_status & (MAC_STATUS_PCS_SYNCED | 5574 MAC_STATUS_SIGNAL_DET)) { 5575 sg_dig_status = tr32(SG_DIG_STATUS); 5576 mac_status = tr32(MAC_STATUS); 5577 5578 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) && 5579 (mac_status & MAC_STATUS_PCS_SYNCED)) { 5580 u32 local_adv = 0, remote_adv = 0; 5581 5582 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP) 5583 local_adv |= ADVERTISE_1000XPAUSE; 5584 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE) 5585 local_adv |= ADVERTISE_1000XPSE_ASYM; 5586 5587 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE) 5588 remote_adv |= LPA_1000XPAUSE; 5589 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE) 5590 remote_adv |= LPA_1000XPAUSE_ASYM; 5591 5592 tp->link_config.rmt_adv = 5593 mii_adv_to_ethtool_adv_x(remote_adv); 5594 5595 tg3_setup_flow_control(tp, local_adv, remote_adv); 5596 current_link_up = true; 5597 tp->serdes_counter = 0; 5598 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5599 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) { 5600 if (tp->serdes_counter) 5601 tp->serdes_counter--; 5602 else { 5603 if (workaround) { 5604 u32 val = serdes_cfg; 5605 5606 if (port_a) 5607 val |= 0xc010000; 5608 else 5609 val |= 0x4010000; 5610 5611 
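					/* Autoneg never completed; rewrite the
					 * preserved SERDES config before falling
					 * back to parallel detection.
					 */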
tw32_f(MAC_SERDES_CFG, val); 5612 } 5613 5614 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP); 5615 udelay(40); 5616 5617 /* Link parallel detection - link is up */ 5618 /* only if we have PCS_SYNC and not */ 5619 /* receiving config code words */ 5620 mac_status = tr32(MAC_STATUS); 5621 if ((mac_status & MAC_STATUS_PCS_SYNCED) && 5622 !(mac_status & MAC_STATUS_RCVD_CFG)) { 5623 tg3_setup_flow_control(tp, 0, 0); 5624 current_link_up = true; 5625 tp->phy_flags |= 5626 TG3_PHYFLG_PARALLEL_DETECT; 5627 tp->serdes_counter = 5628 SERDES_PARALLEL_DET_TIMEOUT; 5629 } else 5630 goto restart_autoneg; 5631 } 5632 } 5633 } else { 5634 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; 5635 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5636 } 5637 5638 out: 5639 return current_link_up; 5640 } 5641 5642 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status) 5643 { 5644 bool current_link_up = false; 5645 5646 if (!(mac_status & MAC_STATUS_PCS_SYNCED)) 5647 goto out; 5648 5649 if (tp->link_config.autoneg == AUTONEG_ENABLE) { 5650 u32 txflags, rxflags; 5651 int i; 5652 5653 if (fiber_autoneg(tp, &txflags, &rxflags)) { 5654 u32 local_adv = 0, remote_adv = 0; 5655 5656 if (txflags & ANEG_CFG_PS1) 5657 local_adv |= ADVERTISE_1000XPAUSE; 5658 if (txflags & ANEG_CFG_PS2) 5659 local_adv |= ADVERTISE_1000XPSE_ASYM; 5660 5661 if (rxflags & MR_LP_ADV_SYM_PAUSE) 5662 remote_adv |= LPA_1000XPAUSE; 5663 if (rxflags & MR_LP_ADV_ASYM_PAUSE) 5664 remote_adv |= LPA_1000XPAUSE_ASYM; 5665 5666 tp->link_config.rmt_adv = 5667 mii_adv_to_ethtool_adv_x(remote_adv); 5668 5669 tg3_setup_flow_control(tp, local_adv, remote_adv); 5670 5671 current_link_up = true; 5672 } 5673 for (i = 0; i < 30; i++) { 5674 udelay(20); 5675 tw32_f(MAC_STATUS, 5676 (MAC_STATUS_SYNC_CHANGED | 5677 MAC_STATUS_CFG_CHANGED)); 5678 udelay(40); 5679 if ((tr32(MAC_STATUS) & 5680 (MAC_STATUS_SYNC_CHANGED | 5681 MAC_STATUS_CFG_CHANGED)) == 0) 5682 break; 5683 } 5684 5685 mac_status = tr32(MAC_STATUS); 5686 if (!current_link_up && 5687 (mac_status & MAC_STATUS_PCS_SYNCED) && 5688 !(mac_status & MAC_STATUS_RCVD_CFG)) 5689 current_link_up = true; 5690 } else { 5691 tg3_setup_flow_control(tp, 0, 0); 5692 5693 /* Forcing 1000FD link up. 
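 * With autoneg off, the TBI port only runs 1000 Mb/s full
 * duplex, and PCS sync was already confirmed above.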
*/ 5694 current_link_up = true; 5695 5696 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS)); 5697 udelay(40); 5698 5699 tw32_f(MAC_MODE, tp->mac_mode); 5700 udelay(40); 5701 } 5702 5703 out: 5704 return current_link_up; 5705 } 5706 5707 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset) 5708 { 5709 u32 orig_pause_cfg; 5710 u32 orig_active_speed; 5711 u8 orig_active_duplex; 5712 u32 mac_status; 5713 bool current_link_up; 5714 int i; 5715 5716 orig_pause_cfg = tp->link_config.active_flowctrl; 5717 orig_active_speed = tp->link_config.active_speed; 5718 orig_active_duplex = tp->link_config.active_duplex; 5719 5720 if (!tg3_flag(tp, HW_AUTONEG) && 5721 tp->link_up && 5722 tg3_flag(tp, INIT_COMPLETE)) { 5723 mac_status = tr32(MAC_STATUS); 5724 mac_status &= (MAC_STATUS_PCS_SYNCED | 5725 MAC_STATUS_SIGNAL_DET | 5726 MAC_STATUS_CFG_CHANGED | 5727 MAC_STATUS_RCVD_CFG); 5728 if (mac_status == (MAC_STATUS_PCS_SYNCED | 5729 MAC_STATUS_SIGNAL_DET)) { 5730 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | 5731 MAC_STATUS_CFG_CHANGED)); 5732 return 0; 5733 } 5734 } 5735 5736 tw32_f(MAC_TX_AUTO_NEG, 0); 5737 5738 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); 5739 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI; 5740 tw32_f(MAC_MODE, tp->mac_mode); 5741 udelay(40); 5742 5743 if (tp->phy_id == TG3_PHY_ID_BCM8002) 5744 tg3_init_bcm8002(tp); 5745 5746 /* Enable link change event even when serdes polling. */ 5747 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); 5748 udelay(40); 5749 5750 current_link_up = false; 5751 tp->link_config.rmt_adv = 0; 5752 mac_status = tr32(MAC_STATUS); 5753 5754 if (tg3_flag(tp, HW_AUTONEG)) 5755 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status); 5756 else 5757 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status); 5758 5759 tp->napi[0].hw_status->status = 5760 (SD_STATUS_UPDATED | 5761 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG)); 5762 5763 for (i = 0; i < 100; i++) { 5764 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | 5765 MAC_STATUS_CFG_CHANGED)); 5766 udelay(5); 5767 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED | 5768 MAC_STATUS_CFG_CHANGED | 5769 MAC_STATUS_LNKSTATE_CHANGED)) == 0) 5770 break; 5771 } 5772 5773 mac_status = tr32(MAC_STATUS); 5774 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) { 5775 current_link_up = false; 5776 if (tp->link_config.autoneg == AUTONEG_ENABLE && 5777 tp->serdes_counter == 0) { 5778 tw32_f(MAC_MODE, (tp->mac_mode | 5779 MAC_MODE_SEND_CONFIGS)); 5780 udelay(1); 5781 tw32_f(MAC_MODE, tp->mac_mode); 5782 } 5783 } 5784 5785 if (current_link_up) { 5786 tp->link_config.active_speed = SPEED_1000; 5787 tp->link_config.active_duplex = DUPLEX_FULL; 5788 tw32(MAC_LED_CTRL, (tp->led_ctrl | 5789 LED_CTRL_LNKLED_OVERRIDE | 5790 LED_CTRL_1000MBPS_ON)); 5791 } else { 5792 tp->link_config.active_speed = SPEED_UNKNOWN; 5793 tp->link_config.active_duplex = DUPLEX_UNKNOWN; 5794 tw32(MAC_LED_CTRL, (tp->led_ctrl | 5795 LED_CTRL_LNKLED_OVERRIDE | 5796 LED_CTRL_TRAFFIC_OVERRIDE)); 5797 } 5798 5799 if (!tg3_test_and_report_link_chg(tp, current_link_up)) { 5800 u32 now_pause_cfg = tp->link_config.active_flowctrl; 5801 if (orig_pause_cfg != now_pause_cfg || 5802 orig_active_speed != tp->link_config.active_speed || 5803 orig_active_duplex != tp->link_config.active_duplex) 5804 tg3_link_report(tp); 5805 } 5806 5807 return 0; 5808 } 5809 5810 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset) 5811 { 5812 int err = 0; 5813 u32 bmsr, bmcr; 5814 u32 current_speed = SPEED_UNKNOWN; 5815 u8 current_duplex = 
DUPLEX_UNKNOWN; 5816 bool current_link_up = false; 5817 u32 local_adv, remote_adv, sgsr; 5818 5819 if ((tg3_asic_rev(tp) == ASIC_REV_5719 || 5820 tg3_asic_rev(tp) == ASIC_REV_5720) && 5821 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) && 5822 (sgsr & SERDES_TG3_SGMII_MODE)) { 5823 5824 if (force_reset) 5825 tg3_phy_reset(tp); 5826 5827 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK; 5828 5829 if (!(sgsr & SERDES_TG3_LINK_UP)) { 5830 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 5831 } else { 5832 current_link_up = true; 5833 if (sgsr & SERDES_TG3_SPEED_1000) { 5834 current_speed = SPEED_1000; 5835 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 5836 } else if (sgsr & SERDES_TG3_SPEED_100) { 5837 current_speed = SPEED_100; 5838 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 5839 } else { 5840 current_speed = SPEED_10; 5841 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 5842 } 5843 5844 if (sgsr & SERDES_TG3_FULL_DUPLEX) 5845 current_duplex = DUPLEX_FULL; 5846 else 5847 current_duplex = DUPLEX_HALF; 5848 } 5849 5850 tw32_f(MAC_MODE, tp->mac_mode); 5851 udelay(40); 5852 5853 tg3_clear_mac_status(tp); 5854 5855 goto fiber_setup_done; 5856 } 5857 5858 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 5859 tw32_f(MAC_MODE, tp->mac_mode); 5860 udelay(40); 5861 5862 tg3_clear_mac_status(tp); 5863 5864 if (force_reset) 5865 tg3_phy_reset(tp); 5866 5867 tp->link_config.rmt_adv = 0; 5868 5869 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 5870 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 5871 if (tg3_asic_rev(tp) == ASIC_REV_5714) { 5872 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) 5873 bmsr |= BMSR_LSTATUS; 5874 else 5875 bmsr &= ~BMSR_LSTATUS; 5876 } 5877 5878 err |= tg3_readphy(tp, MII_BMCR, &bmcr); 5879 5880 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset && 5881 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { 5882 /* do nothing, just check for link up at the end */ 5883 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) { 5884 u32 adv, newadv; 5885 5886 err |= tg3_readphy(tp, MII_ADVERTISE, &adv); 5887 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF | 5888 ADVERTISE_1000XPAUSE | 5889 ADVERTISE_1000XPSE_ASYM | 5890 ADVERTISE_SLCT); 5891 5892 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); 5893 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising); 5894 5895 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) { 5896 tg3_writephy(tp, MII_ADVERTISE, newadv); 5897 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART; 5898 tg3_writephy(tp, MII_BMCR, bmcr); 5899 5900 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); 5901 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S; 5902 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5903 5904 return err; 5905 } 5906 } else { 5907 u32 new_bmcr; 5908 5909 bmcr &= ~BMCR_SPEED1000; 5910 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX); 5911 5912 if (tp->link_config.duplex == DUPLEX_FULL) 5913 new_bmcr |= BMCR_FULLDPLX; 5914 5915 if (new_bmcr != bmcr) { 5916 /* BMCR_SPEED1000 is a reserved bit that needs 5917 * to be set on write. 
5918 */ 5919 new_bmcr |= BMCR_SPEED1000; 5920 5921 /* Force a linkdown */ 5922 if (tp->link_up) { 5923 u32 adv; 5924 5925 err |= tg3_readphy(tp, MII_ADVERTISE, &adv); 5926 adv &= ~(ADVERTISE_1000XFULL | 5927 ADVERTISE_1000XHALF | 5928 ADVERTISE_SLCT); 5929 tg3_writephy(tp, MII_ADVERTISE, adv); 5930 tg3_writephy(tp, MII_BMCR, bmcr | 5931 BMCR_ANRESTART | 5932 BMCR_ANENABLE); 5933 udelay(10); 5934 tg3_carrier_off(tp); 5935 } 5936 tg3_writephy(tp, MII_BMCR, new_bmcr); 5937 bmcr = new_bmcr; 5938 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 5939 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 5940 if (tg3_asic_rev(tp) == ASIC_REV_5714) { 5941 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) 5942 bmsr |= BMSR_LSTATUS; 5943 else 5944 bmsr &= ~BMSR_LSTATUS; 5945 } 5946 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5947 } 5948 } 5949 5950 if (bmsr & BMSR_LSTATUS) { 5951 current_speed = SPEED_1000; 5952 current_link_up = true; 5953 if (bmcr & BMCR_FULLDPLX) 5954 current_duplex = DUPLEX_FULL; 5955 else 5956 current_duplex = DUPLEX_HALF; 5957 5958 local_adv = 0; 5959 remote_adv = 0; 5960 5961 if (bmcr & BMCR_ANENABLE) { 5962 u32 common; 5963 5964 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv); 5965 err |= tg3_readphy(tp, MII_LPA, &remote_adv); 5966 common = local_adv & remote_adv; 5967 if (common & (ADVERTISE_1000XHALF | 5968 ADVERTISE_1000XFULL)) { 5969 if (common & ADVERTISE_1000XFULL) 5970 current_duplex = DUPLEX_FULL; 5971 else 5972 current_duplex = DUPLEX_HALF; 5973 5974 tp->link_config.rmt_adv = 5975 mii_adv_to_ethtool_adv_x(remote_adv); 5976 } else if (!tg3_flag(tp, 5780_CLASS)) { 5977 /* Link is up via parallel detect */ 5978 } else { 5979 current_link_up = false; 5980 } 5981 } 5982 } 5983 5984 fiber_setup_done: 5985 if (current_link_up && current_duplex == DUPLEX_FULL) 5986 tg3_setup_flow_control(tp, local_adv, remote_adv); 5987 5988 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; 5989 if (tp->link_config.active_duplex == DUPLEX_HALF) 5990 tp->mac_mode |= MAC_MODE_HALF_DUPLEX; 5991 5992 tw32_f(MAC_MODE, tp->mac_mode); 5993 udelay(40); 5994 5995 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); 5996 5997 tp->link_config.active_speed = current_speed; 5998 tp->link_config.active_duplex = current_duplex; 5999 6000 tg3_test_and_report_link_chg(tp, current_link_up); 6001 return err; 6002 } 6003 6004 static void tg3_serdes_parallel_detect(struct tg3 *tp) 6005 { 6006 if (tp->serdes_counter) { 6007 /* Give autoneg time to complete. */ 6008 tp->serdes_counter--; 6009 return; 6010 } 6011 6012 if (!tp->link_up && 6013 (tp->link_config.autoneg == AUTONEG_ENABLE)) { 6014 u32 bmcr; 6015 6016 tg3_readphy(tp, MII_BMCR, &bmcr); 6017 if (bmcr & BMCR_ANENABLE) { 6018 u32 phy1, phy2; 6019 6020 /* Select shadow register 0x1f */ 6021 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00); 6022 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1); 6023 6024 /* Select expansion interrupt status register */ 6025 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 6026 MII_TG3_DSP_EXP1_INT_STAT); 6027 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); 6028 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); 6029 6030 if ((phy1 & 0x10) && !(phy2 & 0x20)) { 6031 /* We have signal detect and not receiving 6032 * config code words, link is up by parallel 6033 * detection. 
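 * Force 1000FD and flag parallel detect so autoneg can be
 * re-enabled if config code words show up later.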
6034 */ 6035 6036 bmcr &= ~BMCR_ANENABLE; 6037 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX; 6038 tg3_writephy(tp, MII_BMCR, bmcr); 6039 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT; 6040 } 6041 } 6042 } else if (tp->link_up && 6043 (tp->link_config.autoneg == AUTONEG_ENABLE) && 6044 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { 6045 u32 phy2; 6046 6047 /* Select expansion interrupt status register */ 6048 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 6049 MII_TG3_DSP_EXP1_INT_STAT); 6050 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); 6051 if (phy2 & 0x20) { 6052 u32 bmcr; 6053 6054 /* Config code words received, turn on autoneg. */ 6055 tg3_readphy(tp, MII_BMCR, &bmcr); 6056 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE); 6057 6058 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 6059 6060 } 6061 } 6062 } 6063 6064 static int tg3_setup_phy(struct tg3 *tp, bool force_reset) 6065 { 6066 u32 val; 6067 int err; 6068 6069 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 6070 err = tg3_setup_fiber_phy(tp, force_reset); 6071 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 6072 err = tg3_setup_fiber_mii_phy(tp, force_reset); 6073 else 6074 err = tg3_setup_copper_phy(tp, force_reset); 6075 6076 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) { 6077 u32 scale; 6078 6079 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK; 6080 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5) 6081 scale = 65; 6082 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25) 6083 scale = 6; 6084 else 6085 scale = 12; 6086 6087 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK; 6088 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT); 6089 tw32(GRC_MISC_CFG, val); 6090 } 6091 6092 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) | 6093 (6 << TX_LENGTHS_IPG_SHIFT); 6094 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 6095 tg3_asic_rev(tp) == ASIC_REV_5762) 6096 val |= tr32(MAC_TX_LENGTHS) & 6097 (TX_LENGTHS_JMB_FRM_LEN_MSK | 6098 TX_LENGTHS_CNT_DWN_VAL_MSK); 6099 6100 if (tp->link_config.active_speed == SPEED_1000 && 6101 tp->link_config.active_duplex == DUPLEX_HALF) 6102 tw32(MAC_TX_LENGTHS, val | 6103 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)); 6104 else 6105 tw32(MAC_TX_LENGTHS, val | 6106 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)); 6107 6108 if (!tg3_flag(tp, 5705_PLUS)) { 6109 if (tp->link_up) { 6110 tw32(HOSTCC_STAT_COAL_TICKS, 6111 tp->coal.stats_block_coalesce_usecs); 6112 } else { 6113 tw32(HOSTCC_STAT_COAL_TICKS, 0); 6114 } 6115 } 6116 6117 if (tg3_flag(tp, ASPM_WORKAROUND)) { 6118 val = tr32(PCIE_PWR_MGMT_THRESH); 6119 if (!tp->link_up) 6120 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) | 6121 tp->pwrmgmt_thresh; 6122 else 6123 val |= PCIE_PWR_MGMT_L1_THRESH_MSK; 6124 tw32(PCIE_PWR_MGMT_THRESH, val); 6125 } 6126 6127 return err; 6128 } 6129 6130 /* tp->lock must be held */ 6131 static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts) 6132 { 6133 u64 stamp; 6134 6135 ptp_read_system_prets(sts); 6136 stamp = tr32(TG3_EAV_REF_CLCK_LSB); 6137 ptp_read_system_postts(sts); 6138 stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32; 6139 6140 return stamp; 6141 } 6142 6143 /* tp->lock must be held */ 6144 static void tg3_refclk_write(struct tg3 *tp, u64 newval) 6145 { 6146 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL); 6147 6148 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP); 6149 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff); 6150 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32); 6151 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME); 6152 } 6153 6154 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync); 6155 static inline void 
tg3_full_unlock(struct tg3 *tp); 6156 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) 6157 { 6158 struct tg3 *tp = netdev_priv(dev); 6159 6160 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | 6161 SOF_TIMESTAMPING_RX_SOFTWARE | 6162 SOF_TIMESTAMPING_SOFTWARE; 6163 6164 if (tg3_flag(tp, PTP_CAPABLE)) { 6165 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE | 6166 SOF_TIMESTAMPING_RX_HARDWARE | 6167 SOF_TIMESTAMPING_RAW_HARDWARE; 6168 } 6169 6170 if (tp->ptp_clock) 6171 info->phc_index = ptp_clock_index(tp->ptp_clock); 6172 else 6173 info->phc_index = -1; 6174 6175 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); 6176 6177 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | 6178 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | 6179 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | 6180 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT); 6181 return 0; 6182 } 6183 6184 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) 6185 { 6186 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); 6187 bool neg_adj = false; 6188 u32 correction = 0; 6189 6190 if (ppb < 0) { 6191 neg_adj = true; 6192 ppb = -ppb; 6193 } 6194 6195 /* Frequency adjustment is performed using hardware with a 24 bit 6196 * accumulator and a programmable correction value. On each clk, the 6197 * correction value gets added to the accumulator and when it 6198 * overflows, the time counter is incremented/decremented. 6199 * 6200 * So conversion from ppb to correction value is 6201 * ppb * (1 << 24) / 1000000000 6202 */ 6203 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) & 6204 TG3_EAV_REF_CLK_CORRECT_MASK; 6205 6206 tg3_full_lock(tp, 0); 6207 6208 if (correction) 6209 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 6210 TG3_EAV_REF_CLK_CORRECT_EN | 6211 (neg_adj ? 
TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction); 6212 else 6213 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0); 6214 6215 tg3_full_unlock(tp); 6216 6217 return 0; 6218 } 6219 6220 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) 6221 { 6222 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); 6223 6224 tg3_full_lock(tp, 0); 6225 tp->ptp_adjust += delta; 6226 tg3_full_unlock(tp); 6227 6228 return 0; 6229 } 6230 6231 static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts, 6232 struct ptp_system_timestamp *sts) 6233 { 6234 u64 ns; 6235 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); 6236 6237 tg3_full_lock(tp, 0); 6238 ns = tg3_refclk_read(tp, sts); 6239 ns += tp->ptp_adjust; 6240 tg3_full_unlock(tp); 6241 6242 *ts = ns_to_timespec64(ns); 6243 6244 return 0; 6245 } 6246 6247 static int tg3_ptp_settime(struct ptp_clock_info *ptp, 6248 const struct timespec64 *ts) 6249 { 6250 u64 ns; 6251 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); 6252 6253 ns = timespec64_to_ns(ts); 6254 6255 tg3_full_lock(tp, 0); 6256 tg3_refclk_write(tp, ns); 6257 tp->ptp_adjust = 0; 6258 tg3_full_unlock(tp); 6259 6260 return 0; 6261 } 6262 6263 static int tg3_ptp_enable(struct ptp_clock_info *ptp, 6264 struct ptp_clock_request *rq, int on) 6265 { 6266 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); 6267 u32 clock_ctl; 6268 int rval = 0; 6269 6270 switch (rq->type) { 6271 case PTP_CLK_REQ_PEROUT: 6272 /* Reject requests with unsupported flags */ 6273 if (rq->perout.flags) 6274 return -EOPNOTSUPP; 6275 6276 if (rq->perout.index != 0) 6277 return -EINVAL; 6278 6279 tg3_full_lock(tp, 0); 6280 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL); 6281 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK; 6282 6283 if (on) { 6284 u64 nsec; 6285 6286 nsec = rq->perout.start.sec * 1000000000ULL + 6287 rq->perout.start.nsec; 6288 6289 if (rq->perout.period.sec || rq->perout.period.nsec) { 6290 netdev_warn(tp->dev, 6291 "Device supports only a one-shot timesync output, period must be 0\n"); 6292 rval = -EINVAL; 6293 goto err_out; 6294 } 6295 6296 if (nsec & (1ULL << 63)) { 6297 netdev_warn(tp->dev, 6298 "Start value (nsec) is over limit. 
Maximum size of start is only 63 bits\n"); 6299 rval = -EINVAL; 6300 goto err_out; 6301 } 6302 6303 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff)); 6304 tw32(TG3_EAV_WATCHDOG0_MSB, 6305 TG3_EAV_WATCHDOG0_EN | 6306 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK)); 6307 6308 tw32(TG3_EAV_REF_CLCK_CTL, 6309 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0); 6310 } else { 6311 tw32(TG3_EAV_WATCHDOG0_MSB, 0); 6312 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl); 6313 } 6314 6315 err_out: 6316 tg3_full_unlock(tp); 6317 return rval; 6318 6319 default: 6320 break; 6321 } 6322 6323 return -EOPNOTSUPP; 6324 } 6325 6326 static const struct ptp_clock_info tg3_ptp_caps = { 6327 .owner = THIS_MODULE, 6328 .name = "tg3 clock", 6329 .max_adj = 250000000, 6330 .n_alarm = 0, 6331 .n_ext_ts = 0, 6332 .n_per_out = 1, 6333 .n_pins = 0, 6334 .pps = 0, 6335 .adjfreq = tg3_ptp_adjfreq, 6336 .adjtime = tg3_ptp_adjtime, 6337 .gettimex64 = tg3_ptp_gettimex, 6338 .settime64 = tg3_ptp_settime, 6339 .enable = tg3_ptp_enable, 6340 }; 6341 6342 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock, 6343 struct skb_shared_hwtstamps *timestamp) 6344 { 6345 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps)); 6346 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) + 6347 tp->ptp_adjust); 6348 } 6349 6350 /* tp->lock must be held */ 6351 static void tg3_ptp_init(struct tg3 *tp) 6352 { 6353 if (!tg3_flag(tp, PTP_CAPABLE)) 6354 return; 6355 6356 /* Initialize the hardware clock to the system time. */ 6357 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real())); 6358 tp->ptp_adjust = 0; 6359 tp->ptp_info = tg3_ptp_caps; 6360 } 6361 6362 /* tp->lock must be held */ 6363 static void tg3_ptp_resume(struct tg3 *tp) 6364 { 6365 if (!tg3_flag(tp, PTP_CAPABLE)) 6366 return; 6367 6368 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust); 6369 tp->ptp_adjust = 0; 6370 } 6371 6372 static void tg3_ptp_fini(struct tg3 *tp) 6373 { 6374 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock) 6375 return; 6376 6377 ptp_clock_unregister(tp->ptp_clock); 6378 tp->ptp_clock = NULL; 6379 tp->ptp_adjust = 0; 6380 } 6381 6382 static inline int tg3_irq_sync(struct tg3 *tp) 6383 { 6384 return tp->irq_sync; 6385 } 6386 6387 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len) 6388 { 6389 int i; 6390 6391 dst = (u32 *)((u8 *)dst + off); 6392 for (i = 0; i < len; i += sizeof(u32)) 6393 *dst++ = tr32(off + i); 6394 } 6395 6396 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs) 6397 { 6398 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0); 6399 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200); 6400 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0); 6401 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0); 6402 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04); 6403 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80); 6404 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48); 6405 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04); 6406 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20); 6407 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c); 6408 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c); 6409 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c); 6410 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44); 6411 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04); 6412 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20); 6413 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14); 6414 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08); 6415 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08); 6416 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100); 6417 6418 if (tg3_flag(tp, SUPPORT_MSIX)) 6419 tg3_rd32_loop(tp, regs, 
HOSTCC_RXCOL_TICKS_VEC1, 0x180); 6420 6421 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10); 6422 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58); 6423 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08); 6424 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08); 6425 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04); 6426 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04); 6427 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04); 6428 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04); 6429 6430 if (!tg3_flag(tp, 5705_PLUS)) { 6431 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04); 6432 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04); 6433 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04); 6434 } 6435 6436 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110); 6437 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120); 6438 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c); 6439 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04); 6440 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c); 6441 6442 if (tg3_flag(tp, NVRAM)) 6443 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24); 6444 } 6445 6446 static void tg3_dump_state(struct tg3 *tp) 6447 { 6448 int i; 6449 u32 *regs; 6450 6451 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC); 6452 if (!regs) 6453 return; 6454 6455 if (tg3_flag(tp, PCI_EXPRESS)) { 6456 /* Read up to but not including private PCI registers */ 6457 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32)) 6458 regs[i / sizeof(u32)] = tr32(i); 6459 } else 6460 tg3_dump_legacy_regs(tp, regs); 6461 6462 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) { 6463 if (!regs[i + 0] && !regs[i + 1] && 6464 !regs[i + 2] && !regs[i + 3]) 6465 continue; 6466 6467 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n", 6468 i * 4, 6469 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]); 6470 } 6471 6472 kfree(regs); 6473 6474 for (i = 0; i < tp->irq_cnt; i++) { 6475 struct tg3_napi *tnapi = &tp->napi[i]; 6476 6477 /* SW status block */ 6478 netdev_err(tp->dev, 6479 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n", 6480 i, 6481 tnapi->hw_status->status, 6482 tnapi->hw_status->status_tag, 6483 tnapi->hw_status->rx_jumbo_consumer, 6484 tnapi->hw_status->rx_consumer, 6485 tnapi->hw_status->rx_mini_consumer, 6486 tnapi->hw_status->idx[0].rx_producer, 6487 tnapi->hw_status->idx[0].tx_consumer); 6488 6489 netdev_err(tp->dev, 6490 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n", 6491 i, 6492 tnapi->last_tag, tnapi->last_irq_tag, 6493 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending, 6494 tnapi->rx_rcb_ptr, 6495 tnapi->prodring.rx_std_prod_idx, 6496 tnapi->prodring.rx_std_cons_idx, 6497 tnapi->prodring.rx_jmb_prod_idx, 6498 tnapi->prodring.rx_jmb_cons_idx); 6499 } 6500 } 6501 6502 /* This is called whenever we suspect that the system chipset is re- 6503 * ordering the sequence of MMIO to the tx send mailbox. The symptom 6504 * is bogus tx completions. We try to recover by setting the 6505 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later 6506 * in the workqueue. 6507 */ 6508 static void tg3_tx_recover(struct tg3 *tp) 6509 { 6510 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) || 6511 tp->write32_tx_mbox == tg3_write_indirect_mbox); 6512 6513 netdev_warn(tp->dev, 6514 "The system may be re-ordering memory-mapped I/O " 6515 "cycles to the network device, attempting to recover. 
" 6516 "Please report the problem to the driver maintainer " 6517 "and include system chipset information.\n"); 6518 6519 tg3_flag_set(tp, TX_RECOVERY_PENDING); 6520 } 6521 6522 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi) 6523 { 6524 /* Tell compiler to fetch tx indices from memory. */ 6525 barrier(); 6526 return tnapi->tx_pending - 6527 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1)); 6528 } 6529 6530 /* Tigon3 never reports partial packet sends. So we do not 6531 * need special logic to handle SKBs that have not had all 6532 * of their frags sent yet, like SunGEM does. 6533 */ 6534 static void tg3_tx(struct tg3_napi *tnapi) 6535 { 6536 struct tg3 *tp = tnapi->tp; 6537 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer; 6538 u32 sw_idx = tnapi->tx_cons; 6539 struct netdev_queue *txq; 6540 int index = tnapi - tp->napi; 6541 unsigned int pkts_compl = 0, bytes_compl = 0; 6542 6543 if (tg3_flag(tp, ENABLE_TSS)) 6544 index--; 6545 6546 txq = netdev_get_tx_queue(tp->dev, index); 6547 6548 while (sw_idx != hw_idx) { 6549 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx]; 6550 struct sk_buff *skb = ri->skb; 6551 int i, tx_bug = 0; 6552 6553 if (unlikely(skb == NULL)) { 6554 tg3_tx_recover(tp); 6555 return; 6556 } 6557 6558 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) { 6559 struct skb_shared_hwtstamps timestamp; 6560 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB); 6561 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32; 6562 6563 tg3_hwclock_to_timestamp(tp, hwclock, ×tamp); 6564 6565 skb_tstamp_tx(skb, ×tamp); 6566 } 6567 6568 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), 6569 skb_headlen(skb), DMA_TO_DEVICE); 6570 6571 ri->skb = NULL; 6572 6573 while (ri->fragmented) { 6574 ri->fragmented = false; 6575 sw_idx = NEXT_TX(sw_idx); 6576 ri = &tnapi->tx_buffers[sw_idx]; 6577 } 6578 6579 sw_idx = NEXT_TX(sw_idx); 6580 6581 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 6582 ri = &tnapi->tx_buffers[sw_idx]; 6583 if (unlikely(ri->skb != NULL || sw_idx == hw_idx)) 6584 tx_bug = 1; 6585 6586 dma_unmap_page(&tp->pdev->dev, 6587 dma_unmap_addr(ri, mapping), 6588 skb_frag_size(&skb_shinfo(skb)->frags[i]), 6589 DMA_TO_DEVICE); 6590 6591 while (ri->fragmented) { 6592 ri->fragmented = false; 6593 sw_idx = NEXT_TX(sw_idx); 6594 ri = &tnapi->tx_buffers[sw_idx]; 6595 } 6596 6597 sw_idx = NEXT_TX(sw_idx); 6598 } 6599 6600 pkts_compl++; 6601 bytes_compl += skb->len; 6602 6603 dev_consume_skb_any(skb); 6604 6605 if (unlikely(tx_bug)) { 6606 tg3_tx_recover(tp); 6607 return; 6608 } 6609 } 6610 6611 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl); 6612 6613 tnapi->tx_cons = sw_idx; 6614 6615 /* Need to make the tx_cons update visible to tg3_start_xmit() 6616 * before checking for netif_queue_stopped(). Without the 6617 * memory barrier, there is a small possibility that tg3_start_xmit() 6618 * will miss it and cause the queue to be stopped forever. 
6619 */ 6620 smp_mb(); 6621 6622 if (unlikely(netif_tx_queue_stopped(txq) && 6623 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) { 6624 __netif_tx_lock(txq, smp_processor_id()); 6625 if (netif_tx_queue_stopped(txq) && 6626 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))) 6627 netif_tx_wake_queue(txq); 6628 __netif_tx_unlock(txq); 6629 } 6630 } 6631 6632 static void tg3_frag_free(bool is_frag, void *data) 6633 { 6634 if (is_frag) 6635 skb_free_frag(data); 6636 else 6637 kfree(data); 6638 } 6639 6640 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz) 6641 { 6642 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) + 6643 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 6644 6645 if (!ri->data) 6646 return; 6647 6648 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), map_sz, 6649 DMA_FROM_DEVICE); 6650 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data); 6651 ri->data = NULL; 6652 } 6653 6654 6655 /* Returns size of skb allocated or < 0 on error. 6656 * 6657 * We only need to fill in the address because the other members 6658 * of the RX descriptor are invariant, see tg3_init_rings. 6659 * 6660 * Note the purposeful asymmetry of cpu vs. chip accesses. For 6661 * posting buffers we only dirty the first cache line of the RX 6662 * descriptor (containing the address). Whereas for the RX status 6663 * buffers the cpu only reads the last cacheline of the RX descriptor 6664 * (to fetch the error flags, vlan tag, checksum, and opaque cookie). 6665 */ 6666 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr, 6667 u32 opaque_key, u32 dest_idx_unmasked, 6668 unsigned int *frag_size) 6669 { 6670 struct tg3_rx_buffer_desc *desc; 6671 struct ring_info *map; 6672 u8 *data; 6673 dma_addr_t mapping; 6674 int skb_size, data_size, dest_idx; 6675 6676 switch (opaque_key) { 6677 case RXD_OPAQUE_RING_STD: 6678 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask; 6679 desc = &tpr->rx_std[dest_idx]; 6680 map = &tpr->rx_std_buffers[dest_idx]; 6681 data_size = tp->rx_pkt_map_sz; 6682 break; 6683 6684 case RXD_OPAQUE_RING_JUMBO: 6685 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask; 6686 desc = &tpr->rx_jmb[dest_idx].std; 6687 map = &tpr->rx_jmb_buffers[dest_idx]; 6688 data_size = TG3_RX_JMB_MAP_SZ; 6689 break; 6690 6691 default: 6692 return -EINVAL; 6693 } 6694 6695 /* Do not overwrite any of the map or rp information 6696 * until we are sure we can commit to a new buffer. 6697 * 6698 * Callers depend upon this behavior and assume that 6699 * we leave everything unchanged if we fail. 6700 */ 6701 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) + 6702 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 6703 if (skb_size <= PAGE_SIZE) { 6704 data = napi_alloc_frag(skb_size); 6705 *frag_size = skb_size; 6706 } else { 6707 data = kmalloc(skb_size, GFP_ATOMIC); 6708 *frag_size = 0; 6709 } 6710 if (!data) 6711 return -ENOMEM; 6712 6713 mapping = dma_map_single(&tp->pdev->dev, data + TG3_RX_OFFSET(tp), 6714 data_size, DMA_FROM_DEVICE); 6715 if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) { 6716 tg3_frag_free(skb_size <= PAGE_SIZE, data); 6717 return -EIO; 6718 } 6719 6720 map->data = data; 6721 dma_unmap_addr_set(map, mapping, mapping); 6722 6723 desc->addr_hi = ((u64)mapping >> 32); 6724 desc->addr_lo = ((u64)mapping & 0xffffffff); 6725 6726 return data_size; 6727 } 6728 6729 /* We only need to move over in the address because the other 6730 * members of the RX descriptor are invariant.
See notes above 6731 * tg3_alloc_rx_data for full details. 6732 */ 6733 static void tg3_recycle_rx(struct tg3_napi *tnapi, 6734 struct tg3_rx_prodring_set *dpr, 6735 u32 opaque_key, int src_idx, 6736 u32 dest_idx_unmasked) 6737 { 6738 struct tg3 *tp = tnapi->tp; 6739 struct tg3_rx_buffer_desc *src_desc, *dest_desc; 6740 struct ring_info *src_map, *dest_map; 6741 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring; 6742 int dest_idx; 6743 6744 switch (opaque_key) { 6745 case RXD_OPAQUE_RING_STD: 6746 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask; 6747 dest_desc = &dpr->rx_std[dest_idx]; 6748 dest_map = &dpr->rx_std_buffers[dest_idx]; 6749 src_desc = &spr->rx_std[src_idx]; 6750 src_map = &spr->rx_std_buffers[src_idx]; 6751 break; 6752 6753 case RXD_OPAQUE_RING_JUMBO: 6754 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask; 6755 dest_desc = &dpr->rx_jmb[dest_idx].std; 6756 dest_map = &dpr->rx_jmb_buffers[dest_idx]; 6757 src_desc = &spr->rx_jmb[src_idx].std; 6758 src_map = &spr->rx_jmb_buffers[src_idx]; 6759 break; 6760 6761 default: 6762 return; 6763 } 6764 6765 dest_map->data = src_map->data; 6766 dma_unmap_addr_set(dest_map, mapping, 6767 dma_unmap_addr(src_map, mapping)); 6768 dest_desc->addr_hi = src_desc->addr_hi; 6769 dest_desc->addr_lo = src_desc->addr_lo; 6770 6771 /* Ensure that the update to the skb happens after the physical 6772 * addresses have been transferred to the new BD location. 6773 */ 6774 smp_wmb(); 6775 6776 src_map->data = NULL; 6777 } 6778 6779 /* The RX ring scheme is composed of multiple rings which post fresh 6780 * buffers to the chip, and one special ring the chip uses to report 6781 * status back to the host. 6782 * 6783 * The special ring reports the status of received packets to the 6784 * host. The chip does not write into the original descriptor the 6785 * RX buffer was obtained from. The chip simply takes the original 6786 * descriptor as provided by the host, updates the status and length 6787 * field, then writes this into the next status ring entry. 6788 * 6789 * Each ring the host uses to post buffers to the chip is described 6790 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives, 6791 * it is first placed into the on-chip RAM. When the packet's length 6792 * is known, the chip walks down the TG3_BDINFO entries to select the ring. 6793 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO 6794 * whose MAXLEN is within the range of the new packet's length is chosen. 6795 * 6796 * The "separate ring for rx status" scheme may sound queer, but it makes 6797 * sense from a cache coherency perspective. If only the host writes 6798 * to the buffer post rings, and only the chip writes to the rx status 6799 * rings, then cache lines never move beyond shared-modified state. 6800 * If both the host and chip were to write into the same ring, cache line 6801 * eviction could occur since both entities want it in an exclusive state. 6802 */ 6803 static int tg3_rx(struct tg3_napi *tnapi, int budget) 6804 { 6805 struct tg3 *tp = tnapi->tp; 6806 u32 work_mask, rx_std_posted = 0; 6807 u32 std_prod_idx, jmb_prod_idx; 6808 u32 sw_idx = tnapi->rx_rcb_ptr; 6809 u16 hw_idx; 6810 int received; 6811 struct tg3_rx_prodring_set *tpr = &tnapi->prodring; 6812 6813 hw_idx = *(tnapi->rx_rcb_prod_idx); 6814 /* 6815 * We need to order the read of hw_idx and the read of 6816 * the opaque cookie.
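* The rmb() below keeps the descriptor reads in the loop from being speculated ahead of the hw_idx read that declared them valid.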
6817 */ 6818 rmb(); 6819 work_mask = 0; 6820 received = 0; 6821 std_prod_idx = tpr->rx_std_prod_idx; 6822 jmb_prod_idx = tpr->rx_jmb_prod_idx; 6823 while (sw_idx != hw_idx && budget > 0) { 6824 struct ring_info *ri; 6825 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx]; 6826 unsigned int len; 6827 struct sk_buff *skb; 6828 dma_addr_t dma_addr; 6829 u32 opaque_key, desc_idx, *post_ptr; 6830 u8 *data; 6831 u64 tstamp = 0; 6832 6833 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; 6834 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; 6835 if (opaque_key == RXD_OPAQUE_RING_STD) { 6836 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx]; 6837 dma_addr = dma_unmap_addr(ri, mapping); 6838 data = ri->data; 6839 post_ptr = &std_prod_idx; 6840 rx_std_posted++; 6841 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { 6842 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx]; 6843 dma_addr = dma_unmap_addr(ri, mapping); 6844 data = ri->data; 6845 post_ptr = &jmb_prod_idx; 6846 } else 6847 goto next_pkt_nopost; 6848 6849 work_mask |= opaque_key; 6850 6851 if (desc->err_vlan & RXD_ERR_MASK) { 6852 drop_it: 6853 tg3_recycle_rx(tnapi, tpr, opaque_key, 6854 desc_idx, *post_ptr); 6855 drop_it_no_recycle: 6856 /* Other statistics kept track of by card. */ 6857 tp->rx_dropped++; 6858 goto next_pkt; 6859 } 6860 6861 prefetch(data + TG3_RX_OFFSET(tp)); 6862 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 6863 ETH_FCS_LEN; 6864 6865 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) == 6866 RXD_FLAG_PTPSTAT_PTPV1 || 6867 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) == 6868 RXD_FLAG_PTPSTAT_PTPV2) { 6869 tstamp = tr32(TG3_RX_TSTAMP_LSB); 6870 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32; 6871 } 6872 6873 if (len > TG3_RX_COPY_THRESH(tp)) { 6874 int skb_size; 6875 unsigned int frag_size; 6876 6877 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key, 6878 *post_ptr, &frag_size); 6879 if (skb_size < 0) 6880 goto drop_it; 6881 6882 dma_unmap_single(&tp->pdev->dev, dma_addr, skb_size, 6883 DMA_FROM_DEVICE); 6884 6885 /* Ensure that the update to the data happens 6886 * after the usage of the old DMA mapping. 
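* The smp_wmb() below makes the unmap above visible before ri->data is cleared, so a slot that reads back as empty is truly done with its old mapping.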
6887 */ 6888 smp_wmb(); 6889 6890 ri->data = NULL; 6891 6892 skb = build_skb(data, frag_size); 6893 if (!skb) { 6894 tg3_frag_free(frag_size != 0, data); 6895 goto drop_it_no_recycle; 6896 } 6897 skb_reserve(skb, TG3_RX_OFFSET(tp)); 6898 } else { 6899 tg3_recycle_rx(tnapi, tpr, opaque_key, 6900 desc_idx, *post_ptr); 6901 6902 skb = netdev_alloc_skb(tp->dev, 6903 len + TG3_RAW_IP_ALIGN); 6904 if (skb == NULL) 6905 goto drop_it_no_recycle; 6906 6907 skb_reserve(skb, TG3_RAW_IP_ALIGN); 6908 dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr, len, 6909 DMA_FROM_DEVICE); 6910 memcpy(skb->data, 6911 data + TG3_RX_OFFSET(tp), 6912 len); 6913 dma_sync_single_for_device(&tp->pdev->dev, dma_addr, 6914 len, DMA_FROM_DEVICE); 6915 } 6916 6917 skb_put(skb, len); 6918 if (tstamp) 6919 tg3_hwclock_to_timestamp(tp, tstamp, 6920 skb_hwtstamps(skb)); 6921 6922 if ((tp->dev->features & NETIF_F_RXCSUM) && 6923 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && 6924 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK) 6925 >> RXD_TCPCSUM_SHIFT) == 0xffff)) 6926 skb->ip_summed = CHECKSUM_UNNECESSARY; 6927 else 6928 skb_checksum_none_assert(skb); 6929 6930 skb->protocol = eth_type_trans(skb, tp->dev); 6931 6932 if (len > (tp->dev->mtu + ETH_HLEN) && 6933 skb->protocol != htons(ETH_P_8021Q) && 6934 skb->protocol != htons(ETH_P_8021AD)) { 6935 dev_kfree_skb_any(skb); 6936 goto drop_it_no_recycle; 6937 } 6938 6939 if (desc->type_flags & RXD_FLAG_VLAN && 6940 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) 6941 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 6942 desc->err_vlan & RXD_VLAN_MASK); 6943 6944 napi_gro_receive(&tnapi->napi, skb); 6945 6946 received++; 6947 budget--; 6948 6949 next_pkt: 6950 (*post_ptr)++; 6951 6952 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) { 6953 tpr->rx_std_prod_idx = std_prod_idx & 6954 tp->rx_std_ring_mask; 6955 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, 6956 tpr->rx_std_prod_idx); 6957 work_mask &= ~RXD_OPAQUE_RING_STD; 6958 rx_std_posted = 0; 6959 } 6960 next_pkt_nopost: 6961 sw_idx++; 6962 sw_idx &= tp->rx_ret_ring_mask; 6963 6964 /* Refresh hw_idx to see if there is new work */ 6965 if (sw_idx == hw_idx) { 6966 hw_idx = *(tnapi->rx_rcb_prod_idx); 6967 rmb(); 6968 } 6969 } 6970 6971 /* ACK the status ring. */ 6972 tnapi->rx_rcb_ptr = sw_idx; 6973 tw32_rx_mbox(tnapi->consmbox, sw_idx); 6974 6975 /* Refill RX ring(s). */ 6976 if (!tg3_flag(tp, ENABLE_RSS)) { 6977 /* Sync BD data before updating mailbox */ 6978 wmb(); 6979 6980 if (work_mask & RXD_OPAQUE_RING_STD) { 6981 tpr->rx_std_prod_idx = std_prod_idx & 6982 tp->rx_std_ring_mask; 6983 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, 6984 tpr->rx_std_prod_idx); 6985 } 6986 if (work_mask & RXD_OPAQUE_RING_JUMBO) { 6987 tpr->rx_jmb_prod_idx = jmb_prod_idx & 6988 tp->rx_jmb_ring_mask; 6989 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, 6990 tpr->rx_jmb_prod_idx); 6991 } 6992 } else if (work_mask) { 6993 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be 6994 * updated before the producer indices can be updated. 
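* The smp_wmb() below pairs with the smp_rmb() in tg3_rx_prodring_xfer(), which reads the producer index first and only then inspects the ring entries.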
6995 */ 6996 smp_wmb(); 6997 6998 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask; 6999 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask; 7000 7001 if (tnapi != &tp->napi[1]) { 7002 tp->rx_refill = true; 7003 napi_schedule(&tp->napi[1].napi); 7004 } 7005 } 7006 7007 return received; 7008 } 7009 7010 static void tg3_poll_link(struct tg3 *tp) 7011 { 7012 /* handle link change and other phy events */ 7013 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) { 7014 struct tg3_hw_status *sblk = tp->napi[0].hw_status; 7015 7016 if (sblk->status & SD_STATUS_LINK_CHG) { 7017 sblk->status = SD_STATUS_UPDATED | 7018 (sblk->status & ~SD_STATUS_LINK_CHG); 7019 spin_lock(&tp->lock); 7020 if (tg3_flag(tp, USE_PHYLIB)) { 7021 tw32_f(MAC_STATUS, 7022 (MAC_STATUS_SYNC_CHANGED | 7023 MAC_STATUS_CFG_CHANGED | 7024 MAC_STATUS_MI_COMPLETION | 7025 MAC_STATUS_LNKSTATE_CHANGED)); 7026 udelay(40); 7027 } else 7028 tg3_setup_phy(tp, false); 7029 spin_unlock(&tp->lock); 7030 } 7031 } 7032 } 7033 7034 static int tg3_rx_prodring_xfer(struct tg3 *tp, 7035 struct tg3_rx_prodring_set *dpr, 7036 struct tg3_rx_prodring_set *spr) 7037 { 7038 u32 si, di, cpycnt, src_prod_idx; 7039 int i, err = 0; 7040 7041 while (1) { 7042 src_prod_idx = spr->rx_std_prod_idx; 7043 7044 /* Make sure updates to the rx_std_buffers[] entries and the 7045 * standard producer index are seen in the correct order. 7046 */ 7047 smp_rmb(); 7048 7049 if (spr->rx_std_cons_idx == src_prod_idx) 7050 break; 7051 7052 if (spr->rx_std_cons_idx < src_prod_idx) 7053 cpycnt = src_prod_idx - spr->rx_std_cons_idx; 7054 else 7055 cpycnt = tp->rx_std_ring_mask + 1 - 7056 spr->rx_std_cons_idx; 7057 7058 cpycnt = min(cpycnt, 7059 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx); 7060 7061 si = spr->rx_std_cons_idx; 7062 di = dpr->rx_std_prod_idx; 7063 7064 for (i = di; i < di + cpycnt; i++) { 7065 if (dpr->rx_std_buffers[i].data) { 7066 cpycnt = i - di; 7067 err = -ENOSPC; 7068 break; 7069 } 7070 } 7071 7072 if (!cpycnt) 7073 break; 7074 7075 /* Ensure that updates to the rx_std_buffers ring and the 7076 * shadowed hardware producer ring from tg3_recycle_skb() are 7077 * ordered correctly WRT the skb check above. 7078 */ 7079 smp_rmb(); 7080 7081 memcpy(&dpr->rx_std_buffers[di], 7082 &spr->rx_std_buffers[si], 7083 cpycnt * sizeof(struct ring_info)); 7084 7085 for (i = 0; i < cpycnt; i++, di++, si++) { 7086 struct tg3_rx_buffer_desc *sbd, *dbd; 7087 sbd = &spr->rx_std[si]; 7088 dbd = &dpr->rx_std[di]; 7089 dbd->addr_hi = sbd->addr_hi; 7090 dbd->addr_lo = sbd->addr_lo; 7091 } 7092 7093 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) & 7094 tp->rx_std_ring_mask; 7095 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) & 7096 tp->rx_std_ring_mask; 7097 } 7098 7099 while (1) { 7100 src_prod_idx = spr->rx_jmb_prod_idx; 7101 7102 /* Make sure updates to the rx_jmb_buffers[] entries and 7103 * the jumbo producer index are seen in the correct order. 
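* This smp_rmb() pairs with the smp_wmb() that tg3_rx() issues before publishing its producer indices in the ENABLE_RSS path.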
7104 */ 7105 smp_rmb(); 7106 7107 if (spr->rx_jmb_cons_idx == src_prod_idx) 7108 break; 7109 7110 if (spr->rx_jmb_cons_idx < src_prod_idx) 7111 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx; 7112 else 7113 cpycnt = tp->rx_jmb_ring_mask + 1 - 7114 spr->rx_jmb_cons_idx; 7115 7116 cpycnt = min(cpycnt, 7117 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx); 7118 7119 si = spr->rx_jmb_cons_idx; 7120 di = dpr->rx_jmb_prod_idx; 7121 7122 for (i = di; i < di + cpycnt; i++) { 7123 if (dpr->rx_jmb_buffers[i].data) { 7124 cpycnt = i - di; 7125 err = -ENOSPC; 7126 break; 7127 } 7128 } 7129 7130 if (!cpycnt) 7131 break; 7132 7133 /* Ensure that updates to the rx_jmb_buffers ring and the 7134 * shadowed hardware producer ring from tg3_recycle_skb() are 7135 * ordered correctly WRT the skb check above. 7136 */ 7137 smp_rmb(); 7138 7139 memcpy(&dpr->rx_jmb_buffers[di], 7140 &spr->rx_jmb_buffers[si], 7141 cpycnt * sizeof(struct ring_info)); 7142 7143 for (i = 0; i < cpycnt; i++, di++, si++) { 7144 struct tg3_rx_buffer_desc *sbd, *dbd; 7145 sbd = &spr->rx_jmb[si].std; 7146 dbd = &dpr->rx_jmb[di].std; 7147 dbd->addr_hi = sbd->addr_hi; 7148 dbd->addr_lo = sbd->addr_lo; 7149 } 7150 7151 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) & 7152 tp->rx_jmb_ring_mask; 7153 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) & 7154 tp->rx_jmb_ring_mask; 7155 } 7156 7157 return err; 7158 } 7159 7160 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) 7161 { 7162 struct tg3 *tp = tnapi->tp; 7163 7164 /* run TX completion thread */ 7165 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) { 7166 tg3_tx(tnapi); 7167 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) 7168 return work_done; 7169 } 7170 7171 if (!tnapi->rx_rcb_prod_idx) 7172 return work_done; 7173 7174 /* run RX thread, within the bounds set by NAPI. 
7175 * All RX "locking" is done by ensuring outside 7176 * code synchronizes with tg3->napi.poll() 7177 */ 7178 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) 7179 work_done += tg3_rx(tnapi, budget - work_done); 7180 7181 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) { 7182 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring; 7183 int i, err = 0; 7184 u32 std_prod_idx = dpr->rx_std_prod_idx; 7185 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx; 7186 7187 tp->rx_refill = false; 7188 for (i = 1; i <= tp->rxq_cnt; i++) 7189 err |= tg3_rx_prodring_xfer(tp, dpr, 7190 &tp->napi[i].prodring); 7191 7192 wmb(); 7193 7194 if (std_prod_idx != dpr->rx_std_prod_idx) 7195 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, 7196 dpr->rx_std_prod_idx); 7197 7198 if (jmb_prod_idx != dpr->rx_jmb_prod_idx) 7199 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, 7200 dpr->rx_jmb_prod_idx); 7201 7202 if (err) 7203 tw32_f(HOSTCC_MODE, tp->coal_now); 7204 } 7205 7206 return work_done; 7207 } 7208 7209 static inline void tg3_reset_task_schedule(struct tg3 *tp) 7210 { 7211 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags)) 7212 schedule_work(&tp->reset_task); 7213 } 7214 7215 static inline void tg3_reset_task_cancel(struct tg3 *tp) 7216 { 7217 if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags)) 7218 cancel_work_sync(&tp->reset_task); 7219 tg3_flag_clear(tp, TX_RECOVERY_PENDING); 7220 } 7221 7222 static int tg3_poll_msix(struct napi_struct *napi, int budget) 7223 { 7224 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); 7225 struct tg3 *tp = tnapi->tp; 7226 int work_done = 0; 7227 struct tg3_hw_status *sblk = tnapi->hw_status; 7228 7229 while (1) { 7230 work_done = tg3_poll_work(tnapi, work_done, budget); 7231 7232 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) 7233 goto tx_recovery; 7234 7235 if (unlikely(work_done >= budget)) 7236 break; 7237 7238 /* tp->last_tag is used in tg3_int_reenable() below 7239 * to tell the hw how much work has been processed, 7240 * so we must read it before checking for more work. 7241 */ 7242 tnapi->last_tag = sblk->status_tag; 7243 tnapi->last_irq_tag = tnapi->last_tag; 7244 rmb(); 7245 7246 /* check for RX/TX work to do */ 7247 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons && 7248 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) { 7249 7250 /* This test here is not race free, but will reduce 7251 * the number of interrupts by looping again. 7252 */ 7253 if (tnapi == &tp->napi[1] && tp->rx_refill) 7254 continue; 7255 7256 napi_complete_done(napi, work_done); 7257 /* Reenable interrupts. */ 7258 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); 7259 7260 /* This test here is synchronized by napi_schedule() 7261 * and napi_complete() to close the race condition. 7262 */ 7263 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) { 7264 tw32(HOSTCC_MODE, tp->coalesce_mode | 7265 HOSTCC_MODE_ENABLE | 7266 tnapi->coal_now); 7267 } 7268 break; 7269 } 7270 } 7271 7272 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1); 7273 return work_done; 7274 7275 tx_recovery: 7276 /* work_done is guaranteed to be less than budget. */ 7277 napi_complete(napi); 7278 tg3_reset_task_schedule(tp); 7279 return work_done; 7280 } 7281 7282 static void tg3_process_error(struct tg3 *tp) 7283 { 7284 u32 val; 7285 bool real_error = false; 7286 7287 if (tg3_flag(tp, ERROR_PROCESSED)) 7288 return; 7289 7290 /* Check Flow Attention register */ 7291 val = tr32(HOSTCC_FLOW_ATTN); 7292 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) { 7293 netdev_err(tp->dev, "FLOW Attention error. 
Resetting chip.\n"); 7294 real_error = true; 7295 } 7296 7297 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) { 7298 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n"); 7299 real_error = true; 7300 } 7301 7302 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) { 7303 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n"); 7304 real_error = true; 7305 } 7306 7307 if (!real_error) 7308 return; 7309 7310 tg3_dump_state(tp); 7311 7312 tg3_flag_set(tp, ERROR_PROCESSED); 7313 tg3_reset_task_schedule(tp); 7314 } 7315 7316 static int tg3_poll(struct napi_struct *napi, int budget) 7317 { 7318 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); 7319 struct tg3 *tp = tnapi->tp; 7320 int work_done = 0; 7321 struct tg3_hw_status *sblk = tnapi->hw_status; 7322 7323 while (1) { 7324 if (sblk->status & SD_STATUS_ERROR) 7325 tg3_process_error(tp); 7326 7327 tg3_poll_link(tp); 7328 7329 work_done = tg3_poll_work(tnapi, work_done, budget); 7330 7331 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) 7332 goto tx_recovery; 7333 7334 if (unlikely(work_done >= budget)) 7335 break; 7336 7337 if (tg3_flag(tp, TAGGED_STATUS)) { 7338 /* tp->last_tag is used in tg3_int_reenable() below 7339 * to tell the hw how much work has been processed, 7340 * so we must read it before checking for more work. 7341 */ 7342 tnapi->last_tag = sblk->status_tag; 7343 tnapi->last_irq_tag = tnapi->last_tag; 7344 rmb(); 7345 } else 7346 sblk->status &= ~SD_STATUS_UPDATED; 7347 7348 if (likely(!tg3_has_work(tnapi))) { 7349 napi_complete_done(napi, work_done); 7350 tg3_int_reenable(tnapi); 7351 break; 7352 } 7353 } 7354 7355 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1); 7356 return work_done; 7357 7358 tx_recovery: 7359 /* work_done is guaranteed to be less than budget. 
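(Only tg3_rx() consumes budget, and tg3_poll_work() returns before calling it once TX_RECOVERY_PENDING is set, so the napi_complete() below is safe.)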
*/ 7360 napi_complete(napi); 7361 tg3_reset_task_schedule(tp); 7362 return work_done; 7363 } 7364 7365 static void tg3_napi_disable(struct tg3 *tp) 7366 { 7367 int i; 7368 7369 for (i = tp->irq_cnt - 1; i >= 0; i--) 7370 napi_disable(&tp->napi[i].napi); 7371 } 7372 7373 static void tg3_napi_enable(struct tg3 *tp) 7374 { 7375 int i; 7376 7377 for (i = 0; i < tp->irq_cnt; i++) 7378 napi_enable(&tp->napi[i].napi); 7379 } 7380 7381 static void tg3_napi_init(struct tg3 *tp) 7382 { 7383 int i; 7384 7385 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64); 7386 for (i = 1; i < tp->irq_cnt; i++) 7387 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64); 7388 } 7389 7390 static void tg3_napi_fini(struct tg3 *tp) 7391 { 7392 int i; 7393 7394 for (i = 0; i < tp->irq_cnt; i++) 7395 netif_napi_del(&tp->napi[i].napi); 7396 } 7397 7398 static inline void tg3_netif_stop(struct tg3 *tp) 7399 { 7400 netif_trans_update(tp->dev); /* prevent tx timeout */ 7401 tg3_napi_disable(tp); 7402 netif_carrier_off(tp->dev); 7403 netif_tx_disable(tp->dev); 7404 } 7405 7406 /* tp->lock must be held */ 7407 static inline void tg3_netif_start(struct tg3 *tp) 7408 { 7409 tg3_ptp_resume(tp); 7410 7411 /* NOTE: unconditional netif_tx_wake_all_queues is only 7412 * appropriate so long as all callers are assured to 7413 * have free tx slots (such as after tg3_init_hw) 7414 */ 7415 netif_tx_wake_all_queues(tp->dev); 7416 7417 if (tp->link_up) 7418 netif_carrier_on(tp->dev); 7419 7420 tg3_napi_enable(tp); 7421 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED; 7422 tg3_enable_ints(tp); 7423 } 7424 7425 static void tg3_irq_quiesce(struct tg3 *tp) 7426 __releases(tp->lock) 7427 __acquires(tp->lock) 7428 { 7429 int i; 7430 7431 BUG_ON(tp->irq_sync); 7432 7433 tp->irq_sync = 1; 7434 smp_mb(); 7435 7436 spin_unlock_bh(&tp->lock); 7437 7438 for (i = 0; i < tp->irq_cnt; i++) 7439 synchronize_irq(tp->napi[i].irq_vec); 7440 7441 spin_lock_bh(&tp->lock); 7442 } 7443 7444 /* Fully shut down all tg3 driver activity elsewhere in the system. 7445 * If irq_sync is non-zero, then the IRQ handlers must be synchronized 7446 * with it as well. Most of the time, this is not necessary except when 7447 * shutting down the device. 7448 */ 7449 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync) 7450 { 7451 spin_lock_bh(&tp->lock); 7452 if (irq_sync) 7453 tg3_irq_quiesce(tp); 7454 } 7455 7456 static inline void tg3_full_unlock(struct tg3 *tp) 7457 { 7458 spin_unlock_bh(&tp->lock); 7459 } 7460 7461 /* One-shot MSI handler - Chip automatically disables interrupt 7462 * after sending MSI so driver doesn't have to do it. 7463 */ 7464 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id) 7465 { 7466 struct tg3_napi *tnapi = dev_id; 7467 struct tg3 *tp = tnapi->tp; 7468 7469 prefetch(tnapi->hw_status); 7470 if (tnapi->rx_rcb) 7471 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); 7472 7473 if (likely(!tg3_irq_sync(tp))) 7474 napi_schedule(&tnapi->napi); 7475 7476 return IRQ_HANDLED; 7477 } 7478 7479 /* MSI ISR - No need to check for interrupt sharing and no need to 7480 * flush status block and interrupt mailbox. PCI ordering rules 7481 * guarantee that MSI will arrive after the status block.
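* Unlike the INTx handlers below, there is therefore no need to read TG3PCI_PCISTATE to decide whether the interrupt is ours.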
7482 */ 7483 static irqreturn_t tg3_msi(int irq, void *dev_id) 7484 { 7485 struct tg3_napi *tnapi = dev_id; 7486 struct tg3 *tp = tnapi->tp; 7487 7488 prefetch(tnapi->hw_status); 7489 if (tnapi->rx_rcb) 7490 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); 7491 /* 7492 * Writing any value to intr-mbox-0 clears PCI INTA# and 7493 * chip-internal interrupt pending events. 7494 * Writing non-zero to intr-mbox-0 additionally tells the 7495 * NIC to stop sending us irqs, engaging "in-intr-handler" 7496 * event coalescing. 7497 */ 7498 tw32_mailbox(tnapi->int_mbox, 0x00000001); 7499 if (likely(!tg3_irq_sync(tp))) 7500 napi_schedule(&tnapi->napi); 7501 7502 return IRQ_RETVAL(1); 7503 } 7504 7505 static irqreturn_t tg3_interrupt(int irq, void *dev_id) 7506 { 7507 struct tg3_napi *tnapi = dev_id; 7508 struct tg3 *tp = tnapi->tp; 7509 struct tg3_hw_status *sblk = tnapi->hw_status; 7510 unsigned int handled = 1; 7511 7512 /* In INTx mode, it is possible for the interrupt to arrive at 7513 * the CPU before the status block posted prior to the interrupt. 7514 * Reading the PCI State register will confirm whether the 7515 * interrupt is ours and will flush the status block. 7516 */ 7517 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) { 7518 if (tg3_flag(tp, CHIP_RESETTING) || 7519 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { 7520 handled = 0; 7521 goto out; 7522 } 7523 } 7524 7525 /* 7526 * Writing any value to intr-mbox-0 clears PCI INTA# and 7527 * chip-internal interrupt pending events. 7528 * Writing non-zero to intr-mbox-0 additionally tells the 7529 * NIC to stop sending us irqs, engaging "in-intr-handler" 7530 * event coalescing. 7531 * 7532 * Flush the mailbox to de-assert the IRQ immediately to prevent 7533 * spurious interrupts. The flush impacts performance but 7534 * excessive spurious interrupts can be worse in some cases. 7535 */ 7536 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); 7537 if (tg3_irq_sync(tp)) 7538 goto out; 7539 sblk->status &= ~SD_STATUS_UPDATED; 7540 if (likely(tg3_has_work(tnapi))) { 7541 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); 7542 napi_schedule(&tnapi->napi); 7543 } else { 7544 /* No work, shared interrupt perhaps? re-enable 7545 * interrupts, and flush that PCI write 7546 */ 7547 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 7548 0x00000000); 7549 } 7550 out: 7551 return IRQ_RETVAL(handled); 7552 } 7553 7554 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id) 7555 { 7556 struct tg3_napi *tnapi = dev_id; 7557 struct tg3 *tp = tnapi->tp; 7558 struct tg3_hw_status *sblk = tnapi->hw_status; 7559 unsigned int handled = 1; 7560 7561 /* In INTx mode, it is possible for the interrupt to arrive at 7562 * the CPU before the status block posted prior to the interrupt. 7563 * Reading the PCI State register will confirm whether the 7564 * interrupt is ours and will flush the status block. 7565 */ 7566 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) { 7567 if (tg3_flag(tp, CHIP_RESETTING) || 7568 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { 7569 handled = 0; 7570 goto out; 7571 } 7572 } 7573 7574 /* 7575 * Writing any value to intr-mbox-0 clears PCI INTA# and 7576 * chip-internal interrupt pending events. 7577 * Writing non-zero to intr-mbox-0 additionally tells the 7578 * NIC to stop sending us irqs, engaging "in-intr-handler" 7579 * event coalescing. 7580 * 7581 * Flush the mailbox to de-assert the IRQ immediately to prevent 7582 * spurious interrupts.
The flush impacts performance but 7583 * excessive spurious interrupts can be worse in some cases. 7584 */ 7585 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); 7586 7587 /* 7588 * In a shared interrupt configuration, sometimes other devices' 7589 * interrupts will scream. We record the current status tag here 7590 * so that the above check can report that the screaming interrupts 7591 * are unhandled. Eventually they will be silenced. 7592 */ 7593 tnapi->last_irq_tag = sblk->status_tag; 7594 7595 if (tg3_irq_sync(tp)) 7596 goto out; 7597 7598 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); 7599 7600 napi_schedule(&tnapi->napi); 7601 7602 out: 7603 return IRQ_RETVAL(handled); 7604 } 7605 7606 /* ISR for interrupt test */ 7607 static irqreturn_t tg3_test_isr(int irq, void *dev_id) 7608 { 7609 struct tg3_napi *tnapi = dev_id; 7610 struct tg3 *tp = tnapi->tp; 7611 struct tg3_hw_status *sblk = tnapi->hw_status; 7612 7613 if ((sblk->status & SD_STATUS_UPDATED) || 7614 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { 7615 tg3_disable_ints(tp); 7616 return IRQ_RETVAL(1); 7617 } 7618 return IRQ_RETVAL(0); 7619 } 7620 7621 #ifdef CONFIG_NET_POLL_CONTROLLER 7622 static void tg3_poll_controller(struct net_device *dev) 7623 { 7624 int i; 7625 struct tg3 *tp = netdev_priv(dev); 7626 7627 if (tg3_irq_sync(tp)) 7628 return; 7629 7630 for (i = 0; i < tp->irq_cnt; i++) 7631 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]); 7632 } 7633 #endif 7634 7635 static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue) 7636 { 7637 struct tg3 *tp = netdev_priv(dev); 7638 7639 if (netif_msg_tx_err(tp)) { 7640 netdev_err(dev, "transmit timed out, resetting\n"); 7641 tg3_dump_state(tp); 7642 } 7643 7644 tg3_reset_task_schedule(tp); 7645 } 7646 7647 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */ 7648 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len) 7649 { 7650 u32 base = (u32) mapping & 0xffffffff; 7651 7652 return base + len + 8 < base; 7653 } 7654 7655 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes 7656 * of any 4GB boundaries: 4G, 8G, etc 7657 */ 7658 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping, 7659 u32 len, u32 mss) 7660 { 7661 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) { 7662 u32 base = (u32) mapping & 0xffffffff; 7663 7664 return ((base + len + (mss & 0x3fff)) < base); 7665 } 7666 return 0; 7667 } 7668 7669 /* Test for DMA addresses > 40-bit */ 7670 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping, 7671 int len) 7672 { 7673 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64) 7674 if (tg3_flag(tp, 40BIT_DMA_BUG)) 7675 return ((u64) mapping + len) > DMA_BIT_MASK(40); 7676 return 0; 7677 #else 7678 return 0; 7679 #endif 7680 } 7681 7682 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd, 7683 dma_addr_t mapping, u32 len, u32 flags, 7684 u32 mss, u32 vlan) 7685 { 7686 txbd->addr_hi = ((u64) mapping >> 32); 7687 txbd->addr_lo = ((u64) mapping & 0xffffffff); 7688 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff); 7689 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT); 7690 } 7691 7692 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget, 7693 dma_addr_t map, u32 len, u32 flags, 7694 u32 mss, u32 vlan) 7695 { 7696 struct tg3 *tp = tnapi->tp; 7697 bool hwbug = false; 7698 7699 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8) 7700 hwbug = true; 7701 7702 if (tg3_4g_overflow_test(map, 
len)) 7703 hwbug = true; 7704 7705 if (tg3_4g_tso_overflow_test(tp, map, len, mss)) 7706 hwbug = true; 7707 7708 if (tg3_40bit_overflow_test(tp, map, len)) 7709 hwbug = true; 7710 7711 if (tp->dma_limit) { 7712 u32 prvidx = *entry; 7713 u32 tmp_flag = flags & ~TXD_FLAG_END; 7714 while (len > tp->dma_limit && *budget) { 7715 u32 frag_len = tp->dma_limit; 7716 len -= tp->dma_limit; 7717 7718 /* Avoid the 8-byte DMA problem */ 7719 if (len <= 8) { 7720 len += tp->dma_limit / 2; 7721 frag_len = tp->dma_limit / 2; 7722 } 7723 7724 tnapi->tx_buffers[*entry].fragmented = true; 7725 7726 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, 7727 frag_len, tmp_flag, mss, vlan); 7728 *budget -= 1; 7729 prvidx = *entry; 7730 *entry = NEXT_TX(*entry); 7731 7732 map += frag_len; 7733 } 7734 7735 if (len) { 7736 if (*budget) { 7737 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, 7738 len, flags, mss, vlan); 7739 *budget -= 1; 7740 *entry = NEXT_TX(*entry); 7741 } else { 7742 hwbug = true; 7743 tnapi->tx_buffers[prvidx].fragmented = false; 7744 } 7745 } 7746 } else { 7747 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, 7748 len, flags, mss, vlan); 7749 *entry = NEXT_TX(*entry); 7750 } 7751 7752 return hwbug; 7753 } 7754 7755 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last) 7756 { 7757 int i; 7758 struct sk_buff *skb; 7759 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry]; 7760 7761 skb = txb->skb; 7762 txb->skb = NULL; 7763 7764 dma_unmap_single(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping), 7765 skb_headlen(skb), DMA_TO_DEVICE); 7766 7767 while (txb->fragmented) { 7768 txb->fragmented = false; 7769 entry = NEXT_TX(entry); 7770 txb = &tnapi->tx_buffers[entry]; 7771 } 7772 7773 for (i = 0; i <= last; i++) { 7774 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 7775 7776 entry = NEXT_TX(entry); 7777 txb = &tnapi->tx_buffers[entry]; 7778 7779 dma_unmap_page(&tnapi->tp->pdev->dev, 7780 dma_unmap_addr(txb, mapping), 7781 skb_frag_size(frag), DMA_TO_DEVICE); 7782 7783 while (txb->fragmented) { 7784 txb->fragmented = false; 7785 entry = NEXT_TX(entry); 7786 txb = &tnapi->tx_buffers[entry]; 7787 } 7788 } 7789 } 7790 7791 /* Work around 4GB and 40-bit hardware DMA bugs. */ 7792 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, 7793 struct sk_buff **pskb, 7794 u32 *entry, u32 *budget, 7795 u32 base_flags, u32 mss, u32 vlan) 7796 { 7797 struct tg3 *tp = tnapi->tp; 7798 struct sk_buff *new_skb, *skb = *pskb; 7799 dma_addr_t new_addr = 0; 7800 int ret = 0; 7801 7802 if (tg3_asic_rev(tp) != ASIC_REV_5701) 7803 new_skb = skb_copy(skb, GFP_ATOMIC); 7804 else { 7805 int more_headroom = 4 - ((unsigned long)skb->data & 3); 7806 7807 new_skb = skb_copy_expand(skb, 7808 skb_headroom(skb) + more_headroom, 7809 skb_tailroom(skb), GFP_ATOMIC); 7810 } 7811 7812 if (!new_skb) { 7813 ret = -1; 7814 } else { 7815 /* New SKB is guaranteed to be linear.
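Both skb_copy() and skb_copy_expand() return a linear copy, so the single dma_map_single() below covers the whole frame.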
*/ 7816 new_addr = dma_map_single(&tp->pdev->dev, new_skb->data, 7817 new_skb->len, DMA_TO_DEVICE); 7818 /* Make sure the mapping succeeded */ 7819 if (dma_mapping_error(&tp->pdev->dev, new_addr)) { 7820 dev_kfree_skb_any(new_skb); 7821 ret = -1; 7822 } else { 7823 u32 save_entry = *entry; 7824 7825 base_flags |= TXD_FLAG_END; 7826 7827 tnapi->tx_buffers[*entry].skb = new_skb; 7828 dma_unmap_addr_set(&tnapi->tx_buffers[*entry], 7829 mapping, new_addr); 7830 7831 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr, 7832 new_skb->len, base_flags, 7833 mss, vlan)) { 7834 tg3_tx_skb_unmap(tnapi, save_entry, -1); 7835 dev_kfree_skb_any(new_skb); 7836 ret = -1; 7837 } 7838 } 7839 } 7840 7841 dev_consume_skb_any(skb); 7842 *pskb = new_skb; 7843 return ret; 7844 } 7845 7846 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb) 7847 { 7848 /* Check if we will never have enough descriptors, 7849 * as gso_segs can be more than current ring size 7850 */ 7851 return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3; 7852 } 7853 7854 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *); 7855 7856 /* Use GSO to work around all TSO packets that meet HW bug conditions 7857 * indicated in tg3_tx_frag_set() 7858 */ 7859 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi, 7860 struct netdev_queue *txq, struct sk_buff *skb) 7861 { 7862 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3; 7863 struct sk_buff *segs, *seg, *next; 7864 7865 /* Estimate the number of fragments in the worst case */ 7866 if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) { 7867 netif_tx_stop_queue(txq); 7868 7869 /* netif_tx_stop_queue() must be done before checking 7870 * the tx index in tg3_tx_avail() below, because in 7871 * tg3_tx(), we update tx index before checking for 7872 * netif_tx_queue_stopped(). 7873 */ 7874 smp_mb(); 7875 if (tg3_tx_avail(tnapi) <= frag_cnt_est) 7876 return NETDEV_TX_BUSY; 7877 7878 netif_tx_wake_queue(txq); 7879 } 7880 7881 segs = skb_gso_segment(skb, tp->dev->features & 7882 ~(NETIF_F_TSO | NETIF_F_TSO6)); 7883 if (IS_ERR(segs) || !segs) 7884 goto tg3_tso_bug_end; 7885 7886 skb_list_walk_safe(segs, seg, next) { 7887 skb_mark_not_on_list(seg); 7888 tg3_start_xmit(seg, tp->dev); 7889 } 7890 7891 tg3_tso_bug_end: 7892 dev_consume_skb_any(skb); 7893 7894 return NETDEV_TX_OK; 7895 } 7896 7897 /* hard_start_xmit for all devices */ 7898 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) 7899 { 7900 struct tg3 *tp = netdev_priv(dev); 7901 u32 len, entry, base_flags, mss, vlan = 0; 7902 u32 budget; 7903 int i = -1, would_hit_hwbug; 7904 dma_addr_t mapping; 7905 struct tg3_napi *tnapi; 7906 struct netdev_queue *txq; 7907 unsigned int last; 7908 struct iphdr *iph = NULL; 7909 struct tcphdr *tcph = NULL; 7910 __sum16 tcp_csum = 0, ip_csum = 0; 7911 __be16 ip_tot_len = 0; 7912 7913 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); 7914 tnapi = &tp->napi[skb_get_queue_mapping(skb)]; 7915 if (tg3_flag(tp, ENABLE_TSS)) 7916 tnapi++; 7917 7918 budget = tg3_tx_avail(tnapi); 7919 7920 /* We are running in BH disabled context with netif_tx_lock 7921 * and TX reclaim runs via tp->napi.poll inside of a software 7922 * interrupt. Furthermore, IRQ processing runs lockless so we have 7923 * no IRQ context deadlocks to worry about either. Rejoice! 7924 */ 7925 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) { 7926 if (!netif_tx_queue_stopped(txq)) { 7927 netif_tx_stop_queue(txq); 7928 7929 /* This is a hard error, log it.
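The stop/wake threshold logic should have stopped the queue long before the ring filled, so landing here means the producer/consumer accounting is inconsistent.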
*/ 7930 netdev_err(dev, 7931 "BUG! Tx Ring full when queue awake!\n"); 7932 } 7933 return NETDEV_TX_BUSY; 7934 } 7935 7936 entry = tnapi->tx_prod; 7937 base_flags = 0; 7938 7939 mss = skb_shinfo(skb)->gso_size; 7940 if (mss) { 7941 u32 tcp_opt_len, hdr_len; 7942 7943 if (skb_cow_head(skb, 0)) 7944 goto drop; 7945 7946 iph = ip_hdr(skb); 7947 tcp_opt_len = tcp_optlen(skb); 7948 7949 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN; 7950 7951 /* HW/FW can not correctly segment packets that have been 7952 * vlan encapsulated. 7953 */ 7954 if (skb->protocol == htons(ETH_P_8021Q) || 7955 skb->protocol == htons(ETH_P_8021AD)) { 7956 if (tg3_tso_bug_gso_check(tnapi, skb)) 7957 return tg3_tso_bug(tp, tnapi, txq, skb); 7958 goto drop; 7959 } 7960 7961 if (!skb_is_gso_v6(skb)) { 7962 if (unlikely((ETH_HLEN + hdr_len) > 80) && 7963 tg3_flag(tp, TSO_BUG)) { 7964 if (tg3_tso_bug_gso_check(tnapi, skb)) 7965 return tg3_tso_bug(tp, tnapi, txq, skb); 7966 goto drop; 7967 } 7968 ip_csum = iph->check; 7969 ip_tot_len = iph->tot_len; 7970 iph->check = 0; 7971 iph->tot_len = htons(mss + hdr_len); 7972 } 7973 7974 base_flags |= (TXD_FLAG_CPU_PRE_DMA | 7975 TXD_FLAG_CPU_POST_DMA); 7976 7977 tcph = tcp_hdr(skb); 7978 tcp_csum = tcph->check; 7979 7980 if (tg3_flag(tp, HW_TSO_1) || 7981 tg3_flag(tp, HW_TSO_2) || 7982 tg3_flag(tp, HW_TSO_3)) { 7983 tcph->check = 0; 7984 base_flags &= ~TXD_FLAG_TCPUDP_CSUM; 7985 } else { 7986 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 7987 0, IPPROTO_TCP, 0); 7988 } 7989 7990 if (tg3_flag(tp, HW_TSO_3)) { 7991 mss |= (hdr_len & 0xc) << 12; 7992 if (hdr_len & 0x10) 7993 base_flags |= 0x00000010; 7994 base_flags |= (hdr_len & 0x3e0) << 5; 7995 } else if (tg3_flag(tp, HW_TSO_2)) 7996 mss |= hdr_len << 9; 7997 else if (tg3_flag(tp, HW_TSO_1) || 7998 tg3_asic_rev(tp) == ASIC_REV_5705) { 7999 if (tcp_opt_len || iph->ihl > 5) { 8000 int tsflags; 8001 8002 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); 8003 mss |= (tsflags << 11); 8004 } 8005 } else { 8006 if (tcp_opt_len || iph->ihl > 5) { 8007 int tsflags; 8008 8009 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); 8010 base_flags |= tsflags << 12; 8011 } 8012 } 8013 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 8014 /* HW/FW can not correctly checksum packets that have been 8015 * vlan encapsulated. 8016 */ 8017 if (skb->protocol == htons(ETH_P_8021Q) || 8018 skb->protocol == htons(ETH_P_8021AD)) { 8019 if (skb_checksum_help(skb)) 8020 goto drop; 8021 } else { 8022 base_flags |= TXD_FLAG_TCPUDP_CSUM; 8023 } 8024 } 8025 8026 if (tg3_flag(tp, USE_JUMBO_BDFLAG) && 8027 !mss && skb->len > VLAN_ETH_FRAME_LEN) 8028 base_flags |= TXD_FLAG_JMB_PKT; 8029 8030 if (skb_vlan_tag_present(skb)) { 8031 base_flags |= TXD_FLAG_VLAN; 8032 vlan = skb_vlan_tag_get(skb); 8033 } 8034 8035 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) && 8036 tg3_flag(tp, TX_TSTAMP_EN)) { 8037 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 8038 base_flags |= TXD_FLAG_HWTSTAMP; 8039 } 8040 8041 len = skb_headlen(skb); 8042 8043 mapping = dma_map_single(&tp->pdev->dev, skb->data, len, 8044 DMA_TO_DEVICE); 8045 if (dma_mapping_error(&tp->pdev->dev, mapping)) 8046 goto drop; 8047 8048 8049 tnapi->tx_buffers[entry].skb = skb; 8050 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); 8051 8052 would_hit_hwbug = 0; 8053 8054 if (tg3_flag(tp, 5701_DMA_BUG)) 8055 would_hit_hwbug = 1; 8056 8057 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags | 8058 ((skb_shinfo(skb)->nr_frags == 0) ? 
TXD_FLAG_END : 0), 8059 mss, vlan)) { 8060 would_hit_hwbug = 1; 8061 } else if (skb_shinfo(skb)->nr_frags > 0) { 8062 u32 tmp_mss = mss; 8063 8064 if (!tg3_flag(tp, HW_TSO_1) && 8065 !tg3_flag(tp, HW_TSO_2) && 8066 !tg3_flag(tp, HW_TSO_3)) 8067 tmp_mss = 0; 8068 8069 /* Now loop through additional data 8070 * fragments, and queue them. 8071 */ 8072 last = skb_shinfo(skb)->nr_frags - 1; 8073 for (i = 0; i <= last; i++) { 8074 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 8075 8076 len = skb_frag_size(frag); 8077 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0, 8078 len, DMA_TO_DEVICE); 8079 8080 tnapi->tx_buffers[entry].skb = NULL; 8081 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, 8082 mapping); 8083 if (dma_mapping_error(&tp->pdev->dev, mapping)) 8084 goto dma_error; 8085 8086 if (!budget || 8087 tg3_tx_frag_set(tnapi, &entry, &budget, mapping, 8088 len, base_flags | 8089 ((i == last) ? TXD_FLAG_END : 0), 8090 tmp_mss, vlan)) { 8091 would_hit_hwbug = 1; 8092 break; 8093 } 8094 } 8095 } 8096 8097 if (would_hit_hwbug) { 8098 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i); 8099 8100 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) { 8101 /* If it's a TSO packet, do GSO instead of 8102 * allocating and copying to a large linear SKB 8103 */ 8104 if (ip_tot_len) { 8105 iph->check = ip_csum; 8106 iph->tot_len = ip_tot_len; 8107 } 8108 tcph->check = tcp_csum; 8109 return tg3_tso_bug(tp, tnapi, txq, skb); 8110 } 8111 8112 /* If the workaround fails due to memory/mapping 8113 * failure, silently drop this packet. 8114 */ 8115 entry = tnapi->tx_prod; 8116 budget = tg3_tx_avail(tnapi); 8117 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget, 8118 base_flags, mss, vlan)) 8119 goto drop_nofree; 8120 } 8121 8122 skb_tx_timestamp(skb); 8123 netdev_tx_sent_queue(txq, skb->len); 8124 8125 /* Sync BD data before updating mailbox */ 8126 wmb(); 8127 8128 tnapi->tx_prod = entry; 8129 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) { 8130 netif_tx_stop_queue(txq); 8131 8132 /* netif_tx_stop_queue() must be done before checking 8133 * the tx index in tg3_tx_avail() below, because in 8134 * tg3_tx(), we update tx index before checking for 8135 * netif_tx_queue_stopped(). 8136 */ 8137 smp_mb(); 8138 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)) 8139 netif_tx_wake_queue(txq); 8140 } 8141 8142 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) { 8143 /* Packets are ready, update Tx producer idx on card.
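When netdev_xmit_more() indicates more frames are queued, the doorbell is deferred so that several packets can be posted with a single mailbox write.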
*/ 8144 tw32_tx_mbox(tnapi->prodmbox, entry); 8145 } 8146 8147 return NETDEV_TX_OK; 8148 8149 dma_error: 8150 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i); 8151 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL; 8152 drop: 8153 dev_kfree_skb_any(skb); 8154 drop_nofree: 8155 tp->tx_dropped++; 8156 return NETDEV_TX_OK; 8157 } 8158 8159 static void tg3_mac_loopback(struct tg3 *tp, bool enable) 8160 { 8161 if (enable) { 8162 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX | 8163 MAC_MODE_PORT_MODE_MASK); 8164 8165 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK; 8166 8167 if (!tg3_flag(tp, 5705_PLUS)) 8168 tp->mac_mode |= MAC_MODE_LINK_POLARITY; 8169 8170 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) 8171 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 8172 else 8173 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 8174 } else { 8175 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK; 8176 8177 if (tg3_flag(tp, 5705_PLUS) || 8178 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) || 8179 tg3_asic_rev(tp) == ASIC_REV_5700) 8180 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; 8181 } 8182 8183 tw32(MAC_MODE, tp->mac_mode); 8184 udelay(40); 8185 } 8186 8187 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk) 8188 { 8189 u32 val, bmcr, mac_mode, ptest = 0; 8190 8191 tg3_phy_toggle_apd(tp, false); 8192 tg3_phy_toggle_automdix(tp, false); 8193 8194 if (extlpbk && tg3_phy_set_extloopbk(tp)) 8195 return -EIO; 8196 8197 bmcr = BMCR_FULLDPLX; 8198 switch (speed) { 8199 case SPEED_10: 8200 break; 8201 case SPEED_100: 8202 bmcr |= BMCR_SPEED100; 8203 break; 8204 case SPEED_1000: 8205 default: 8206 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 8207 speed = SPEED_100; 8208 bmcr |= BMCR_SPEED100; 8209 } else { 8210 speed = SPEED_1000; 8211 bmcr |= BMCR_SPEED1000; 8212 } 8213 } 8214 8215 if (extlpbk) { 8216 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) { 8217 tg3_readphy(tp, MII_CTRL1000, &val); 8218 val |= CTL1000_AS_MASTER | 8219 CTL1000_ENABLE_MASTER; 8220 tg3_writephy(tp, MII_CTRL1000, val); 8221 } else { 8222 ptest = MII_TG3_FET_PTEST_TRIM_SEL | 8223 MII_TG3_FET_PTEST_TRIM_2; 8224 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest); 8225 } 8226 } else 8227 bmcr |= BMCR_LOOPBACK; 8228 8229 tg3_writephy(tp, MII_BMCR, bmcr); 8230 8231 /* The write needs to be flushed for the FETs */ 8232 if (tp->phy_flags & TG3_PHYFLG_IS_FET) 8233 tg3_readphy(tp, MII_BMCR, &bmcr); 8234 8235 udelay(40); 8236 8237 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) && 8238 tg3_asic_rev(tp) == ASIC_REV_5785) { 8239 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest | 8240 MII_TG3_FET_PTEST_FRC_TX_LINK | 8241 MII_TG3_FET_PTEST_FRC_TX_LOCK); 8242 8243 /* The write needs to be flushed for the AC131 */ 8244 tg3_readphy(tp, MII_TG3_FET_PTEST, &val); 8245 } 8246 8247 /* Reset to prevent losing 1st rx packet intermittently */ 8248 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && 8249 tg3_flag(tp, 5780_CLASS)) { 8250 tw32_f(MAC_RX_MODE, RX_MODE_RESET); 8251 udelay(10); 8252 tw32_f(MAC_RX_MODE, tp->rx_mode); 8253 } 8254 8255 mac_mode = tp->mac_mode & 8256 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); 8257 if (speed == SPEED_1000) 8258 mac_mode |= MAC_MODE_PORT_MODE_GMII; 8259 else 8260 mac_mode |= MAC_MODE_PORT_MODE_MII; 8261 8262 if (tg3_asic_rev(tp) == ASIC_REV_5700) { 8263 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK; 8264 8265 if (masked_phy_id == TG3_PHY_ID_BCM5401) 8266 mac_mode &= ~MAC_MODE_LINK_POLARITY; 8267 else if (masked_phy_id == TG3_PHY_ID_BCM5411) 8268 mac_mode |= MAC_MODE_LINK_POLARITY; 8269 8270 tg3_writephy(tp, MII_TG3_EXT_CTRL, 8271 MII_TG3_EXT_CTRL_LNK3_LED_MODE); 8272 } 8273 8274 tw32(MAC_MODE, 
mac_mode); 8275 udelay(40); 8276 8277 return 0; 8278 } 8279 8280 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features) 8281 { 8282 struct tg3 *tp = netdev_priv(dev); 8283 8284 if (features & NETIF_F_LOOPBACK) { 8285 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK) 8286 return; 8287 8288 spin_lock_bh(&tp->lock); 8289 tg3_mac_loopback(tp, true); 8290 netif_carrier_on(tp->dev); 8291 spin_unlock_bh(&tp->lock); 8292 netdev_info(dev, "Internal MAC loopback mode enabled.\n"); 8293 } else { 8294 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)) 8295 return; 8296 8297 spin_lock_bh(&tp->lock); 8298 tg3_mac_loopback(tp, false); 8299 /* Force link status check */ 8300 tg3_setup_phy(tp, true); 8301 spin_unlock_bh(&tp->lock); 8302 netdev_info(dev, "Internal MAC loopback mode disabled.\n"); 8303 } 8304 } 8305 8306 static netdev_features_t tg3_fix_features(struct net_device *dev, 8307 netdev_features_t features) 8308 { 8309 struct tg3 *tp = netdev_priv(dev); 8310 8311 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS)) 8312 features &= ~NETIF_F_ALL_TSO; 8313 8314 return features; 8315 } 8316 8317 static int tg3_set_features(struct net_device *dev, netdev_features_t features) 8318 { 8319 netdev_features_t changed = dev->features ^ features; 8320 8321 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev)) 8322 tg3_set_loopback(dev, features); 8323 8324 return 0; 8325 } 8326 8327 static void tg3_rx_prodring_free(struct tg3 *tp, 8328 struct tg3_rx_prodring_set *tpr) 8329 { 8330 int i; 8331 8332 if (tpr != &tp->napi[0].prodring) { 8333 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx; 8334 i = (i + 1) & tp->rx_std_ring_mask) 8335 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i], 8336 tp->rx_pkt_map_sz); 8337 8338 if (tg3_flag(tp, JUMBO_CAPABLE)) { 8339 for (i = tpr->rx_jmb_cons_idx; 8340 i != tpr->rx_jmb_prod_idx; 8341 i = (i + 1) & tp->rx_jmb_ring_mask) { 8342 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i], 8343 TG3_RX_JMB_MAP_SZ); 8344 } 8345 } 8346 8347 return; 8348 } 8349 8350 for (i = 0; i <= tp->rx_std_ring_mask; i++) 8351 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i], 8352 tp->rx_pkt_map_sz); 8353 8354 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) { 8355 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) 8356 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i], 8357 TG3_RX_JMB_MAP_SZ); 8358 } 8359 } 8360 8361 /* Initialize rx rings for packet processing. 8362 * 8363 * The chip has been shut down and the driver detached from 8364 * the networking, so no interrupts or new tx packets will 8365 * end up in the driver. tp->{tx,}lock are held and thus 8366 * we may not sleep. 8367 */ 8368 static int tg3_rx_prodring_alloc(struct tg3 *tp, 8369 struct tg3_rx_prodring_set *tpr) 8370 { 8371 u32 i, rx_pkt_dma_sz; 8372 8373 tpr->rx_std_cons_idx = 0; 8374 tpr->rx_std_prod_idx = 0; 8375 tpr->rx_jmb_cons_idx = 0; 8376 tpr->rx_jmb_prod_idx = 0; 8377 8378 if (tpr != &tp->napi[0].prodring) { 8379 memset(&tpr->rx_std_buffers[0], 0, 8380 TG3_RX_STD_BUFF_RING_SIZE(tp)); 8381 if (tpr->rx_jmb_buffers) 8382 memset(&tpr->rx_jmb_buffers[0], 0, 8383 TG3_RX_JMB_BUFF_RING_SIZE(tp)); 8384 goto done; 8385 } 8386 8387 /* Zero out all descriptors. */ 8388 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp)); 8389 8390 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ; 8391 if (tg3_flag(tp, 5780_CLASS) && 8392 tp->dev->mtu > ETH_DATA_LEN) 8393 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ; 8394 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz); 8395 8396 /* Initialize invariants of the rings, we only set this 8397 * stuff once. 
This works because the card does not 8398 * write into the rx buffer posting rings. 8399 */ 8400 for (i = 0; i <= tp->rx_std_ring_mask; i++) { 8401 struct tg3_rx_buffer_desc *rxd; 8402 8403 rxd = &tpr->rx_std[i]; 8404 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT; 8405 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT); 8406 rxd->opaque = (RXD_OPAQUE_RING_STD | 8407 (i << RXD_OPAQUE_INDEX_SHIFT)); 8408 } 8409 8410 /* Now allocate fresh SKBs for each rx ring. */ 8411 for (i = 0; i < tp->rx_pending; i++) { 8412 unsigned int frag_size; 8413 8414 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i, 8415 &frag_size) < 0) { 8416 netdev_warn(tp->dev, 8417 "Using a smaller RX standard ring. Only " 8418 "%d out of %d buffers were allocated " 8419 "successfully\n", i, tp->rx_pending); 8420 if (i == 0) 8421 goto initfail; 8422 tp->rx_pending = i; 8423 break; 8424 } 8425 } 8426 8427 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS)) 8428 goto done; 8429 8430 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp)); 8431 8432 if (!tg3_flag(tp, JUMBO_RING_ENABLE)) 8433 goto done; 8434 8435 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) { 8436 struct tg3_rx_buffer_desc *rxd; 8437 8438 rxd = &tpr->rx_jmb[i].std; 8439 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT; 8440 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) | 8441 RXD_FLAG_JUMBO; 8442 rxd->opaque = (RXD_OPAQUE_RING_JUMBO | 8443 (i << RXD_OPAQUE_INDEX_SHIFT)); 8444 } 8445 8446 for (i = 0; i < tp->rx_jumbo_pending; i++) { 8447 unsigned int frag_size; 8448 8449 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i, 8450 &frag_size) < 0) { 8451 netdev_warn(tp->dev, 8452 "Using a smaller RX jumbo ring. Only %d " 8453 "out of %d buffers were allocated " 8454 "successfully\n", i, tp->rx_jumbo_pending); 8455 if (i == 0) 8456 goto initfail; 8457 tp->rx_jumbo_pending = i; 8458 break; 8459 } 8460 } 8461 8462 done: 8463 return 0; 8464 8465 initfail: 8466 tg3_rx_prodring_free(tp, tpr); 8467 return -ENOMEM; 8468 } 8469 8470 static void tg3_rx_prodring_fini(struct tg3 *tp, 8471 struct tg3_rx_prodring_set *tpr) 8472 { 8473 kfree(tpr->rx_std_buffers); 8474 tpr->rx_std_buffers = NULL; 8475 kfree(tpr->rx_jmb_buffers); 8476 tpr->rx_jmb_buffers = NULL; 8477 if (tpr->rx_std) { 8478 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp), 8479 tpr->rx_std, tpr->rx_std_mapping); 8480 tpr->rx_std = NULL; 8481 } 8482 if (tpr->rx_jmb) { 8483 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp), 8484 tpr->rx_jmb, tpr->rx_jmb_mapping); 8485 tpr->rx_jmb = NULL; 8486 } 8487 } 8488 8489 static int tg3_rx_prodring_init(struct tg3 *tp, 8490 struct tg3_rx_prodring_set *tpr) 8491 { 8492 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp), 8493 GFP_KERNEL); 8494 if (!tpr->rx_std_buffers) 8495 return -ENOMEM; 8496 8497 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev, 8498 TG3_RX_STD_RING_BYTES(tp), 8499 &tpr->rx_std_mapping, 8500 GFP_KERNEL); 8501 if (!tpr->rx_std) 8502 goto err_out; 8503 8504 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) { 8505 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp), 8506 GFP_KERNEL); 8507 if (!tpr->rx_jmb_buffers) 8508 goto err_out; 8509 8510 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev, 8511 TG3_RX_JMB_RING_BYTES(tp), 8512 &tpr->rx_jmb_mapping, 8513 GFP_KERNEL); 8514 if (!tpr->rx_jmb) 8515 goto err_out; 8516 } 8517 8518 return 0; 8519 8520 err_out: 8521 tg3_rx_prodring_fini(tp, tpr); 8522 return -ENOMEM; 8523 } 8524 8525 /* Free up pending packets in all rx/tx rings. 
8526 * 8527 * The chip has been shut down and the driver detached from 8528 * the networking, so no interrupts or new tx packets will 8529 * end up in the driver. tp->{tx,}lock is not held and we are not 8530 * in an interrupt context and thus may sleep. 8531 */ 8532 static void tg3_free_rings(struct tg3 *tp) 8533 { 8534 int i, j; 8535 8536 for (j = 0; j < tp->irq_cnt; j++) { 8537 struct tg3_napi *tnapi = &tp->napi[j]; 8538 8539 tg3_rx_prodring_free(tp, &tnapi->prodring); 8540 8541 if (!tnapi->tx_buffers) 8542 continue; 8543 8544 for (i = 0; i < TG3_TX_RING_SIZE; i++) { 8545 struct sk_buff *skb = tnapi->tx_buffers[i].skb; 8546 8547 if (!skb) 8548 continue; 8549 8550 tg3_tx_skb_unmap(tnapi, i, 8551 skb_shinfo(skb)->nr_frags - 1); 8552 8553 dev_consume_skb_any(skb); 8554 } 8555 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j)); 8556 } 8557 } 8558 8559 /* Initialize tx/rx rings for packet processing. 8560 * 8561 * The chip has been shut down and the driver detached from 8562 * the networking, so no interrupts or new tx packets will 8563 * end up in the driver. tp->{tx,}lock are held and thus 8564 * we may not sleep. 8565 */ 8566 static int tg3_init_rings(struct tg3 *tp) 8567 { 8568 int i; 8569 8570 /* Free up all the SKBs. */ 8571 tg3_free_rings(tp); 8572 8573 for (i = 0; i < tp->irq_cnt; i++) { 8574 struct tg3_napi *tnapi = &tp->napi[i]; 8575 8576 tnapi->last_tag = 0; 8577 tnapi->last_irq_tag = 0; 8578 tnapi->hw_status->status = 0; 8579 tnapi->hw_status->status_tag = 0; 8580 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); 8581 8582 tnapi->tx_prod = 0; 8583 tnapi->tx_cons = 0; 8584 if (tnapi->tx_ring) 8585 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES); 8586 8587 tnapi->rx_rcb_ptr = 0; 8588 if (tnapi->rx_rcb) 8589 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); 8590 8591 if (tnapi->prodring.rx_std && 8592 tg3_rx_prodring_alloc(tp, &tnapi->prodring)) { 8593 tg3_free_rings(tp); 8594 return -ENOMEM; 8595 } 8596 } 8597 8598 return 0; 8599 } 8600 8601 static void tg3_mem_tx_release(struct tg3 *tp) 8602 { 8603 int i; 8604 8605 for (i = 0; i < tp->irq_max; i++) { 8606 struct tg3_napi *tnapi = &tp->napi[i]; 8607 8608 if (tnapi->tx_ring) { 8609 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES, 8610 tnapi->tx_ring, tnapi->tx_desc_mapping); 8611 tnapi->tx_ring = NULL; 8612 } 8613 8614 kfree(tnapi->tx_buffers); 8615 tnapi->tx_buffers = NULL; 8616 } 8617 } 8618 8619 static int tg3_mem_tx_acquire(struct tg3 *tp) 8620 { 8621 int i; 8622 struct tg3_napi *tnapi = &tp->napi[0]; 8623 8624 /* If multivector TSS is enabled, vector 0 does not handle 8625 * tx interrupts. Don't allocate any resources for it. 
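     *
     * Assumed example with multivector TSS and txq_cnt == 4:
     *   vector 0     - link and misc events only (skipped below via
     *                  the initial tnapi++)
     *   vectors 1-4  - one tx ring each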
     */
    if (tg3_flag(tp, ENABLE_TSS))
        tnapi++;

    for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
        tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
                        sizeof(struct tg3_tx_ring_info),
                        GFP_KERNEL);
        if (!tnapi->tx_buffers)
            goto err_out;

        tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
                            TG3_TX_RING_BYTES,
                            &tnapi->tx_desc_mapping,
                            GFP_KERNEL);
        if (!tnapi->tx_ring)
            goto err_out;
    }

    return 0;

err_out:
    tg3_mem_tx_release(tp);
    return -ENOMEM;
}

static void tg3_mem_rx_release(struct tg3 *tp)
{
    int i;

    for (i = 0; i < tp->irq_max; i++) {
        struct tg3_napi *tnapi = &tp->napi[i];

        tg3_rx_prodring_fini(tp, &tnapi->prodring);

        if (!tnapi->rx_rcb)
            continue;

        dma_free_coherent(&tp->pdev->dev,
                  TG3_RX_RCB_RING_BYTES(tp),
                  tnapi->rx_rcb,
                  tnapi->rx_rcb_mapping);
        tnapi->rx_rcb = NULL;
    }
}

static int tg3_mem_rx_acquire(struct tg3 *tp)
{
    unsigned int i, limit;

    limit = tp->rxq_cnt;

    /* If RSS is enabled, we need a (dummy) producer ring
     * set on vector zero.  This is the true hw prodring.
     */
    if (tg3_flag(tp, ENABLE_RSS))
        limit++;

    for (i = 0; i < limit; i++) {
        struct tg3_napi *tnapi = &tp->napi[i];

        if (tg3_rx_prodring_init(tp, &tnapi->prodring))
            goto err_out;

        /* If multivector RSS is enabled, vector 0
         * does not handle rx or tx interrupts.
         * Don't allocate any resources for it.
         */
        if (!i && tg3_flag(tp, ENABLE_RSS))
            continue;

        tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
                           TG3_RX_RCB_RING_BYTES(tp),
                           &tnapi->rx_rcb_mapping,
                           GFP_KERNEL);
        if (!tnapi->rx_rcb)
            goto err_out;
    }

    return 0;

err_out:
    tg3_mem_rx_release(tp);
    return -ENOMEM;
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
    int i;

    for (i = 0; i < tp->irq_cnt; i++) {
        struct tg3_napi *tnapi = &tp->napi[i];

        if (tnapi->hw_status) {
            dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
                      tnapi->hw_status,
                      tnapi->status_mapping);
            tnapi->hw_status = NULL;
        }
    }

    tg3_mem_rx_release(tp);
    tg3_mem_tx_release(tp);

    /* tp->hw_stats can be referenced safely:
     * 1. under rtnl_lock
     * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
     */
    if (tp->hw_stats) {
        dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
                  tp->hw_stats, tp->stats_mapping);
        tp->hw_stats = NULL;
    }
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
8748 */ 8749 static int tg3_alloc_consistent(struct tg3 *tp) 8750 { 8751 int i; 8752 8753 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev, 8754 sizeof(struct tg3_hw_stats), 8755 &tp->stats_mapping, GFP_KERNEL); 8756 if (!tp->hw_stats) 8757 goto err_out; 8758 8759 for (i = 0; i < tp->irq_cnt; i++) { 8760 struct tg3_napi *tnapi = &tp->napi[i]; 8761 struct tg3_hw_status *sblk; 8762 8763 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev, 8764 TG3_HW_STATUS_SIZE, 8765 &tnapi->status_mapping, 8766 GFP_KERNEL); 8767 if (!tnapi->hw_status) 8768 goto err_out; 8769 8770 sblk = tnapi->hw_status; 8771 8772 if (tg3_flag(tp, ENABLE_RSS)) { 8773 u16 *prodptr = NULL; 8774 8775 /* 8776 * When RSS is enabled, the status block format changes 8777 * slightly. The "rx_jumbo_consumer", "reserved", 8778 * and "rx_mini_consumer" members get mapped to the 8779 * other three rx return ring producer indexes. 8780 */ 8781 switch (i) { 8782 case 1: 8783 prodptr = &sblk->idx[0].rx_producer; 8784 break; 8785 case 2: 8786 prodptr = &sblk->rx_jumbo_consumer; 8787 break; 8788 case 3: 8789 prodptr = &sblk->reserved; 8790 break; 8791 case 4: 8792 prodptr = &sblk->rx_mini_consumer; 8793 break; 8794 } 8795 tnapi->rx_rcb_prod_idx = prodptr; 8796 } else { 8797 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer; 8798 } 8799 } 8800 8801 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp)) 8802 goto err_out; 8803 8804 return 0; 8805 8806 err_out: 8807 tg3_free_consistent(tp); 8808 return -ENOMEM; 8809 } 8810 8811 #define MAX_WAIT_CNT 1000 8812 8813 /* To stop a block, clear the enable bit and poll till it 8814 * clears. tp->lock is held. 8815 */ 8816 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent) 8817 { 8818 unsigned int i; 8819 u32 val; 8820 8821 if (tg3_flag(tp, 5705_PLUS)) { 8822 switch (ofs) { 8823 case RCVLSC_MODE: 8824 case DMAC_MODE: 8825 case MBFREE_MODE: 8826 case BUFMGR_MODE: 8827 case MEMARB_MODE: 8828 /* We can't enable/disable these bits of the 8829 * 5705/5750, just say success. 8830 */ 8831 return 0; 8832 8833 default: 8834 break; 8835 } 8836 } 8837 8838 val = tr32(ofs); 8839 val &= ~enable_bit; 8840 tw32_f(ofs, val); 8841 8842 for (i = 0; i < MAX_WAIT_CNT; i++) { 8843 if (pci_channel_offline(tp->pdev)) { 8844 dev_err(&tp->pdev->dev, 8845 "tg3_stop_block device offline, " 8846 "ofs=%lx enable_bit=%x\n", 8847 ofs, enable_bit); 8848 return -ENODEV; 8849 } 8850 8851 udelay(100); 8852 val = tr32(ofs); 8853 if ((val & enable_bit) == 0) 8854 break; 8855 } 8856 8857 if (i == MAX_WAIT_CNT && !silent) { 8858 dev_err(&tp->pdev->dev, 8859 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n", 8860 ofs, enable_bit); 8861 return -ENODEV; 8862 } 8863 8864 return 0; 8865 } 8866 8867 /* tp->lock is held. 
*/ 8868 static int tg3_abort_hw(struct tg3 *tp, bool silent) 8869 { 8870 int i, err; 8871 8872 tg3_disable_ints(tp); 8873 8874 if (pci_channel_offline(tp->pdev)) { 8875 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE); 8876 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE; 8877 err = -ENODEV; 8878 goto err_no_dev; 8879 } 8880 8881 tp->rx_mode &= ~RX_MODE_ENABLE; 8882 tw32_f(MAC_RX_MODE, tp->rx_mode); 8883 udelay(10); 8884 8885 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent); 8886 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent); 8887 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent); 8888 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent); 8889 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent); 8890 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent); 8891 8892 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent); 8893 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent); 8894 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent); 8895 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent); 8896 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent); 8897 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent); 8898 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent); 8899 8900 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE; 8901 tw32_f(MAC_MODE, tp->mac_mode); 8902 udelay(40); 8903 8904 tp->tx_mode &= ~TX_MODE_ENABLE; 8905 tw32_f(MAC_TX_MODE, tp->tx_mode); 8906 8907 for (i = 0; i < MAX_WAIT_CNT; i++) { 8908 udelay(100); 8909 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE)) 8910 break; 8911 } 8912 if (i >= MAX_WAIT_CNT) { 8913 dev_err(&tp->pdev->dev, 8914 "%s timed out, TX_MODE_ENABLE will not clear " 8915 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE)); 8916 err |= -ENODEV; 8917 } 8918 8919 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent); 8920 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent); 8921 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent); 8922 8923 tw32(FTQ_RESET, 0xffffffff); 8924 tw32(FTQ_RESET, 0x00000000); 8925 8926 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent); 8927 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent); 8928 8929 err_no_dev: 8930 for (i = 0; i < tp->irq_cnt; i++) { 8931 struct tg3_napi *tnapi = &tp->napi[i]; 8932 if (tnapi->hw_status) 8933 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); 8934 } 8935 8936 return err; 8937 } 8938 8939 /* Save PCI command register before chip reset */ 8940 static void tg3_save_pci_state(struct tg3 *tp) 8941 { 8942 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd); 8943 } 8944 8945 /* Restore PCI state after chip reset */ 8946 static void tg3_restore_pci_state(struct tg3 *tp) 8947 { 8948 u32 val; 8949 8950 /* Re-enable indirect register accesses. */ 8951 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, 8952 tp->misc_host_ctrl); 8953 8954 /* Set MAX PCI retry to zero. */ 8955 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE); 8956 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 && 8957 tg3_flag(tp, PCIX_MODE)) 8958 val |= PCISTATE_RETRY_SAME_DMA; 8959 /* Allow reads and writes to the APE register and memory space. 
*/ 8960 if (tg3_flag(tp, ENABLE_APE)) 8961 val |= PCISTATE_ALLOW_APE_CTLSPC_WR | 8962 PCISTATE_ALLOW_APE_SHMEM_WR | 8963 PCISTATE_ALLOW_APE_PSPACE_WR; 8964 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val); 8965 8966 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd); 8967 8968 if (!tg3_flag(tp, PCI_EXPRESS)) { 8969 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, 8970 tp->pci_cacheline_sz); 8971 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, 8972 tp->pci_lat_timer); 8973 } 8974 8975 /* Make sure PCI-X relaxed ordering bit is clear. */ 8976 if (tg3_flag(tp, PCIX_MODE)) { 8977 u16 pcix_cmd; 8978 8979 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 8980 &pcix_cmd); 8981 pcix_cmd &= ~PCI_X_CMD_ERO; 8982 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 8983 pcix_cmd); 8984 } 8985 8986 if (tg3_flag(tp, 5780_CLASS)) { 8987 8988 /* Chip reset on 5780 will reset MSI enable bit, 8989 * so need to restore it. 8990 */ 8991 if (tg3_flag(tp, USING_MSI)) { 8992 u16 ctrl; 8993 8994 pci_read_config_word(tp->pdev, 8995 tp->msi_cap + PCI_MSI_FLAGS, 8996 &ctrl); 8997 pci_write_config_word(tp->pdev, 8998 tp->msi_cap + PCI_MSI_FLAGS, 8999 ctrl | PCI_MSI_FLAGS_ENABLE); 9000 val = tr32(MSGINT_MODE); 9001 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE); 9002 } 9003 } 9004 } 9005 9006 static void tg3_override_clk(struct tg3 *tp) 9007 { 9008 u32 val; 9009 9010 switch (tg3_asic_rev(tp)) { 9011 case ASIC_REV_5717: 9012 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE); 9013 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val | 9014 TG3_CPMU_MAC_ORIDE_ENABLE); 9015 break; 9016 9017 case ASIC_REV_5719: 9018 case ASIC_REV_5720: 9019 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN); 9020 break; 9021 9022 default: 9023 return; 9024 } 9025 } 9026 9027 static void tg3_restore_clk(struct tg3 *tp) 9028 { 9029 u32 val; 9030 9031 switch (tg3_asic_rev(tp)) { 9032 case ASIC_REV_5717: 9033 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE); 9034 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, 9035 val & ~TG3_CPMU_MAC_ORIDE_ENABLE); 9036 break; 9037 9038 case ASIC_REV_5719: 9039 case ASIC_REV_5720: 9040 val = tr32(TG3_CPMU_CLCK_ORIDE); 9041 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN); 9042 break; 9043 9044 default: 9045 return; 9046 } 9047 } 9048 9049 /* tp->lock is held. */ 9050 static int tg3_chip_reset(struct tg3 *tp) 9051 __releases(tp->lock) 9052 __acquires(tp->lock) 9053 { 9054 u32 val; 9055 void (*write_op)(struct tg3 *, u32, u32); 9056 int i, err; 9057 9058 if (!pci_device_is_present(tp->pdev)) 9059 return -ENODEV; 9060 9061 tg3_nvram_lock(tp); 9062 9063 tg3_ape_lock(tp, TG3_APE_LOCK_GRC); 9064 9065 /* No matching tg3_nvram_unlock() after this because 9066 * chip reset below will undo the nvram lock. 9067 */ 9068 tp->nvram_lock_cnt = 0; 9069 9070 /* GRC_MISC_CFG core clock reset will clear the memory 9071 * enable bit in PCI register 4 and the MSI enable bit 9072 * on some chips, so we save relevant registers here. 9073 */ 9074 tg3_save_pci_state(tp); 9075 9076 if (tg3_asic_rev(tp) == ASIC_REV_5752 || 9077 tg3_flag(tp, 5755_PLUS)) 9078 tw32(GRC_FASTBOOT_PC, 0); 9079 9080 /* 9081 * We must avoid the readl() that normally takes place. 9082 * It locks machines, causes machine checks, and other 9083 * fun things. So, temporarily disable the 5701 9084 * hardware workaround, while we do the reset. 
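     *
     * Concretely: tp->write32 is switched from tg3_write_flush_reg32
     * (write plus read back) to plain tg3_write32 for the duration of
     * the reset, and restored once the reset command has been issued.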
9085 */ 9086 write_op = tp->write32; 9087 if (write_op == tg3_write_flush_reg32) 9088 tp->write32 = tg3_write32; 9089 9090 /* Prevent the irq handler from reading or writing PCI registers 9091 * during chip reset when the memory enable bit in the PCI command 9092 * register may be cleared. The chip does not generate interrupt 9093 * at this time, but the irq handler may still be called due to irq 9094 * sharing or irqpoll. 9095 */ 9096 tg3_flag_set(tp, CHIP_RESETTING); 9097 for (i = 0; i < tp->irq_cnt; i++) { 9098 struct tg3_napi *tnapi = &tp->napi[i]; 9099 if (tnapi->hw_status) { 9100 tnapi->hw_status->status = 0; 9101 tnapi->hw_status->status_tag = 0; 9102 } 9103 tnapi->last_tag = 0; 9104 tnapi->last_irq_tag = 0; 9105 } 9106 smp_mb(); 9107 9108 tg3_full_unlock(tp); 9109 9110 for (i = 0; i < tp->irq_cnt; i++) 9111 synchronize_irq(tp->napi[i].irq_vec); 9112 9113 tg3_full_lock(tp, 0); 9114 9115 if (tg3_asic_rev(tp) == ASIC_REV_57780) { 9116 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; 9117 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); 9118 } 9119 9120 /* do the reset */ 9121 val = GRC_MISC_CFG_CORECLK_RESET; 9122 9123 if (tg3_flag(tp, PCI_EXPRESS)) { 9124 /* Force PCIe 1.0a mode */ 9125 if (tg3_asic_rev(tp) != ASIC_REV_5785 && 9126 !tg3_flag(tp, 57765_PLUS) && 9127 tr32(TG3_PCIE_PHY_TSTCTL) == 9128 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM)) 9129 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM); 9130 9131 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) { 9132 tw32(GRC_MISC_CFG, (1 << 29)); 9133 val |= (1 << 29); 9134 } 9135 } 9136 9137 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 9138 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET); 9139 tw32(GRC_VCPU_EXT_CTRL, 9140 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU); 9141 } 9142 9143 /* Set the clock to the highest frequency to avoid timeouts. With link 9144 * aware mode, the clock speed could be slow and bootcode does not 9145 * complete within the expected time. Override the clock to allow the 9146 * bootcode to finish sooner and then restore it. 9147 */ 9148 tg3_override_clk(tp); 9149 9150 /* Manage gphy power for all CPMU absent PCIe devices. */ 9151 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT)) 9152 val |= GRC_MISC_CFG_KEEP_GPHY_POWER; 9153 9154 tw32(GRC_MISC_CFG, val); 9155 9156 /* restore 5701 hardware bug workaround write method */ 9157 tp->write32 = write_op; 9158 9159 /* Unfortunately, we have to delay before the PCI read back. 9160 * Some 575X chips even will not respond to a PCI cfg access 9161 * when the reset command is given to the chip. 9162 * 9163 * How do these hardware designers expect things to work 9164 * properly if the PCI write is posted for a long period 9165 * of time? It is always necessary to have some method by 9166 * which a register read back can occur to push the write 9167 * out which does the reset. 9168 * 9169 * For most tg3 variants the trick below was working. 9170 * Ho hum... 9171 */ 9172 udelay(120); 9173 9174 /* Flush PCI posted writes. The normal MMIO registers 9175 * are inaccessible at this time so this is the only 9176 * way to make this reliably (actually, this is no longer 9177 * the case, see above). I tried to use indirect 9178 * register read/write but this upset some 5701 variants. 
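     *
     * The PCI_COMMAND config space read below is the method used
     * here: a config read forces the posted reset write out without
     * touching the (currently inaccessible) MMIO registers.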
9179 */ 9180 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val); 9181 9182 udelay(120); 9183 9184 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) { 9185 u16 val16; 9186 9187 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) { 9188 int j; 9189 u32 cfg_val; 9190 9191 /* Wait for link training to complete. */ 9192 for (j = 0; j < 5000; j++) 9193 udelay(100); 9194 9195 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val); 9196 pci_write_config_dword(tp->pdev, 0xc4, 9197 cfg_val | (1 << 15)); 9198 } 9199 9200 /* Clear the "no snoop" and "relaxed ordering" bits. */ 9201 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN; 9202 /* 9203 * Older PCIe devices only support the 128 byte 9204 * MPS setting. Enforce the restriction. 9205 */ 9206 if (!tg3_flag(tp, CPMU_PRESENT)) 9207 val16 |= PCI_EXP_DEVCTL_PAYLOAD; 9208 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16); 9209 9210 /* Clear error status */ 9211 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA, 9212 PCI_EXP_DEVSTA_CED | 9213 PCI_EXP_DEVSTA_NFED | 9214 PCI_EXP_DEVSTA_FED | 9215 PCI_EXP_DEVSTA_URD); 9216 } 9217 9218 tg3_restore_pci_state(tp); 9219 9220 tg3_flag_clear(tp, CHIP_RESETTING); 9221 tg3_flag_clear(tp, ERROR_PROCESSED); 9222 9223 val = 0; 9224 if (tg3_flag(tp, 5780_CLASS)) 9225 val = tr32(MEMARB_MODE); 9226 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); 9227 9228 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) { 9229 tg3_stop_fw(tp); 9230 tw32(0x5000, 0x400); 9231 } 9232 9233 if (tg3_flag(tp, IS_SSB_CORE)) { 9234 /* 9235 * BCM4785: In order to avoid repercussions from using 9236 * potentially defective internal ROM, stop the Rx RISC CPU, 9237 * which is not required. 9238 */ 9239 tg3_stop_fw(tp); 9240 tg3_halt_cpu(tp, RX_CPU_BASE); 9241 } 9242 9243 err = tg3_poll_fw(tp); 9244 if (err) 9245 return err; 9246 9247 tw32(GRC_MODE, tp->grc_mode); 9248 9249 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) { 9250 val = tr32(0xc4); 9251 9252 tw32(0xc4, val | (1 << 15)); 9253 } 9254 9255 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 && 9256 tg3_asic_rev(tp) == ASIC_REV_5705) { 9257 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE; 9258 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) 9259 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN; 9260 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); 9261 } 9262 9263 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 9264 tp->mac_mode = MAC_MODE_PORT_MODE_TBI; 9265 val = tp->mac_mode; 9266 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { 9267 tp->mac_mode = MAC_MODE_PORT_MODE_GMII; 9268 val = tp->mac_mode; 9269 } else 9270 val = 0; 9271 9272 tw32_f(MAC_MODE, val); 9273 udelay(40); 9274 9275 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC); 9276 9277 tg3_mdio_start(tp); 9278 9279 if (tg3_flag(tp, PCI_EXPRESS) && 9280 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 && 9281 tg3_asic_rev(tp) != ASIC_REV_5785 && 9282 !tg3_flag(tp, 57765_PLUS)) { 9283 val = tr32(0x7c00); 9284 9285 tw32(0x7c00, val | (1 << 25)); 9286 } 9287 9288 tg3_restore_clk(tp); 9289 9290 /* Increase the core clock speed to fix tx timeout issue for 5762 9291 * with 100Mbps link speed. 9292 */ 9293 if (tg3_asic_rev(tp) == ASIC_REV_5762) { 9294 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE); 9295 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val | 9296 TG3_CPMU_MAC_ORIDE_ENABLE); 9297 } 9298 9299 /* Reprobe ASF enable state. 
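     *
     * The reset wiped the flags cached in tp, so re-read
     * NIC_SRAM_DATA_SIG / NIC_SRAM_DATA_CFG from NIC SRAM and rebuild
     * ENABLE_ASF, ASF_NEW_HANDSHAKE and the related phy_flags bits.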
*/ 9300 tg3_flag_clear(tp, ENABLE_ASF); 9301 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK | 9302 TG3_PHYFLG_KEEP_LINK_ON_PWRDN); 9303 9304 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE); 9305 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); 9306 if (val == NIC_SRAM_DATA_SIG_MAGIC) { 9307 u32 nic_cfg; 9308 9309 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); 9310 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { 9311 tg3_flag_set(tp, ENABLE_ASF); 9312 tp->last_event_jiffies = jiffies; 9313 if (tg3_flag(tp, 5750_PLUS)) 9314 tg3_flag_set(tp, ASF_NEW_HANDSHAKE); 9315 9316 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg); 9317 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK) 9318 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK; 9319 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID) 9320 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN; 9321 } 9322 } 9323 9324 return 0; 9325 } 9326 9327 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *); 9328 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *); 9329 static void __tg3_set_rx_mode(struct net_device *); 9330 9331 /* tp->lock is held. */ 9332 static int tg3_halt(struct tg3 *tp, int kind, bool silent) 9333 { 9334 int err; 9335 9336 tg3_stop_fw(tp); 9337 9338 tg3_write_sig_pre_reset(tp, kind); 9339 9340 tg3_abort_hw(tp, silent); 9341 err = tg3_chip_reset(tp); 9342 9343 __tg3_set_mac_addr(tp, false); 9344 9345 tg3_write_sig_legacy(tp, kind); 9346 tg3_write_sig_post_reset(tp, kind); 9347 9348 if (tp->hw_stats) { 9349 /* Save the stats across chip resets... */ 9350 tg3_get_nstats(tp, &tp->net_stats_prev); 9351 tg3_get_estats(tp, &tp->estats_prev); 9352 9353 /* And make sure the next sample is new data */ 9354 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); 9355 } 9356 9357 return err; 9358 } 9359 9360 static int tg3_set_mac_addr(struct net_device *dev, void *p) 9361 { 9362 struct tg3 *tp = netdev_priv(dev); 9363 struct sockaddr *addr = p; 9364 int err = 0; 9365 bool skip_mac_1 = false; 9366 9367 if (!is_valid_ether_addr(addr->sa_data)) 9368 return -EADDRNOTAVAIL; 9369 9370 eth_hw_addr_set(dev, addr->sa_data); 9371 9372 if (!netif_running(dev)) 9373 return 0; 9374 9375 if (tg3_flag(tp, ENABLE_ASF)) { 9376 u32 addr0_high, addr0_low, addr1_high, addr1_low; 9377 9378 addr0_high = tr32(MAC_ADDR_0_HIGH); 9379 addr0_low = tr32(MAC_ADDR_0_LOW); 9380 addr1_high = tr32(MAC_ADDR_1_HIGH); 9381 addr1_low = tr32(MAC_ADDR_1_LOW); 9382 9383 /* Skip MAC addr 1 if ASF is using it. */ 9384 if ((addr0_high != addr1_high || addr0_low != addr1_low) && 9385 !(addr1_high == 0 && addr1_low == 0)) 9386 skip_mac_1 = true; 9387 } 9388 spin_lock_bh(&tp->lock); 9389 __tg3_set_mac_addr(tp, skip_mac_1); 9390 __tg3_set_rx_mode(dev); 9391 spin_unlock_bh(&tp->lock); 9392 9393 return err; 9394 } 9395 9396 /* tp->lock is held. 
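 *
 * tg3_set_bdinfo() fills one TG3_BDINFO block in NIC SRAM: the 64-bit
 * host DMA address of the ring (high and low halves), the
 * maxlen/flags word and, on pre-5705 chips only, the ring's address
 * in NIC SRAM.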
*/ 9397 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr, 9398 dma_addr_t mapping, u32 maxlen_flags, 9399 u32 nic_addr) 9400 { 9401 tg3_write_mem(tp, 9402 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH), 9403 ((u64) mapping >> 32)); 9404 tg3_write_mem(tp, 9405 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW), 9406 ((u64) mapping & 0xffffffff)); 9407 tg3_write_mem(tp, 9408 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS), 9409 maxlen_flags); 9410 9411 if (!tg3_flag(tp, 5705_PLUS)) 9412 tg3_write_mem(tp, 9413 (bdinfo_addr + TG3_BDINFO_NIC_ADDR), 9414 nic_addr); 9415 } 9416 9417 9418 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec) 9419 { 9420 int i = 0; 9421 9422 if (!tg3_flag(tp, ENABLE_TSS)) { 9423 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs); 9424 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames); 9425 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq); 9426 } else { 9427 tw32(HOSTCC_TXCOL_TICKS, 0); 9428 tw32(HOSTCC_TXMAX_FRAMES, 0); 9429 tw32(HOSTCC_TXCOAL_MAXF_INT, 0); 9430 9431 for (; i < tp->txq_cnt; i++) { 9432 u32 reg; 9433 9434 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18; 9435 tw32(reg, ec->tx_coalesce_usecs); 9436 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18; 9437 tw32(reg, ec->tx_max_coalesced_frames); 9438 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18; 9439 tw32(reg, ec->tx_max_coalesced_frames_irq); 9440 } 9441 } 9442 9443 for (; i < tp->irq_max - 1; i++) { 9444 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0); 9445 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0); 9446 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); 9447 } 9448 } 9449 9450 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec) 9451 { 9452 int i = 0; 9453 u32 limit = tp->rxq_cnt; 9454 9455 if (!tg3_flag(tp, ENABLE_RSS)) { 9456 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs); 9457 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames); 9458 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq); 9459 limit--; 9460 } else { 9461 tw32(HOSTCC_RXCOL_TICKS, 0); 9462 tw32(HOSTCC_RXMAX_FRAMES, 0); 9463 tw32(HOSTCC_RXCOAL_MAXF_INT, 0); 9464 } 9465 9466 for (; i < limit; i++) { 9467 u32 reg; 9468 9469 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18; 9470 tw32(reg, ec->rx_coalesce_usecs); 9471 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18; 9472 tw32(reg, ec->rx_max_coalesced_frames); 9473 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18; 9474 tw32(reg, ec->rx_max_coalesced_frames_irq); 9475 } 9476 9477 for (; i < tp->irq_max - 1; i++) { 9478 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0); 9479 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0); 9480 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); 9481 } 9482 } 9483 9484 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) 9485 { 9486 tg3_coal_tx_init(tp, ec); 9487 tg3_coal_rx_init(tp, ec); 9488 9489 if (!tg3_flag(tp, 5705_PLUS)) { 9490 u32 val = ec->stats_block_coalesce_usecs; 9491 9492 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq); 9493 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq); 9494 9495 if (!tp->link_up) 9496 val = 0; 9497 9498 tw32(HOSTCC_STAT_COAL_TICKS, val); 9499 } 9500 } 9501 9502 /* tp->lock is held. */ 9503 static void tg3_tx_rcbs_disable(struct tg3 *tp) 9504 { 9505 u32 txrcb, limit; 9506 9507 /* Disable all transmit rings but the first. 
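     *
     * How many send BDINFO slots exist depends on the chip family
     * (16, 4, 2 or 1, per the limit computed below); every slot past
     * the first is parked by writing BDINFO_FLAGS_DISABLED into its
     * maxlen/flags word.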
*/ 9508 if (!tg3_flag(tp, 5705_PLUS)) 9509 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16; 9510 else if (tg3_flag(tp, 5717_PLUS)) 9511 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4; 9512 else if (tg3_flag(tp, 57765_CLASS) || 9513 tg3_asic_rev(tp) == ASIC_REV_5762) 9514 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2; 9515 else 9516 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; 9517 9518 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; 9519 txrcb < limit; txrcb += TG3_BDINFO_SIZE) 9520 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS, 9521 BDINFO_FLAGS_DISABLED); 9522 } 9523 9524 /* tp->lock is held. */ 9525 static void tg3_tx_rcbs_init(struct tg3 *tp) 9526 { 9527 int i = 0; 9528 u32 txrcb = NIC_SRAM_SEND_RCB; 9529 9530 if (tg3_flag(tp, ENABLE_TSS)) 9531 i++; 9532 9533 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) { 9534 struct tg3_napi *tnapi = &tp->napi[i]; 9535 9536 if (!tnapi->tx_ring) 9537 continue; 9538 9539 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, 9540 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT), 9541 NIC_SRAM_TX_BUFFER_DESC); 9542 } 9543 } 9544 9545 /* tp->lock is held. */ 9546 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp) 9547 { 9548 u32 rxrcb, limit; 9549 9550 /* Disable all receive return rings but the first. */ 9551 if (tg3_flag(tp, 5717_PLUS)) 9552 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17; 9553 else if (!tg3_flag(tp, 5705_PLUS)) 9554 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16; 9555 else if (tg3_asic_rev(tp) == ASIC_REV_5755 || 9556 tg3_asic_rev(tp) == ASIC_REV_5762 || 9557 tg3_flag(tp, 57765_CLASS)) 9558 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4; 9559 else 9560 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; 9561 9562 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; 9563 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE) 9564 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS, 9565 BDINFO_FLAGS_DISABLED); 9566 } 9567 9568 /* tp->lock is held. */ 9569 static void tg3_rx_ret_rcbs_init(struct tg3 *tp) 9570 { 9571 int i = 0; 9572 u32 rxrcb = NIC_SRAM_RCV_RET_RCB; 9573 9574 if (tg3_flag(tp, ENABLE_RSS)) 9575 i++; 9576 9577 for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) { 9578 struct tg3_napi *tnapi = &tp->napi[i]; 9579 9580 if (!tnapi->rx_rcb) 9581 continue; 9582 9583 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, 9584 (tp->rx_ret_ring_mask + 1) << 9585 BDINFO_FLAGS_MAXLEN_SHIFT, 0); 9586 } 9587 } 9588 9589 /* tp->lock is held. */ 9590 static void tg3_rings_reset(struct tg3 *tp) 9591 { 9592 int i; 9593 u32 stblk; 9594 struct tg3_napi *tnapi = &tp->napi[0]; 9595 9596 tg3_tx_rcbs_disable(tp); 9597 9598 tg3_rx_ret_rcbs_disable(tp); 9599 9600 /* Disable interrupts */ 9601 tw32_mailbox_f(tp->napi[0].int_mbox, 1); 9602 tp->napi[0].chk_msi_cnt = 0; 9603 tp->napi[0].last_rx_cons = 0; 9604 tp->napi[0].last_tx_cons = 0; 9605 9606 /* Zero mailbox registers. 
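     *
     * For each extra MSI-X vector this clears the tx producer and rx
     * consumer mailboxes and writes 1 to the interrupt mailbox, which
     * (as in tg3_disable_ints) leaves that vector masked until it is
     * re-armed later.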
*/ 9607 if (tg3_flag(tp, SUPPORT_MSIX)) { 9608 for (i = 1; i < tp->irq_max; i++) { 9609 tp->napi[i].tx_prod = 0; 9610 tp->napi[i].tx_cons = 0; 9611 if (tg3_flag(tp, ENABLE_TSS)) 9612 tw32_mailbox(tp->napi[i].prodmbox, 0); 9613 tw32_rx_mbox(tp->napi[i].consmbox, 0); 9614 tw32_mailbox_f(tp->napi[i].int_mbox, 1); 9615 tp->napi[i].chk_msi_cnt = 0; 9616 tp->napi[i].last_rx_cons = 0; 9617 tp->napi[i].last_tx_cons = 0; 9618 } 9619 if (!tg3_flag(tp, ENABLE_TSS)) 9620 tw32_mailbox(tp->napi[0].prodmbox, 0); 9621 } else { 9622 tp->napi[0].tx_prod = 0; 9623 tp->napi[0].tx_cons = 0; 9624 tw32_mailbox(tp->napi[0].prodmbox, 0); 9625 tw32_rx_mbox(tp->napi[0].consmbox, 0); 9626 } 9627 9628 /* Make sure the NIC-based send BD rings are disabled. */ 9629 if (!tg3_flag(tp, 5705_PLUS)) { 9630 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW; 9631 for (i = 0; i < 16; i++) 9632 tw32_tx_mbox(mbox + i * 8, 0); 9633 } 9634 9635 /* Clear status block in ram. */ 9636 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); 9637 9638 /* Set status block DMA address */ 9639 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, 9640 ((u64) tnapi->status_mapping >> 32)); 9641 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, 9642 ((u64) tnapi->status_mapping & 0xffffffff)); 9643 9644 stblk = HOSTCC_STATBLCK_RING1; 9645 9646 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) { 9647 u64 mapping = (u64)tnapi->status_mapping; 9648 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32); 9649 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff); 9650 stblk += 8; 9651 9652 /* Clear status block in ram. */ 9653 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); 9654 } 9655 9656 tg3_tx_rcbs_init(tp); 9657 tg3_rx_ret_rcbs_init(tp); 9658 } 9659 9660 static void tg3_setup_rxbd_thresholds(struct tg3 *tp) 9661 { 9662 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh; 9663 9664 if (!tg3_flag(tp, 5750_PLUS) || 9665 tg3_flag(tp, 5780_CLASS) || 9666 tg3_asic_rev(tp) == ASIC_REV_5750 || 9667 tg3_asic_rev(tp) == ASIC_REV_5752 || 9668 tg3_flag(tp, 57765_PLUS)) 9669 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700; 9670 else if (tg3_asic_rev(tp) == ASIC_REV_5755 || 9671 tg3_asic_rev(tp) == ASIC_REV_5787) 9672 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755; 9673 else 9674 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906; 9675 9676 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post); 9677 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1); 9678 9679 val = min(nic_rep_thresh, host_rep_thresh); 9680 tw32(RCVBDI_STD_THRESH, val); 9681 9682 if (tg3_flag(tp, 57765_PLUS)) 9683 tw32(STD_REPLENISH_LWM, bdcache_maxcnt); 9684 9685 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS)) 9686 return; 9687 9688 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700; 9689 9690 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1); 9691 9692 val = min(bdcache_maxcnt / 2, host_rep_thresh); 9693 tw32(RCVBDI_JUMBO_THRESH, val); 9694 9695 if (tg3_flag(tp, 57765_PLUS)) 9696 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt); 9697 } 9698 9699 static inline u32 calc_crc(unsigned char *buf, int len) 9700 { 9701 u32 reg; 9702 u32 tmp; 9703 int j, k; 9704 9705 reg = 0xffffffff; 9706 9707 for (j = 0; j < len; j++) { 9708 reg ^= buf[j]; 9709 9710 for (k = 0; k < 8; k++) { 9711 tmp = reg & 0x01; 9712 9713 reg >>= 1; 9714 9715 if (tmp) 9716 reg ^= CRC32_POLY_LE; 9717 } 9718 } 9719 9720 return ~reg; 9721 } 9722 9723 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all) 9724 { 9725 /* accept or reject all multicast frames */ 9726 
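    /* The four 32-bit hash registers form a single 128-bit filter:
     * e.g. tg3_set_multi(tp, 1) (the IFF_ALLMULTI case) opens the
     * filter completely, while tg3_set_multi(tp, 0) rejects every
     * multicast frame.
     */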
tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0); 9727 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0); 9728 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0); 9729 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0); 9730 } 9731 9732 static void __tg3_set_rx_mode(struct net_device *dev) 9733 { 9734 struct tg3 *tp = netdev_priv(dev); 9735 u32 rx_mode; 9736 9737 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC | 9738 RX_MODE_KEEP_VLAN_TAG); 9739 9740 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE) 9741 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG 9742 * flag clear. 9743 */ 9744 if (!tg3_flag(tp, ENABLE_ASF)) 9745 rx_mode |= RX_MODE_KEEP_VLAN_TAG; 9746 #endif 9747 9748 if (dev->flags & IFF_PROMISC) { 9749 /* Promiscuous mode. */ 9750 rx_mode |= RX_MODE_PROMISC; 9751 } else if (dev->flags & IFF_ALLMULTI) { 9752 /* Accept all multicast. */ 9753 tg3_set_multi(tp, 1); 9754 } else if (netdev_mc_empty(dev)) { 9755 /* Reject all multicast. */ 9756 tg3_set_multi(tp, 0); 9757 } else { 9758 /* Accept one or more multicast(s). */ 9759 struct netdev_hw_addr *ha; 9760 u32 mc_filter[4] = { 0, }; 9761 u32 regidx; 9762 u32 bit; 9763 u32 crc; 9764 9765 netdev_for_each_mc_addr(ha, dev) { 9766 crc = calc_crc(ha->addr, ETH_ALEN); 9767 bit = ~crc & 0x7f; 9768 regidx = (bit & 0x60) >> 5; 9769 bit &= 0x1f; 9770 mc_filter[regidx] |= (1 << bit); 9771 } 9772 9773 tw32(MAC_HASH_REG_0, mc_filter[0]); 9774 tw32(MAC_HASH_REG_1, mc_filter[1]); 9775 tw32(MAC_HASH_REG_2, mc_filter[2]); 9776 tw32(MAC_HASH_REG_3, mc_filter[3]); 9777 } 9778 9779 if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) { 9780 rx_mode |= RX_MODE_PROMISC; 9781 } else if (!(dev->flags & IFF_PROMISC)) { 9782 /* Add all entries into to the mac addr filter list */ 9783 int i = 0; 9784 struct netdev_hw_addr *ha; 9785 9786 netdev_for_each_uc_addr(ha, dev) { 9787 __tg3_set_one_mac_addr(tp, ha->addr, 9788 i + TG3_UCAST_ADDR_IDX(tp)); 9789 i++; 9790 } 9791 } 9792 9793 if (rx_mode != tp->rx_mode) { 9794 tp->rx_mode = rx_mode; 9795 tw32_f(MAC_RX_MODE, rx_mode); 9796 udelay(10); 9797 } 9798 } 9799 9800 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt) 9801 { 9802 int i; 9803 9804 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) 9805 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt); 9806 } 9807 9808 static void tg3_rss_check_indir_tbl(struct tg3 *tp) 9809 { 9810 int i; 9811 9812 if (!tg3_flag(tp, SUPPORT_MSIX)) 9813 return; 9814 9815 if (tp->rxq_cnt == 1) { 9816 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl)); 9817 return; 9818 } 9819 9820 /* Validate table against current IRQ count */ 9821 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) { 9822 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt) 9823 break; 9824 } 9825 9826 if (i != TG3_RSS_INDIR_TBL_SIZE) 9827 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt); 9828 } 9829 9830 static void tg3_rss_write_indir_tbl(struct tg3 *tp) 9831 { 9832 int i = 0; 9833 u32 reg = MAC_RSS_INDIR_TBL_0; 9834 9835 while (i < TG3_RSS_INDIR_TBL_SIZE) { 9836 u32 val = tp->rss_ind_tbl[i]; 9837 i++; 9838 for (; i % 8; i++) { 9839 val <<= 4; 9840 val |= tp->rss_ind_tbl[i]; 9841 } 9842 tw32(reg, val); 9843 reg += 4; 9844 } 9845 } 9846 9847 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp) 9848 { 9849 if (tg3_asic_rev(tp) == ASIC_REV_5719) 9850 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719; 9851 else 9852 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720; 9853 } 9854 9855 /* tp->lock is held. 
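 *
 * tg3_reset_hw() is the main bring-up path: stop firmware and DMA,
 * reset the chip, re-initialize the rings, then reprogram the buffer
 * manager, DMA engines and MAC from scratch.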
*/ 9856 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy) 9857 { 9858 u32 val, rdmac_mode; 9859 int i, err, limit; 9860 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring; 9861 9862 tg3_disable_ints(tp); 9863 9864 tg3_stop_fw(tp); 9865 9866 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT); 9867 9868 if (tg3_flag(tp, INIT_COMPLETE)) 9869 tg3_abort_hw(tp, 1); 9870 9871 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && 9872 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) { 9873 tg3_phy_pull_config(tp); 9874 tg3_eee_pull_config(tp, NULL); 9875 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; 9876 } 9877 9878 /* Enable MAC control of LPI */ 9879 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) 9880 tg3_setup_eee(tp); 9881 9882 if (reset_phy) 9883 tg3_phy_reset(tp); 9884 9885 err = tg3_chip_reset(tp); 9886 if (err) 9887 return err; 9888 9889 tg3_write_sig_legacy(tp, RESET_KIND_INIT); 9890 9891 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) { 9892 val = tr32(TG3_CPMU_CTRL); 9893 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE); 9894 tw32(TG3_CPMU_CTRL, val); 9895 9896 val = tr32(TG3_CPMU_LSPD_10MB_CLK); 9897 val &= ~CPMU_LSPD_10MB_MACCLK_MASK; 9898 val |= CPMU_LSPD_10MB_MACCLK_6_25; 9899 tw32(TG3_CPMU_LSPD_10MB_CLK, val); 9900 9901 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD); 9902 val &= ~CPMU_LNK_AWARE_MACCLK_MASK; 9903 val |= CPMU_LNK_AWARE_MACCLK_6_25; 9904 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val); 9905 9906 val = tr32(TG3_CPMU_HST_ACC); 9907 val &= ~CPMU_HST_ACC_MACCLK_MASK; 9908 val |= CPMU_HST_ACC_MACCLK_6_25; 9909 tw32(TG3_CPMU_HST_ACC, val); 9910 } 9911 9912 if (tg3_asic_rev(tp) == ASIC_REV_57780) { 9913 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK; 9914 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN | 9915 PCIE_PWR_MGMT_L1_THRESH_4MS; 9916 tw32(PCIE_PWR_MGMT_THRESH, val); 9917 9918 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK; 9919 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS); 9920 9921 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR); 9922 9923 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; 9924 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); 9925 } 9926 9927 if (tg3_flag(tp, L1PLLPD_EN)) { 9928 u32 grc_mode = tr32(GRC_MODE); 9929 9930 /* Access the lower 1K of PL PCIE block registers. */ 9931 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; 9932 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL); 9933 9934 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1); 9935 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1, 9936 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN); 9937 9938 tw32(GRC_MODE, grc_mode); 9939 } 9940 9941 if (tg3_flag(tp, 57765_CLASS)) { 9942 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) { 9943 u32 grc_mode = tr32(GRC_MODE); 9944 9945 /* Access the lower 1K of PL PCIE block registers. */ 9946 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; 9947 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL); 9948 9949 val = tr32(TG3_PCIE_TLDLPL_PORT + 9950 TG3_PCIE_PL_LO_PHYCTL5); 9951 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5, 9952 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ); 9953 9954 tw32(GRC_MODE, grc_mode); 9955 } 9956 9957 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) { 9958 u32 grc_mode; 9959 9960 /* Fix transmit hangs */ 9961 val = tr32(TG3_CPMU_PADRNG_CTL); 9962 val |= TG3_CPMU_PADRNG_CTL_RDIV2; 9963 tw32(TG3_CPMU_PADRNG_CTL, val); 9964 9965 grc_mode = tr32(GRC_MODE); 9966 9967 /* Access the lower 1K of DL PCIE block registers. 
             */
            val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
            tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);

            val = tr32(TG3_PCIE_TLDLPL_PORT +
                   TG3_PCIE_DL_LO_FTSMAX);
            val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
            tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
                 val | TG3_PCIE_DL_LO_FTSMAX_VAL);

            tw32(GRC_MODE, grc_mode);
        }

        val = tr32(TG3_CPMU_LSPD_10MB_CLK);
        val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
        val |= CPMU_LSPD_10MB_MACCLK_6_25;
        tw32(TG3_CPMU_LSPD_10MB_CLK, val);
    }

    /* This works around an issue with Athlon chipsets on
     * B3 tigon3 silicon.  This bit has no effect on any
     * other revision.  But do not set this on PCI Express
     * chips and don't even touch the clocks if the CPMU is present.
     */
    if (!tg3_flag(tp, CPMU_PRESENT)) {
        if (!tg3_flag(tp, PCI_EXPRESS))
            tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
        tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
    }

    if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
        tg3_flag(tp, PCIX_MODE)) {
        val = tr32(TG3PCI_PCISTATE);
        val |= PCISTATE_RETRY_SAME_DMA;
        tw32(TG3PCI_PCISTATE, val);
    }

    if (tg3_flag(tp, ENABLE_APE)) {
        /* Allow reads and writes to the
         * APE register and memory space.
         */
        val = tr32(TG3PCI_PCISTATE);
        val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
               PCISTATE_ALLOW_APE_SHMEM_WR |
               PCISTATE_ALLOW_APE_PSPACE_WR;
        tw32(TG3PCI_PCISTATE, val);
    }

    if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
        /* Enable some hw fixes.  */
        val = tr32(TG3PCI_MSI_DATA);
        val |= (1 << 26) | (1 << 28) | (1 << 29);
        tw32(TG3PCI_MSI_DATA, val);
    }

    /* Descriptor ring init may make accesses to the
     * NIC SRAM area to setup the TX descriptors, so we
     * can only do this after the hardware has been
     * successfully reset.
     */
    err = tg3_init_rings(tp);
    if (err)
        return err;

    if (tg3_flag(tp, 57765_PLUS)) {
        val = tr32(TG3PCI_DMA_RW_CTRL) &
              ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
        if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
            val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
        if (!tg3_flag(tp, 57765_CLASS) &&
            tg3_asic_rev(tp) != ASIC_REV_5717 &&
            tg3_asic_rev(tp) != ASIC_REV_5762)
            val |= DMA_RWCTRL_TAGGED_STAT_WA;
        tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
    } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
           tg3_asic_rev(tp) != ASIC_REV_5761) {
        /* This value is determined during the probe time DMA
         * engine test, tg3_test_dma.
         */
        tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
    }

    tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
              GRC_MODE_4X_NIC_SEND_RINGS |
              GRC_MODE_NO_TX_PHDR_CSUM |
              GRC_MODE_NO_RX_PHDR_CSUM);
    tp->grc_mode |= GRC_MODE_HOST_SENDBDS;

    /* Pseudo-header checksum is done by hardware logic and not
     * the offload processors, so make the chip do the pseudo-
     * header checksums on receive.  For transmit it is more
     * convenient to do the pseudo-header checksum in software
     * as Linux does that on transmit for us in all cases.
     */
    tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;

    val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
    if (tp->rxptpctl)
        tw32(TG3_RX_PTP_CTL,
             tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

    if (tg3_flag(tp, PTP_CAPABLE))
        val |= GRC_MODE_TIME_SYNC_ENABLE;

    tw32(GRC_MODE, tp->grc_mode | val);

    /* On one of the AMD platforms, MRRS is restricted to 4000 because
     * of a south bridge limitation.  As a workaround, the driver sets
     * MRRS to 2048 instead of the default 4096.
     */
    if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
        tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
        val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
        tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
    }

    /* Set up the timer prescaler register.  The clock is always 66 MHz. */
    val = tr32(GRC_MISC_CFG);
    val &= ~0xff;
    val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
    tw32(GRC_MISC_CFG, val);

    /* Initialize MBUF/DESC pool. */
    if (tg3_flag(tp, 5750_PLUS)) {
        /* Do nothing.  */
    } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
        tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
        if (tg3_asic_rev(tp) == ASIC_REV_5704)
            tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
        else
            tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
        tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
        tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
    } else if (tg3_flag(tp, TSO_CAPABLE)) {
        int fw_len;

        fw_len = tp->fw_len;
        fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
        tw32(BUFMGR_MB_POOL_ADDR,
             NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
        tw32(BUFMGR_MB_POOL_SIZE,
             NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
    }

    if (tp->dev->mtu <= ETH_DATA_LEN) {
        tw32(BUFMGR_MB_RDMA_LOW_WATER,
             tp->bufmgr_config.mbuf_read_dma_low_water);
        tw32(BUFMGR_MB_MACRX_LOW_WATER,
             tp->bufmgr_config.mbuf_mac_rx_low_water);
        tw32(BUFMGR_MB_HIGH_WATER,
             tp->bufmgr_config.mbuf_high_water);
    } else {
        tw32(BUFMGR_MB_RDMA_LOW_WATER,
             tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
        tw32(BUFMGR_MB_MACRX_LOW_WATER,
             tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
        tw32(BUFMGR_MB_HIGH_WATER,
             tp->bufmgr_config.mbuf_high_water_jumbo);
    }
    tw32(BUFMGR_DMA_LOW_WATER,
         tp->bufmgr_config.dma_low_water);
    tw32(BUFMGR_DMA_HIGH_WATER,
         tp->bufmgr_config.dma_high_water);

    val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
    if (tg3_asic_rev(tp) == ASIC_REV_5719)
        val |= BUFMGR_MODE_NO_TX_UNDERRUN;
    if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
        tg3_asic_rev(tp) == ASIC_REV_5762 ||
        tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
        tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
        val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
    tw32(BUFMGR_MODE, val);
    for (i = 0; i < 2000; i++) {
        if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
            break;
        udelay(10);
    }
    if (i >= 2000) {
        netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
        return -ENODEV;
    }

    if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
        tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);

    tg3_setup_rxbd_thresholds(tp);

    /* Initialize TG3_BDINFO's at:
     *   RCVDBDI_STD_BD:  standard eth size rx ring
     *
RCVDBDI_JUMBO_BD: jumbo frame rx ring 10158 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work) 10159 * 10160 * like so: 10161 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring 10162 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) | 10163 * ring attribute flags 10164 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM 10165 * 10166 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries. 10167 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries. 10168 * 10169 * The size of each ring is fixed in the firmware, but the location is 10170 * configurable. 10171 */ 10172 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, 10173 ((u64) tpr->rx_std_mapping >> 32)); 10174 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, 10175 ((u64) tpr->rx_std_mapping & 0xffffffff)); 10176 if (!tg3_flag(tp, 5717_PLUS)) 10177 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, 10178 NIC_SRAM_RX_BUFFER_DESC); 10179 10180 /* Disable the mini ring */ 10181 if (!tg3_flag(tp, 5705_PLUS)) 10182 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS, 10183 BDINFO_FLAGS_DISABLED); 10184 10185 /* Program the jumbo buffer descriptor ring control 10186 * blocks on those devices that have them. 10187 */ 10188 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || 10189 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) { 10190 10191 if (tg3_flag(tp, JUMBO_RING_ENABLE)) { 10192 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, 10193 ((u64) tpr->rx_jmb_mapping >> 32)); 10194 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, 10195 ((u64) tpr->rx_jmb_mapping & 0xffffffff)); 10196 val = TG3_RX_JMB_RING_SIZE(tp) << 10197 BDINFO_FLAGS_MAXLEN_SHIFT; 10198 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, 10199 val | BDINFO_FLAGS_USE_EXT_RECV); 10200 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) || 10201 tg3_flag(tp, 57765_CLASS) || 10202 tg3_asic_rev(tp) == ASIC_REV_5762) 10203 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, 10204 NIC_SRAM_RX_JUMBO_BUFFER_DESC); 10205 } else { 10206 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, 10207 BDINFO_FLAGS_DISABLED); 10208 } 10209 10210 if (tg3_flag(tp, 57765_PLUS)) { 10211 val = TG3_RX_STD_RING_SIZE(tp); 10212 val <<= BDINFO_FLAGS_MAXLEN_SHIFT; 10213 val |= (TG3_RX_STD_DMA_SZ << 2); 10214 } else 10215 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT; 10216 } else 10217 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT; 10218 10219 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val); 10220 10221 tpr->rx_std_prod_idx = tp->rx_pending; 10222 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx); 10223 10224 tpr->rx_jmb_prod_idx = 10225 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0; 10226 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx); 10227 10228 tg3_rings_reset(tp); 10229 10230 /* Initialize MAC address and backoff seed. */ 10231 __tg3_set_mac_addr(tp, false); 10232 10233 /* MTU + ethernet header + FCS + optional VLAN tag */ 10234 tw32(MAC_RX_MTU_SIZE, 10235 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); 10236 10237 /* The slot time is changed by tg3_setup_phy if we 10238 * run at gigabit with half duplex. 
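 *
 * (802.3 stretches the slot time from 64 to 512 byte times for
 * half-duplex gigabit so that collisions remain detectable; the
 * value written below is only the default for every other mode.)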
10239 */ 10240 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) | 10241 (6 << TX_LENGTHS_IPG_SHIFT) | 10242 (32 << TX_LENGTHS_SLOT_TIME_SHIFT); 10243 10244 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 10245 tg3_asic_rev(tp) == ASIC_REV_5762) 10246 val |= tr32(MAC_TX_LENGTHS) & 10247 (TX_LENGTHS_JMB_FRM_LEN_MSK | 10248 TX_LENGTHS_CNT_DWN_VAL_MSK); 10249 10250 tw32(MAC_TX_LENGTHS, val); 10251 10252 /* Receive rules. */ 10253 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS); 10254 tw32(RCVLPC_CONFIG, 0x0181); 10255 10256 /* Calculate RDMAC_MODE setting early, we need it to determine 10257 * the RCVLPC_STATE_ENABLE mask. 10258 */ 10259 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB | 10260 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB | 10261 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB | 10262 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB | 10263 RDMAC_MODE_LNGREAD_ENAB); 10264 10265 if (tg3_asic_rev(tp) == ASIC_REV_5717) 10266 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS; 10267 10268 if (tg3_asic_rev(tp) == ASIC_REV_5784 || 10269 tg3_asic_rev(tp) == ASIC_REV_5785 || 10270 tg3_asic_rev(tp) == ASIC_REV_57780) 10271 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB | 10272 RDMAC_MODE_MBUF_RBD_CRPT_ENAB | 10273 RDMAC_MODE_MBUF_SBD_CRPT_ENAB; 10274 10275 if (tg3_asic_rev(tp) == ASIC_REV_5705 && 10276 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) { 10277 if (tg3_flag(tp, TSO_CAPABLE)) { 10278 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128; 10279 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && 10280 !tg3_flag(tp, IS_5788)) { 10281 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; 10282 } 10283 } 10284 10285 if (tg3_flag(tp, PCI_EXPRESS)) 10286 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; 10287 10288 if (tg3_asic_rev(tp) == ASIC_REV_57766) { 10289 tp->dma_limit = 0; 10290 if (tp->dev->mtu <= ETH_DATA_LEN) { 10291 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR; 10292 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K; 10293 } 10294 } 10295 10296 if (tg3_flag(tp, HW_TSO_1) || 10297 tg3_flag(tp, HW_TSO_2) || 10298 tg3_flag(tp, HW_TSO_3)) 10299 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN; 10300 10301 if (tg3_flag(tp, 57765_PLUS) || 10302 tg3_asic_rev(tp) == ASIC_REV_5785 || 10303 tg3_asic_rev(tp) == ASIC_REV_57780) 10304 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN; 10305 10306 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 10307 tg3_asic_rev(tp) == ASIC_REV_5762) 10308 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET; 10309 10310 if (tg3_asic_rev(tp) == ASIC_REV_5761 || 10311 tg3_asic_rev(tp) == ASIC_REV_5784 || 10312 tg3_asic_rev(tp) == ASIC_REV_5785 || 10313 tg3_asic_rev(tp) == ASIC_REV_57780 || 10314 tg3_flag(tp, 57765_PLUS)) { 10315 u32 tgtreg; 10316 10317 if (tg3_asic_rev(tp) == ASIC_REV_5762) 10318 tgtreg = TG3_RDMA_RSRVCTRL_REG2; 10319 else 10320 tgtreg = TG3_RDMA_RSRVCTRL_REG; 10321 10322 val = tr32(tgtreg); 10323 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || 10324 tg3_asic_rev(tp) == ASIC_REV_5762) { 10325 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK | 10326 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK | 10327 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK); 10328 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B | 10329 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K | 10330 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K; 10331 } 10332 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX); 10333 } 10334 10335 if (tg3_asic_rev(tp) == ASIC_REV_5719 || 10336 tg3_asic_rev(tp) == ASIC_REV_5720 || 10337 tg3_asic_rev(tp) == ASIC_REV_5762) { 10338 u32 tgtreg; 10339 10340 if (tg3_asic_rev(tp) == ASIC_REV_5762) 10341 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2; 10342 else 10343 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL; 
10344 10345 val = tr32(tgtreg); 10346 tw32(tgtreg, val | 10347 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K | 10348 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K); 10349 } 10350 10351 /* Receive/send statistics. */ 10352 if (tg3_flag(tp, 5750_PLUS)) { 10353 val = tr32(RCVLPC_STATS_ENABLE); 10354 val &= ~RCVLPC_STATSENAB_DACK_FIX; 10355 tw32(RCVLPC_STATS_ENABLE, val); 10356 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) && 10357 tg3_flag(tp, TSO_CAPABLE)) { 10358 val = tr32(RCVLPC_STATS_ENABLE); 10359 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX; 10360 tw32(RCVLPC_STATS_ENABLE, val); 10361 } else { 10362 tw32(RCVLPC_STATS_ENABLE, 0xffffff); 10363 } 10364 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE); 10365 tw32(SNDDATAI_STATSENAB, 0xffffff); 10366 tw32(SNDDATAI_STATSCTRL, 10367 (SNDDATAI_SCTRL_ENABLE | 10368 SNDDATAI_SCTRL_FASTUPD)); 10369 10370 /* Setup host coalescing engine. */ 10371 tw32(HOSTCC_MODE, 0); 10372 for (i = 0; i < 2000; i++) { 10373 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE)) 10374 break; 10375 udelay(10); 10376 } 10377 10378 __tg3_set_coalesce(tp, &tp->coal); 10379 10380 if (!tg3_flag(tp, 5705_PLUS)) { 10381 /* Status/statistics block address. See tg3_timer, 10382 * the tg3_periodic_fetch_stats call there, and 10383 * tg3_get_stats to see how this works for 5705/5750 chips. 10384 */ 10385 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, 10386 ((u64) tp->stats_mapping >> 32)); 10387 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, 10388 ((u64) tp->stats_mapping & 0xffffffff)); 10389 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK); 10390 10391 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK); 10392 10393 /* Clear statistics and status block memory areas */ 10394 for (i = NIC_SRAM_STATS_BLK; 10395 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE; 10396 i += sizeof(u32)) { 10397 tg3_write_mem(tp, i, 0); 10398 udelay(40); 10399 } 10400 } 10401 10402 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode); 10403 10404 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE); 10405 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE); 10406 if (!tg3_flag(tp, 5705_PLUS)) 10407 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE); 10408 10409 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { 10410 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 10411 /* reset to prevent losing 1st rx packet intermittently */ 10412 tw32_f(MAC_RX_MODE, RX_MODE_RESET); 10413 udelay(10); 10414 } 10415 10416 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | 10417 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | 10418 MAC_MODE_FHDE_ENABLE; 10419 if (tg3_flag(tp, ENABLE_APE)) 10420 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; 10421 if (!tg3_flag(tp, 5705_PLUS) && 10422 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 10423 tg3_asic_rev(tp) != ASIC_REV_5700) 10424 tp->mac_mode |= MAC_MODE_LINK_POLARITY; 10425 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR); 10426 udelay(40); 10427 10428 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants(). 10429 * If TG3_FLAG_IS_NIC is zero, we should read the 10430 * register to preserve the GPIO settings for LOMs. The GPIOs, 10431 * whether used as inputs or outputs, are set by boot code after 10432 * reset. 
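 *
 * (LOM = LAN-on-motherboard. On those designs the GPIOs can drive
 * board-specific logic, so reprogramming them blindly here could
 * have side effects outside this driver.)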
10433 */ 10434 if (!tg3_flag(tp, IS_NIC)) { 10435 u32 gpio_mask; 10436 10437 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 | 10438 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 | 10439 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2; 10440 10441 if (tg3_asic_rev(tp) == ASIC_REV_5752) 10442 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 | 10443 GRC_LCLCTRL_GPIO_OUTPUT3; 10444 10445 if (tg3_asic_rev(tp) == ASIC_REV_5755) 10446 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL; 10447 10448 tp->grc_local_ctrl &= ~gpio_mask; 10449 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask; 10450 10451 /* GPIO1 must be driven high for eeprom write protect */ 10452 if (tg3_flag(tp, EEPROM_WRITE_PROT)) 10453 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | 10454 GRC_LCLCTRL_GPIO_OUTPUT1); 10455 } 10456 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 10457 udelay(100); 10458 10459 if (tg3_flag(tp, USING_MSIX)) { 10460 val = tr32(MSGINT_MODE); 10461 val |= MSGINT_MODE_ENABLE; 10462 if (tp->irq_cnt > 1) 10463 val |= MSGINT_MODE_MULTIVEC_EN; 10464 if (!tg3_flag(tp, 1SHOT_MSI)) 10465 val |= MSGINT_MODE_ONE_SHOT_DISABLE; 10466 tw32(MSGINT_MODE, val); 10467 } 10468 10469 if (!tg3_flag(tp, 5705_PLUS)) { 10470 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE); 10471 udelay(40); 10472 } 10473 10474 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB | 10475 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB | 10476 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB | 10477 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB | 10478 WDMAC_MODE_LNGREAD_ENAB); 10479 10480 if (tg3_asic_rev(tp) == ASIC_REV_5705 && 10481 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) { 10482 if (tg3_flag(tp, TSO_CAPABLE) && 10483 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 || 10484 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) { 10485 /* nothing */ 10486 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && 10487 !tg3_flag(tp, IS_5788)) { 10488 val |= WDMAC_MODE_RX_ACCEL; 10489 } 10490 } 10491 10492 /* Enable host coalescing bug fix */ 10493 if (tg3_flag(tp, 5755_PLUS)) 10494 val |= WDMAC_MODE_STATUS_TAG_FIX; 10495 10496 if (tg3_asic_rev(tp) == ASIC_REV_5785) 10497 val |= WDMAC_MODE_BURST_ALL_DATA; 10498 10499 tw32_f(WDMAC_MODE, val); 10500 udelay(40); 10501 10502 if (tg3_flag(tp, PCIX_MODE)) { 10503 u16 pcix_cmd; 10504 10505 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 10506 &pcix_cmd); 10507 if (tg3_asic_rev(tp) == ASIC_REV_5703) { 10508 pcix_cmd &= ~PCI_X_CMD_MAX_READ; 10509 pcix_cmd |= PCI_X_CMD_READ_2K; 10510 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) { 10511 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ); 10512 pcix_cmd |= PCI_X_CMD_READ_2K; 10513 } 10514 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 10515 pcix_cmd); 10516 } 10517 10518 tw32_f(RDMAC_MODE, rdmac_mode); 10519 udelay(40); 10520 10521 if (tg3_asic_rev(tp) == ASIC_REV_5719 || 10522 tg3_asic_rev(tp) == ASIC_REV_5720) { 10523 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) { 10524 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp)) 10525 break; 10526 } 10527 if (i < TG3_NUM_RDMA_CHANNELS) { 10528 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); 10529 val |= tg3_lso_rd_dma_workaround_bit(tp); 10530 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val); 10531 tg3_flag_set(tp, 5719_5720_RDMA_BUG); 10532 } 10533 } 10534 10535 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE); 10536 if (!tg3_flag(tp, 5705_PLUS)) 10537 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE); 10538 10539 if (tg3_asic_rev(tp) == ASIC_REV_5761) 10540 tw32(SNDDATAC_MODE, 10541 SNDDATAC_MODE_ENABLE | 
SNDDATAC_MODE_CDELAY); 10542 else 10543 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE); 10544 10545 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE); 10546 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB); 10547 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ; 10548 if (tg3_flag(tp, LRG_PROD_RING_CAP)) 10549 val |= RCVDBDI_MODE_LRG_RING_SZ; 10550 tw32(RCVDBDI_MODE, val); 10551 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE); 10552 if (tg3_flag(tp, HW_TSO_1) || 10553 tg3_flag(tp, HW_TSO_2) || 10554 tg3_flag(tp, HW_TSO_3)) 10555 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8); 10556 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE; 10557 if (tg3_flag(tp, ENABLE_TSS)) 10558 val |= SNDBDI_MODE_MULTI_TXQ_EN; 10559 tw32(SNDBDI_MODE, val); 10560 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE); 10561 10562 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) { 10563 err = tg3_load_5701_a0_firmware_fix(tp); 10564 if (err) 10565 return err; 10566 } 10567 10568 if (tg3_asic_rev(tp) == ASIC_REV_57766) { 10569 /* Ignore any errors for the firmware download. If download 10570 * fails, the device will operate with EEE disabled 10571 */ 10572 tg3_load_57766_firmware(tp); 10573 } 10574 10575 if (tg3_flag(tp, TSO_CAPABLE)) { 10576 err = tg3_load_tso_firmware(tp); 10577 if (err) 10578 return err; 10579 } 10580 10581 tp->tx_mode = TX_MODE_ENABLE; 10582 10583 if (tg3_flag(tp, 5755_PLUS) || 10584 tg3_asic_rev(tp) == ASIC_REV_5906) 10585 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX; 10586 10587 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 10588 tg3_asic_rev(tp) == ASIC_REV_5762) { 10589 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE; 10590 tp->tx_mode &= ~val; 10591 tp->tx_mode |= tr32(MAC_TX_MODE) & val; 10592 } 10593 10594 tw32_f(MAC_TX_MODE, tp->tx_mode); 10595 udelay(100); 10596 10597 if (tg3_flag(tp, ENABLE_RSS)) { 10598 u32 rss_key[10]; 10599 10600 tg3_rss_write_indir_tbl(tp); 10601 10602 netdev_rss_key_fill(rss_key, 10 * sizeof(u32)); 10603 10604 for (i = 0; i < 10 ; i++) 10605 tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]); 10606 } 10607 10608 tp->rx_mode = RX_MODE_ENABLE; 10609 if (tg3_flag(tp, 5755_PLUS)) 10610 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE; 10611 10612 if (tg3_asic_rev(tp) == ASIC_REV_5762) 10613 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX; 10614 10615 if (tg3_flag(tp, ENABLE_RSS)) 10616 tp->rx_mode |= RX_MODE_RSS_ENABLE | 10617 RX_MODE_RSS_ITBL_HASH_BITS_7 | 10618 RX_MODE_RSS_IPV6_HASH_EN | 10619 RX_MODE_RSS_TCP_IPV6_HASH_EN | 10620 RX_MODE_RSS_IPV4_HASH_EN | 10621 RX_MODE_RSS_TCP_IPV4_HASH_EN; 10622 10623 tw32_f(MAC_RX_MODE, tp->rx_mode); 10624 udelay(10); 10625 10626 tw32(MAC_LED_CTRL, tp->led_ctrl); 10627 10628 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); 10629 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 10630 tw32_f(MAC_RX_MODE, RX_MODE_RESET); 10631 udelay(10); 10632 } 10633 tw32_f(MAC_RX_MODE, tp->rx_mode); 10634 udelay(10); 10635 10636 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 10637 if ((tg3_asic_rev(tp) == ASIC_REV_5704) && 10638 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) { 10639 /* Set drive transmission level to 1.2V */ 10640 /* only if the signal pre-emphasis bit is not set */ 10641 val = tr32(MAC_SERDES_CFG); 10642 val &= 0xfffff000; 10643 val |= 0x880; 10644 tw32(MAC_SERDES_CFG, val); 10645 } 10646 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) 10647 tw32(MAC_SERDES_CFG, 0x616000); 10648 } 10649 10650 /* Prevent chip from dropping frames when flow control 10651 * is enabled. 
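 *
 * (Going by its name, MAC_LOW_WMARK_MAX_RX_FRAME is the number of
 * frames the MAC may still accept once the low water mark is hit:
 * one for the 57765 class, two for everything else, per the code
 * below.)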
10652 */ 10653 if (tg3_flag(tp, 57765_CLASS)) 10654 val = 1; 10655 else 10656 val = 2; 10657 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val); 10658 10659 if (tg3_asic_rev(tp) == ASIC_REV_5704 && 10660 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { 10661 /* Use hardware link auto-negotiation */ 10662 tg3_flag_set(tp, HW_AUTONEG); 10663 } 10664 10665 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && 10666 tg3_asic_rev(tp) == ASIC_REV_5714) { 10667 u32 tmp; 10668 10669 tmp = tr32(SERDES_RX_CTRL); 10670 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT); 10671 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT; 10672 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT; 10673 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 10674 } 10675 10676 if (!tg3_flag(tp, USE_PHYLIB)) { 10677 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 10678 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; 10679 10680 err = tg3_setup_phy(tp, false); 10681 if (err) 10682 return err; 10683 10684 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 10685 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) { 10686 u32 tmp; 10687 10688 /* Clear CRC stats. */ 10689 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) { 10690 tg3_writephy(tp, MII_TG3_TEST1, 10691 tmp | MII_TG3_TEST1_CRC_EN); 10692 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp); 10693 } 10694 } 10695 } 10696 10697 __tg3_set_rx_mode(tp->dev); 10698 10699 /* Initialize receive rules. */ 10700 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK); 10701 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK); 10702 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK); 10703 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK); 10704 10705 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) 10706 limit = 8; 10707 else 10708 limit = 16; 10709 if (tg3_flag(tp, ENABLE_ASF)) 10710 limit -= 4; 10711 switch (limit) { 10712 case 16: 10713 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0); 10714 fallthrough; 10715 case 15: 10716 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0); 10717 fallthrough; 10718 case 14: 10719 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0); 10720 fallthrough; 10721 case 13: 10722 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0); 10723 fallthrough; 10724 case 12: 10725 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0); 10726 fallthrough; 10727 case 11: 10728 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0); 10729 fallthrough; 10730 case 10: 10731 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0); 10732 fallthrough; 10733 case 9: 10734 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0); 10735 fallthrough; 10736 case 8: 10737 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0); 10738 fallthrough; 10739 case 7: 10740 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0); 10741 fallthrough; 10742 case 6: 10743 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0); 10744 fallthrough; 10745 case 5: 10746 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0); 10747 fallthrough; 10748 case 4: 10749 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */ 10750 case 3: 10751 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */ 10752 case 2: 10753 case 1: 10754 10755 default: 10756 break; 10757 } 10758 10759 if (tg3_flag(tp, ENABLE_APE)) 10760 /* Write our heartbeat update interval to APE. */ 10761 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS, 10762 APE_HOST_HEARTBEAT_INT_5SEC); 10763 10764 tg3_write_sig_post_reset(tp, RESET_KIND_INIT); 10765 10766 return 0; 10767 } 10768 10769 /* Called at device open time to get the chip ready for 10770 * packet processing. Invoked with tp->lock held. 
10771 */ 10772 static int tg3_init_hw(struct tg3 *tp, bool reset_phy) 10773 { 10774 /* Chip may have been just powered on. If so, the boot code may still 10775 * be running initialization. Wait for it to finish to avoid races in 10776 * accessing the hardware. 10777 */ 10778 tg3_enable_register_access(tp); 10779 tg3_poll_fw(tp); 10780 10781 tg3_switch_clocks(tp); 10782 10783 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); 10784 10785 return tg3_reset_hw(tp, reset_phy); 10786 } 10787 10788 #ifdef CONFIG_TIGON3_HWMON 10789 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir) 10790 { 10791 u32 off, len = TG3_OCIR_LEN; 10792 int i; 10793 10794 for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) { 10795 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len); 10796 10797 if (ocir->signature != TG3_OCIR_SIG_MAGIC || 10798 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE)) 10799 memset(ocir, 0, len); 10800 } 10801 } 10802 10803 /* sysfs attributes for hwmon */ 10804 static ssize_t tg3_show_temp(struct device *dev, 10805 struct device_attribute *devattr, char *buf) 10806 { 10807 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 10808 struct tg3 *tp = dev_get_drvdata(dev); 10809 u32 temperature; 10810 10811 spin_lock_bh(&tp->lock); 10812 tg3_ape_scratchpad_read(tp, &temperature, attr->index, 10813 sizeof(temperature)); 10814 spin_unlock_bh(&tp->lock); 10815 return sprintf(buf, "%u\n", temperature * 1000); 10816 } 10817 10818 10819 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL, 10820 TG3_TEMP_SENSOR_OFFSET); 10821 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL, 10822 TG3_TEMP_CAUTION_OFFSET); 10823 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL, 10824 TG3_TEMP_MAX_OFFSET); 10825 10826 static struct attribute *tg3_attrs[] = { 10827 &sensor_dev_attr_temp1_input.dev_attr.attr, 10828 &sensor_dev_attr_temp1_crit.dev_attr.attr, 10829 &sensor_dev_attr_temp1_max.dev_attr.attr, 10830 NULL 10831 }; 10832 ATTRIBUTE_GROUPS(tg3); 10833 10834 static void tg3_hwmon_close(struct tg3 *tp) 10835 { 10836 if (tp->hwmon_dev) { 10837 hwmon_device_unregister(tp->hwmon_dev); 10838 tp->hwmon_dev = NULL; 10839 } 10840 } 10841 10842 static void tg3_hwmon_open(struct tg3 *tp) 10843 { 10844 int i; 10845 u32 size = 0; 10846 struct pci_dev *pdev = tp->pdev; 10847 struct tg3_ocir ocirs[TG3_SD_NUM_RECS]; 10848 10849 tg3_sd_scan_scratchpad(tp, ocirs); 10850 10851 for (i = 0; i < TG3_SD_NUM_RECS; i++) { 10852 if (!ocirs[i].src_data_length) 10853 continue; 10854 10855 size += ocirs[i].src_hdr_length; 10856 size += ocirs[i].src_data_length; 10857 } 10858 10859 if (!size) 10860 return; 10861 10862 tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3", 10863 tp, tg3_groups); 10864 if (IS_ERR(tp->hwmon_dev)) { 10865 tp->hwmon_dev = NULL; 10866 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n"); 10867 } 10868 } 10869 #else 10870 static inline void tg3_hwmon_close(struct tg3 *tp) { } 10871 static inline void tg3_hwmon_open(struct tg3 *tp) { } 10872 #endif /* CONFIG_TIGON3_HWMON */ 10873 10874 10875 #define TG3_STAT_ADD32(PSTAT, REG) \ 10876 do { u32 __val = tr32(REG); \ 10877 (PSTAT)->low += __val; \ 10878 if ((PSTAT)->low < __val) \ 10879 (PSTAT)->high += 1; \ 10880 } while (0) 10881 10882 static void tg3_periodic_fetch_stats(struct tg3 *tp) 10883 { 10884 struct tg3_hw_stats *sp = tp->hw_stats; 10885 10886 if (!tp->link_up) 10887 return; 10888 10889 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS); 10890 
TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS); 10891 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT); 10892 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT); 10893 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS); 10894 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS); 10895 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS); 10896 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED); 10897 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL); 10898 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL); 10899 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST); 10900 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST); 10901 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST); 10902 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) && 10903 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low + 10904 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) { 10905 u32 val; 10906 10907 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); 10908 val &= ~tg3_lso_rd_dma_workaround_bit(tp); 10909 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val); 10910 tg3_flag_clear(tp, 5719_5720_RDMA_BUG); 10911 } 10912 10913 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS); 10914 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS); 10915 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST); 10916 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST); 10917 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST); 10918 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS); 10919 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS); 10920 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD); 10921 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD); 10922 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD); 10923 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED); 10924 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG); 10925 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS); 10926 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE); 10927 10928 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT); 10929 if (tg3_asic_rev(tp) != ASIC_REV_5717 && 10930 tg3_asic_rev(tp) != ASIC_REV_5762 && 10931 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 && 10932 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) { 10933 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT); 10934 } else { 10935 u32 val = tr32(HOSTCC_FLOW_ATTN); 10936 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 
1 : 0; 10937 if (val) { 10938 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM); 10939 sp->rx_discards.low += val; 10940 if (sp->rx_discards.low < val) 10941 sp->rx_discards.high += 1; 10942 } 10943 sp->mbuf_lwm_thresh_hit = sp->rx_discards; 10944 } 10945 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT); 10946 } 10947 10948 static void tg3_chk_missed_msi(struct tg3 *tp) 10949 { 10950 u32 i; 10951 10952 for (i = 0; i < tp->irq_cnt; i++) { 10953 struct tg3_napi *tnapi = &tp->napi[i]; 10954 10955 if (tg3_has_work(tnapi)) { 10956 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr && 10957 tnapi->last_tx_cons == tnapi->tx_cons) { 10958 if (tnapi->chk_msi_cnt < 1) { 10959 tnapi->chk_msi_cnt++; 10960 return; 10961 } 10962 tg3_msi(0, tnapi); 10963 } 10964 } 10965 tnapi->chk_msi_cnt = 0; 10966 tnapi->last_rx_cons = tnapi->rx_rcb_ptr; 10967 tnapi->last_tx_cons = tnapi->tx_cons; 10968 } 10969 } 10970 10971 static void tg3_timer(struct timer_list *t) 10972 { 10973 struct tg3 *tp = from_timer(tp, t, timer); 10974 10975 spin_lock(&tp->lock); 10976 10977 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) { 10978 spin_unlock(&tp->lock); 10979 goto restart_timer; 10980 } 10981 10982 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 10983 tg3_flag(tp, 57765_CLASS)) 10984 tg3_chk_missed_msi(tp); 10985 10986 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) { 10987 /* BCM4785: Flush posted writes from GbE to host memory. */ 10988 tr32(HOSTCC_MODE); 10989 } 10990 10991 if (!tg3_flag(tp, TAGGED_STATUS)) { 10992 /* All of this garbage is because when using non-tagged 10993 * IRQ status the mailbox/status_block protocol the chip 10994 * uses with the cpu is race prone. 10995 */ 10996 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) { 10997 tw32(GRC_LOCAL_CTRL, 10998 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); 10999 } else { 11000 tw32(HOSTCC_MODE, tp->coalesce_mode | 11001 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW); 11002 } 11003 11004 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { 11005 spin_unlock(&tp->lock); 11006 tg3_reset_task_schedule(tp); 11007 goto restart_timer; 11008 } 11009 } 11010 11011 /* This part only runs once per second. 
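 * The base timer fires every tp->timer_offset jiffies (HZ or HZ/10,
 * see tg3_timer_init() below) and timer_counter counts those ticks
 * down from timer_multiplier = HZ / timer_offset; e.g. an offset of
 * HZ/10 gives a multiplier of 10, so this block runs on every tenth
 * tick.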
*/
11012 if (!--tp->timer_counter) {
11013 if (tg3_flag(tp, 5705_PLUS))
11014 tg3_periodic_fetch_stats(tp);
11015
11016 if (tp->setlpicnt && !--tp->setlpicnt)
11017 tg3_phy_eee_enable(tp);
11018
11019 if (tg3_flag(tp, USE_LINKCHG_REG)) {
11020 u32 mac_stat;
11021 int phy_event;
11022
11023 mac_stat = tr32(MAC_STATUS);
11024
11025 phy_event = 0;
11026 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11027 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11028 phy_event = 1;
11029 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11030 phy_event = 1;
11031
11032 if (phy_event)
11033 tg3_setup_phy(tp, false);
11034 } else if (tg3_flag(tp, POLL_SERDES)) {
11035 u32 mac_stat = tr32(MAC_STATUS);
11036 int need_setup = 0;
11037
11038 if (tp->link_up &&
11039 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11040 need_setup = 1;
11041 }
11042 if (!tp->link_up &&
11043 (mac_stat & (MAC_STATUS_PCS_SYNCED |
11044 MAC_STATUS_SIGNAL_DET))) {
11045 need_setup = 1;
11046 }
11047 if (need_setup) {
11048 if (!tp->serdes_counter) {
11049 tw32_f(MAC_MODE,
11050 (tp->mac_mode &
11051 ~MAC_MODE_PORT_MODE_MASK));
11052 udelay(40);
11053 tw32_f(MAC_MODE, tp->mac_mode);
11054 udelay(40);
11055 }
11056 tg3_setup_phy(tp, false);
11057 }
11058 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11059 tg3_flag(tp, 5780_CLASS)) {
11060 tg3_serdes_parallel_detect(tp);
11061 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11062 u32 cpmu = tr32(TG3_CPMU_STATUS);
11063 bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11064 TG3_CPMU_STATUS_LINK_MASK);
11065
11066 if (link_up != tp->link_up)
11067 tg3_setup_phy(tp, false);
11068 }
11069
11070 tp->timer_counter = tp->timer_multiplier;
11071 }
11072
11073 /* Heartbeat is only sent once every 2 seconds.
11074 *
11075 * The heartbeat is to tell the ASF firmware that the host
11076 * driver is still alive. In the event that the OS crashes,
11077 * ASF needs to reset the hardware to free up the FIFO space
11078 * that may be filled with rx packets destined for the host.
11079 * If the FIFO is full, ASF will no longer function properly.
11080 *
11081 * Unintended resets have been reported on real-time kernels
11082 * where the timer doesn't run on time. Netpoll will also have
11083 * the same problem.
11084 *
11085 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11086 * to check the ring condition when the heartbeat is expiring
11087 * before doing the reset. This will prevent most unintended
11088 * resets.
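 *
 * asf_counter below works like timer_counter above: tg3_timer_init()
 * sets asf_multiplier = (HZ / timer_offset) * TG3_FW_UPDATE_FREQ_SEC,
 * so this block runs once per TG3_FW_UPDATE_FREQ_SEC seconds.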
11089 */ 11090 if (!--tp->asf_counter) { 11091 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) { 11092 tg3_wait_for_event_ack(tp); 11093 11094 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, 11095 FWCMD_NICDRV_ALIVE3); 11096 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4); 11097 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 11098 TG3_FW_UPDATE_TIMEOUT_SEC); 11099 11100 tg3_generate_fw_event(tp); 11101 } 11102 tp->asf_counter = tp->asf_multiplier; 11103 } 11104 11105 /* Update the APE heartbeat every 5 seconds.*/ 11106 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL); 11107 11108 spin_unlock(&tp->lock); 11109 11110 restart_timer: 11111 tp->timer.expires = jiffies + tp->timer_offset; 11112 add_timer(&tp->timer); 11113 } 11114 11115 static void tg3_timer_init(struct tg3 *tp) 11116 { 11117 if (tg3_flag(tp, TAGGED_STATUS) && 11118 tg3_asic_rev(tp) != ASIC_REV_5717 && 11119 !tg3_flag(tp, 57765_CLASS)) 11120 tp->timer_offset = HZ; 11121 else 11122 tp->timer_offset = HZ / 10; 11123 11124 BUG_ON(tp->timer_offset > HZ); 11125 11126 tp->timer_multiplier = (HZ / tp->timer_offset); 11127 tp->asf_multiplier = (HZ / tp->timer_offset) * 11128 TG3_FW_UPDATE_FREQ_SEC; 11129 11130 timer_setup(&tp->timer, tg3_timer, 0); 11131 } 11132 11133 static void tg3_timer_start(struct tg3 *tp) 11134 { 11135 tp->asf_counter = tp->asf_multiplier; 11136 tp->timer_counter = tp->timer_multiplier; 11137 11138 tp->timer.expires = jiffies + tp->timer_offset; 11139 add_timer(&tp->timer); 11140 } 11141 11142 static void tg3_timer_stop(struct tg3 *tp) 11143 { 11144 del_timer_sync(&tp->timer); 11145 } 11146 11147 /* Restart hardware after configuration changes, self-test, etc. 11148 * Invoked with tp->lock held. 11149 */ 11150 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy) 11151 __releases(tp->lock) 11152 __acquires(tp->lock) 11153 { 11154 int err; 11155 11156 err = tg3_init_hw(tp, reset_phy); 11157 if (err) { 11158 netdev_err(tp->dev, 11159 "Failed to re-initialize device, aborting\n"); 11160 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11161 tg3_full_unlock(tp); 11162 tg3_timer_stop(tp); 11163 tp->irq_sync = 0; 11164 tg3_napi_enable(tp); 11165 dev_close(tp->dev); 11166 tg3_full_lock(tp, 0); 11167 } 11168 return err; 11169 } 11170 11171 static void tg3_reset_task(struct work_struct *work) 11172 { 11173 struct tg3 *tp = container_of(work, struct tg3, reset_task); 11174 int err; 11175 11176 rtnl_lock(); 11177 tg3_full_lock(tp, 0); 11178 11179 if (!netif_running(tp->dev)) { 11180 tg3_flag_clear(tp, RESET_TASK_PENDING); 11181 tg3_full_unlock(tp); 11182 rtnl_unlock(); 11183 return; 11184 } 11185 11186 tg3_full_unlock(tp); 11187 11188 tg3_phy_stop(tp); 11189 11190 tg3_netif_stop(tp); 11191 11192 tg3_full_lock(tp, 1); 11193 11194 if (tg3_flag(tp, TX_RECOVERY_PENDING)) { 11195 tp->write32_tx_mbox = tg3_write32_tx_mbox; 11196 tp->write32_rx_mbox = tg3_write_flush_reg32; 11197 tg3_flag_set(tp, MBOX_WRITE_REORDER); 11198 tg3_flag_clear(tp, TX_RECOVERY_PENDING); 11199 } 11200 11201 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); 11202 err = tg3_init_hw(tp, true); 11203 if (err) { 11204 tg3_full_unlock(tp); 11205 tp->irq_sync = 0; 11206 tg3_napi_enable(tp); 11207 /* Clear this flag so that tg3_reset_task_cancel() will not 11208 * call cancel_work_sync() and wait forever. 
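 * (We are that work item here: dev_close() below can reach
 * tg3_reset_task_cancel(), and cancel_work_sync() on the currently
 * executing work would wait on itself.)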
11209 */ 11210 tg3_flag_clear(tp, RESET_TASK_PENDING); 11211 dev_close(tp->dev); 11212 goto out; 11213 } 11214 11215 tg3_netif_start(tp); 11216 tg3_full_unlock(tp); 11217 tg3_phy_start(tp); 11218 tg3_flag_clear(tp, RESET_TASK_PENDING); 11219 out: 11220 rtnl_unlock(); 11221 } 11222 11223 static int tg3_request_irq(struct tg3 *tp, int irq_num) 11224 { 11225 irq_handler_t fn; 11226 unsigned long flags; 11227 char *name; 11228 struct tg3_napi *tnapi = &tp->napi[irq_num]; 11229 11230 if (tp->irq_cnt == 1) 11231 name = tp->dev->name; 11232 else { 11233 name = &tnapi->irq_lbl[0]; 11234 if (tnapi->tx_buffers && tnapi->rx_rcb) 11235 snprintf(name, IFNAMSIZ, 11236 "%s-txrx-%d", tp->dev->name, irq_num); 11237 else if (tnapi->tx_buffers) 11238 snprintf(name, IFNAMSIZ, 11239 "%s-tx-%d", tp->dev->name, irq_num); 11240 else if (tnapi->rx_rcb) 11241 snprintf(name, IFNAMSIZ, 11242 "%s-rx-%d", tp->dev->name, irq_num); 11243 else 11244 snprintf(name, IFNAMSIZ, 11245 "%s-%d", tp->dev->name, irq_num); 11246 name[IFNAMSIZ-1] = 0; 11247 } 11248 11249 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) { 11250 fn = tg3_msi; 11251 if (tg3_flag(tp, 1SHOT_MSI)) 11252 fn = tg3_msi_1shot; 11253 flags = 0; 11254 } else { 11255 fn = tg3_interrupt; 11256 if (tg3_flag(tp, TAGGED_STATUS)) 11257 fn = tg3_interrupt_tagged; 11258 flags = IRQF_SHARED; 11259 } 11260 11261 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi); 11262 } 11263 11264 static int tg3_test_interrupt(struct tg3 *tp) 11265 { 11266 struct tg3_napi *tnapi = &tp->napi[0]; 11267 struct net_device *dev = tp->dev; 11268 int err, i, intr_ok = 0; 11269 u32 val; 11270 11271 if (!netif_running(dev)) 11272 return -ENODEV; 11273 11274 tg3_disable_ints(tp); 11275 11276 free_irq(tnapi->irq_vec, tnapi); 11277 11278 /* 11279 * Turn off MSI one shot mode. Otherwise this test has no 11280 * observable way to know whether the interrupt was delivered. 11281 */ 11282 if (tg3_flag(tp, 57765_PLUS)) { 11283 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE; 11284 tw32(MSGINT_MODE, val); 11285 } 11286 11287 err = request_irq(tnapi->irq_vec, tg3_test_isr, 11288 IRQF_SHARED, dev->name, tnapi); 11289 if (err) 11290 return err; 11291 11292 tnapi->hw_status->status &= ~SD_STATUS_UPDATED; 11293 tg3_enable_ints(tp); 11294 11295 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 11296 tnapi->coal_now); 11297 11298 for (i = 0; i < 5; i++) { 11299 u32 int_mbox, misc_host_ctrl; 11300 11301 int_mbox = tr32_mailbox(tnapi->int_mbox); 11302 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); 11303 11304 if ((int_mbox != 0) || 11305 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) { 11306 intr_ok = 1; 11307 break; 11308 } 11309 11310 if (tg3_flag(tp, 57765_PLUS) && 11311 tnapi->hw_status->status_tag != tnapi->last_tag) 11312 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); 11313 11314 msleep(10); 11315 } 11316 11317 tg3_disable_ints(tp); 11318 11319 free_irq(tnapi->irq_vec, tnapi); 11320 11321 err = tg3_request_irq(tp, 0); 11322 11323 if (err) 11324 return err; 11325 11326 if (intr_ok) { 11327 /* Reenable MSI one shot mode. 
*/
11328 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11329 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11330 tw32(MSGINT_MODE, val);
11331 }
11332 return 0;
11333 }
11334
11335 return -EIO;
11336 }
11337
11338 /* Returns 0 if the MSI test succeeds, or if it fails but INTx mode
11339 * is successfully restored.
11340 */
11341 static int tg3_test_msi(struct tg3 *tp)
11342 {
11343 int err;
11344 u16 pci_cmd;
11345
11346 if (!tg3_flag(tp, USING_MSI))
11347 return 0;
11348
11349 /* Turn off SERR reporting in case MSI terminates with Master
11350 * Abort.
11351 */
11352 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11353 pci_write_config_word(tp->pdev, PCI_COMMAND,
11354 pci_cmd & ~PCI_COMMAND_SERR);
11355
11356 err = tg3_test_interrupt(tp);
11357
11358 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11359
11360 if (!err)
11361 return 0;
11362
11363 /* other failures */
11364 if (err != -EIO)
11365 return err;
11366
11367 /* MSI test failed, go back to INTx mode */
11368 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11369 "to INTx mode. Please report this failure to the PCI "
11370 "maintainer and include system chipset information\n");
11371
11372 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11373
11374 pci_disable_msi(tp->pdev);
11375
11376 tg3_flag_clear(tp, USING_MSI);
11377 tp->napi[0].irq_vec = tp->pdev->irq;
11378
11379 err = tg3_request_irq(tp, 0);
11380 if (err)
11381 return err;
11382
11383 /* Need to reset the chip because the MSI cycle may have terminated
11384 * with Master Abort.
11385 */
11386 tg3_full_lock(tp, 1);
11387
11388 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11389 err = tg3_init_hw(tp, true);
11390
11391 tg3_full_unlock(tp);
11392
11393 if (err)
11394 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11395
11396 return err;
11397 }
11398
11399 static int tg3_request_firmware(struct tg3 *tp)
11400 {
11401 const struct tg3_firmware_hdr *fw_hdr;
11402
11403 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11404 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11405 tp->fw_needed);
11406 return -ENOENT;
11407 }
11408
11409 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11410
11411 /* Firmware blob starts with version numbers, followed by
11412 * start address and _full_ length including BSS sections
11413 * (which must be longer than the actual data, of course).
11414 */
11415
11416 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
11417 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11418 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11419 tp->fw_len, tp->fw_needed);
11420 release_firmware(tp->fw);
11421 tp->fw = NULL;
11422 return -EINVAL;
11423 }
11424
11425 /* We no longer need firmware; we have it. */
11426 tp->fw_needed = NULL;
11427 return 0;
11428 }
11429
11430 static u32 tg3_irq_count(struct tg3 *tp)
11431 {
11432 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11433
11434 if (irq_cnt > 1) {
11435 /* We want as many rx rings enabled as there are cpus.
11436 * In multiqueue MSI-X mode, the first MSI-X vector
11437 * only deals with link interrupts, etc, so we add
11438 * one to the number of vectors we are requesting.
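 *
 * E.g. with four CPUs and four rx queues this requests five vectors,
 * one of them dedicated to link/status interrupts, subject to the
 * tp->irq_max cap applied below.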
11439 */ 11440 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max); 11441 } 11442 11443 return irq_cnt; 11444 } 11445 11446 static bool tg3_enable_msix(struct tg3 *tp) 11447 { 11448 int i, rc; 11449 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS]; 11450 11451 tp->txq_cnt = tp->txq_req; 11452 tp->rxq_cnt = tp->rxq_req; 11453 if (!tp->rxq_cnt) 11454 tp->rxq_cnt = netif_get_num_default_rss_queues(); 11455 if (tp->rxq_cnt > tp->rxq_max) 11456 tp->rxq_cnt = tp->rxq_max; 11457 11458 /* Disable multiple TX rings by default. Simple round-robin hardware 11459 * scheduling of the TX rings can cause starvation of rings with 11460 * small packets when other rings have TSO or jumbo packets. 11461 */ 11462 if (!tp->txq_req) 11463 tp->txq_cnt = 1; 11464 11465 tp->irq_cnt = tg3_irq_count(tp); 11466 11467 for (i = 0; i < tp->irq_max; i++) { 11468 msix_ent[i].entry = i; 11469 msix_ent[i].vector = 0; 11470 } 11471 11472 rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt); 11473 if (rc < 0) { 11474 return false; 11475 } else if (rc < tp->irq_cnt) { 11476 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n", 11477 tp->irq_cnt, rc); 11478 tp->irq_cnt = rc; 11479 tp->rxq_cnt = max(rc - 1, 1); 11480 if (tp->txq_cnt) 11481 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max); 11482 } 11483 11484 for (i = 0; i < tp->irq_max; i++) 11485 tp->napi[i].irq_vec = msix_ent[i].vector; 11486 11487 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) { 11488 pci_disable_msix(tp->pdev); 11489 return false; 11490 } 11491 11492 if (tp->irq_cnt == 1) 11493 return true; 11494 11495 tg3_flag_set(tp, ENABLE_RSS); 11496 11497 if (tp->txq_cnt > 1) 11498 tg3_flag_set(tp, ENABLE_TSS); 11499 11500 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt); 11501 11502 return true; 11503 } 11504 11505 static void tg3_ints_init(struct tg3 *tp) 11506 { 11507 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) && 11508 !tg3_flag(tp, TAGGED_STATUS)) { 11509 /* All MSI supporting chips should support tagged 11510 * status. Assert that this is the case. 11511 */ 11512 netdev_warn(tp->dev, 11513 "MSI without TAGGED_STATUS? 
Not using MSI\n");
11514 goto defcfg;
11515 }
11516
11517 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11518 tg3_flag_set(tp, USING_MSIX);
11519 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11520 tg3_flag_set(tp, USING_MSI);
11521
11522 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11523 u32 msi_mode = tr32(MSGINT_MODE);
11524 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11525 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11526 if (!tg3_flag(tp, 1SHOT_MSI))
11527 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11528 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11529 }
11530 defcfg:
11531 if (!tg3_flag(tp, USING_MSIX)) {
11532 tp->irq_cnt = 1;
11533 tp->napi[0].irq_vec = tp->pdev->irq;
11534 }
11535
11536 if (tp->irq_cnt == 1) {
11537 tp->txq_cnt = 1;
11538 tp->rxq_cnt = 1;
11539 netif_set_real_num_tx_queues(tp->dev, 1);
11540 netif_set_real_num_rx_queues(tp->dev, 1);
11541 }
11542 }
11543
11544 static void tg3_ints_fini(struct tg3 *tp)
11545 {
11546 if (tg3_flag(tp, USING_MSIX))
11547 pci_disable_msix(tp->pdev);
11548 else if (tg3_flag(tp, USING_MSI))
11549 pci_disable_msi(tp->pdev);
11550 tg3_flag_clear(tp, USING_MSI);
11551 tg3_flag_clear(tp, USING_MSIX);
11552 tg3_flag_clear(tp, ENABLE_RSS);
11553 tg3_flag_clear(tp, ENABLE_TSS);
11554 }
11555
11556 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11557 bool init)
11558 {
11559 struct net_device *dev = tp->dev;
11560 int i, err;
11561
11562 /*
11563 * Set up interrupts first so we know how
11564 * many NAPI resources to allocate
11565 */
11566 tg3_ints_init(tp);
11567
11568 tg3_rss_check_indir_tbl(tp);
11569
11570 /* The placement of this call is tied
11571 * to the setup and use of Host TX descriptors.
11572 */
11573 err = tg3_alloc_consistent(tp);
11574 if (err)
11575 goto out_ints_fini;
11576
11577 tg3_napi_init(tp);
11578
11579 tg3_napi_enable(tp);
11580
11581 for (i = 0; i < tp->irq_cnt; i++) {
11582 err = tg3_request_irq(tp, i);
11583 if (err) {
11584 for (i--; i >= 0; i--) {
11585 struct tg3_napi *tnapi = &tp->napi[i];
11586
11587 free_irq(tnapi->irq_vec, tnapi);
11588 }
11589 goto out_napi_fini;
11590 }
11591 }
11592
11593 tg3_full_lock(tp, 0);
11594
11595 if (init)
11596 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11597
11598 err = tg3_init_hw(tp, reset_phy);
11599 if (err) {
11600 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11601 tg3_free_rings(tp);
11602 }
11603
11604 tg3_full_unlock(tp);
11605
11606 if (err)
11607 goto out_free_irq;
11608
11609 if (test_irq && tg3_flag(tp, USING_MSI)) {
11610 err = tg3_test_msi(tp);
11611
11612 if (err) {
11613 tg3_full_lock(tp, 0);
11614 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11615 tg3_free_rings(tp);
11616 tg3_full_unlock(tp);
11617
11618 goto out_napi_fini;
11619 }
11620
11621 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11622 u32 val = tr32(PCIE_TRANSACTION_CFG);
11623
11624 tw32(PCIE_TRANSACTION_CFG,
11625 val | PCIE_TRANS_CFG_1SHOT_MSI);
11626 }
11627 }
11628
11629 tg3_phy_start(tp);
11630
11631 tg3_hwmon_open(tp);
11632
11633 tg3_full_lock(tp, 0);
11634
11635 tg3_timer_start(tp);
11636 tg3_flag_set(tp, INIT_COMPLETE);
11637 tg3_enable_ints(tp);
11638
11639 tg3_ptp_resume(tp);
11640
11641 tg3_full_unlock(tp);
11642
11643 netif_tx_start_all_queues(dev);
11644
11645 /*
11646 * Reset the loopback feature if it was turned on while the device
11647 * was down; make sure that it's installed properly now.
11648 */ 11649 if (dev->features & NETIF_F_LOOPBACK) 11650 tg3_set_loopback(dev, dev->features); 11651 11652 return 0; 11653 11654 out_free_irq: 11655 for (i = tp->irq_cnt - 1; i >= 0; i--) { 11656 struct tg3_napi *tnapi = &tp->napi[i]; 11657 free_irq(tnapi->irq_vec, tnapi); 11658 } 11659 11660 out_napi_fini: 11661 tg3_napi_disable(tp); 11662 tg3_napi_fini(tp); 11663 tg3_free_consistent(tp); 11664 11665 out_ints_fini: 11666 tg3_ints_fini(tp); 11667 11668 return err; 11669 } 11670 11671 static void tg3_stop(struct tg3 *tp) 11672 { 11673 int i; 11674 11675 tg3_reset_task_cancel(tp); 11676 tg3_netif_stop(tp); 11677 11678 tg3_timer_stop(tp); 11679 11680 tg3_hwmon_close(tp); 11681 11682 tg3_phy_stop(tp); 11683 11684 tg3_full_lock(tp, 1); 11685 11686 tg3_disable_ints(tp); 11687 11688 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11689 tg3_free_rings(tp); 11690 tg3_flag_clear(tp, INIT_COMPLETE); 11691 11692 tg3_full_unlock(tp); 11693 11694 for (i = tp->irq_cnt - 1; i >= 0; i--) { 11695 struct tg3_napi *tnapi = &tp->napi[i]; 11696 free_irq(tnapi->irq_vec, tnapi); 11697 } 11698 11699 tg3_ints_fini(tp); 11700 11701 tg3_napi_fini(tp); 11702 11703 tg3_free_consistent(tp); 11704 } 11705 11706 static int tg3_open(struct net_device *dev) 11707 { 11708 struct tg3 *tp = netdev_priv(dev); 11709 int err; 11710 11711 if (tp->pcierr_recovery) { 11712 netdev_err(dev, "Failed to open device. PCI error recovery " 11713 "in progress\n"); 11714 return -EAGAIN; 11715 } 11716 11717 if (tp->fw_needed) { 11718 err = tg3_request_firmware(tp); 11719 if (tg3_asic_rev(tp) == ASIC_REV_57766) { 11720 if (err) { 11721 netdev_warn(tp->dev, "EEE capability disabled\n"); 11722 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP; 11723 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { 11724 netdev_warn(tp->dev, "EEE capability restored\n"); 11725 tp->phy_flags |= TG3_PHYFLG_EEE_CAP; 11726 } 11727 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) { 11728 if (err) 11729 return err; 11730 } else if (err) { 11731 netdev_warn(tp->dev, "TSO capability disabled\n"); 11732 tg3_flag_clear(tp, TSO_CAPABLE); 11733 } else if (!tg3_flag(tp, TSO_CAPABLE)) { 11734 netdev_notice(tp->dev, "TSO capability restored\n"); 11735 tg3_flag_set(tp, TSO_CAPABLE); 11736 } 11737 } 11738 11739 tg3_carrier_off(tp); 11740 11741 err = tg3_power_up(tp); 11742 if (err) 11743 return err; 11744 11745 tg3_full_lock(tp, 0); 11746 11747 tg3_disable_ints(tp); 11748 tg3_flag_clear(tp, INIT_COMPLETE); 11749 11750 tg3_full_unlock(tp); 11751 11752 err = tg3_start(tp, 11753 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN), 11754 true, true); 11755 if (err) { 11756 tg3_frob_aux_power(tp, false); 11757 pci_set_power_state(tp->pdev, PCI_D3hot); 11758 } 11759 11760 return err; 11761 } 11762 11763 static int tg3_close(struct net_device *dev) 11764 { 11765 struct tg3 *tp = netdev_priv(dev); 11766 11767 if (tp->pcierr_recovery) { 11768 netdev_err(dev, "Failed to close device. 
PCI error recovery " 11769 "in progress\n"); 11770 return -EAGAIN; 11771 } 11772 11773 tg3_stop(tp); 11774 11775 if (pci_device_is_present(tp->pdev)) { 11776 tg3_power_down_prepare(tp); 11777 11778 tg3_carrier_off(tp); 11779 } 11780 return 0; 11781 } 11782 11783 static inline u64 get_stat64(tg3_stat64_t *val) 11784 { 11785 return ((u64)val->high << 32) | ((u64)val->low); 11786 } 11787 11788 static u64 tg3_calc_crc_errors(struct tg3 *tp) 11789 { 11790 struct tg3_hw_stats *hw_stats = tp->hw_stats; 11791 11792 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 11793 (tg3_asic_rev(tp) == ASIC_REV_5700 || 11794 tg3_asic_rev(tp) == ASIC_REV_5701)) { 11795 u32 val; 11796 11797 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) { 11798 tg3_writephy(tp, MII_TG3_TEST1, 11799 val | MII_TG3_TEST1_CRC_EN); 11800 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val); 11801 } else 11802 val = 0; 11803 11804 tp->phy_crc_errors += val; 11805 11806 return tp->phy_crc_errors; 11807 } 11808 11809 return get_stat64(&hw_stats->rx_fcs_errors); 11810 } 11811 11812 #define ESTAT_ADD(member) \ 11813 estats->member = old_estats->member + \ 11814 get_stat64(&hw_stats->member) 11815 11816 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats) 11817 { 11818 struct tg3_ethtool_stats *old_estats = &tp->estats_prev; 11819 struct tg3_hw_stats *hw_stats = tp->hw_stats; 11820 11821 ESTAT_ADD(rx_octets); 11822 ESTAT_ADD(rx_fragments); 11823 ESTAT_ADD(rx_ucast_packets); 11824 ESTAT_ADD(rx_mcast_packets); 11825 ESTAT_ADD(rx_bcast_packets); 11826 ESTAT_ADD(rx_fcs_errors); 11827 ESTAT_ADD(rx_align_errors); 11828 ESTAT_ADD(rx_xon_pause_rcvd); 11829 ESTAT_ADD(rx_xoff_pause_rcvd); 11830 ESTAT_ADD(rx_mac_ctrl_rcvd); 11831 ESTAT_ADD(rx_xoff_entered); 11832 ESTAT_ADD(rx_frame_too_long_errors); 11833 ESTAT_ADD(rx_jabbers); 11834 ESTAT_ADD(rx_undersize_packets); 11835 ESTAT_ADD(rx_in_length_errors); 11836 ESTAT_ADD(rx_out_length_errors); 11837 ESTAT_ADD(rx_64_or_less_octet_packets); 11838 ESTAT_ADD(rx_65_to_127_octet_packets); 11839 ESTAT_ADD(rx_128_to_255_octet_packets); 11840 ESTAT_ADD(rx_256_to_511_octet_packets); 11841 ESTAT_ADD(rx_512_to_1023_octet_packets); 11842 ESTAT_ADD(rx_1024_to_1522_octet_packets); 11843 ESTAT_ADD(rx_1523_to_2047_octet_packets); 11844 ESTAT_ADD(rx_2048_to_4095_octet_packets); 11845 ESTAT_ADD(rx_4096_to_8191_octet_packets); 11846 ESTAT_ADD(rx_8192_to_9022_octet_packets); 11847 11848 ESTAT_ADD(tx_octets); 11849 ESTAT_ADD(tx_collisions); 11850 ESTAT_ADD(tx_xon_sent); 11851 ESTAT_ADD(tx_xoff_sent); 11852 ESTAT_ADD(tx_flow_control); 11853 ESTAT_ADD(tx_mac_errors); 11854 ESTAT_ADD(tx_single_collisions); 11855 ESTAT_ADD(tx_mult_collisions); 11856 ESTAT_ADD(tx_deferred); 11857 ESTAT_ADD(tx_excessive_collisions); 11858 ESTAT_ADD(tx_late_collisions); 11859 ESTAT_ADD(tx_collide_2times); 11860 ESTAT_ADD(tx_collide_3times); 11861 ESTAT_ADD(tx_collide_4times); 11862 ESTAT_ADD(tx_collide_5times); 11863 ESTAT_ADD(tx_collide_6times); 11864 ESTAT_ADD(tx_collide_7times); 11865 ESTAT_ADD(tx_collide_8times); 11866 ESTAT_ADD(tx_collide_9times); 11867 ESTAT_ADD(tx_collide_10times); 11868 ESTAT_ADD(tx_collide_11times); 11869 ESTAT_ADD(tx_collide_12times); 11870 ESTAT_ADD(tx_collide_13times); 11871 ESTAT_ADD(tx_collide_14times); 11872 ESTAT_ADD(tx_collide_15times); 11873 ESTAT_ADD(tx_ucast_packets); 11874 ESTAT_ADD(tx_mcast_packets); 11875 ESTAT_ADD(tx_bcast_packets); 11876 ESTAT_ADD(tx_carrier_sense_errors); 11877 ESTAT_ADD(tx_discards); 11878 ESTAT_ADD(tx_errors); 11879 11880 ESTAT_ADD(dma_writeq_full); 11881 
ESTAT_ADD(dma_write_prioq_full); 11882 ESTAT_ADD(rxbds_empty); 11883 ESTAT_ADD(rx_discards); 11884 ESTAT_ADD(rx_errors); 11885 ESTAT_ADD(rx_threshold_hit); 11886 11887 ESTAT_ADD(dma_readq_full); 11888 ESTAT_ADD(dma_read_prioq_full); 11889 ESTAT_ADD(tx_comp_queue_full); 11890 11891 ESTAT_ADD(ring_set_send_prod_index); 11892 ESTAT_ADD(ring_status_update); 11893 ESTAT_ADD(nic_irqs); 11894 ESTAT_ADD(nic_avoided_irqs); 11895 ESTAT_ADD(nic_tx_threshold_hit); 11896 11897 ESTAT_ADD(mbuf_lwm_thresh_hit); 11898 } 11899 11900 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats) 11901 { 11902 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev; 11903 struct tg3_hw_stats *hw_stats = tp->hw_stats; 11904 11905 stats->rx_packets = old_stats->rx_packets + 11906 get_stat64(&hw_stats->rx_ucast_packets) + 11907 get_stat64(&hw_stats->rx_mcast_packets) + 11908 get_stat64(&hw_stats->rx_bcast_packets); 11909 11910 stats->tx_packets = old_stats->tx_packets + 11911 get_stat64(&hw_stats->tx_ucast_packets) + 11912 get_stat64(&hw_stats->tx_mcast_packets) + 11913 get_stat64(&hw_stats->tx_bcast_packets); 11914 11915 stats->rx_bytes = old_stats->rx_bytes + 11916 get_stat64(&hw_stats->rx_octets); 11917 stats->tx_bytes = old_stats->tx_bytes + 11918 get_stat64(&hw_stats->tx_octets); 11919 11920 stats->rx_errors = old_stats->rx_errors + 11921 get_stat64(&hw_stats->rx_errors); 11922 stats->tx_errors = old_stats->tx_errors + 11923 get_stat64(&hw_stats->tx_errors) + 11924 get_stat64(&hw_stats->tx_mac_errors) + 11925 get_stat64(&hw_stats->tx_carrier_sense_errors) + 11926 get_stat64(&hw_stats->tx_discards); 11927 11928 stats->multicast = old_stats->multicast + 11929 get_stat64(&hw_stats->rx_mcast_packets); 11930 stats->collisions = old_stats->collisions + 11931 get_stat64(&hw_stats->tx_collisions); 11932 11933 stats->rx_length_errors = old_stats->rx_length_errors + 11934 get_stat64(&hw_stats->rx_frame_too_long_errors) + 11935 get_stat64(&hw_stats->rx_undersize_packets); 11936 11937 stats->rx_frame_errors = old_stats->rx_frame_errors + 11938 get_stat64(&hw_stats->rx_align_errors); 11939 stats->tx_aborted_errors = old_stats->tx_aborted_errors + 11940 get_stat64(&hw_stats->tx_discards); 11941 stats->tx_carrier_errors = old_stats->tx_carrier_errors + 11942 get_stat64(&hw_stats->tx_carrier_sense_errors); 11943 11944 stats->rx_crc_errors = old_stats->rx_crc_errors + 11945 tg3_calc_crc_errors(tp); 11946 11947 stats->rx_missed_errors = old_stats->rx_missed_errors + 11948 get_stat64(&hw_stats->rx_discards); 11949 11950 stats->rx_dropped = tp->rx_dropped; 11951 stats->tx_dropped = tp->tx_dropped; 11952 } 11953 11954 static int tg3_get_regs_len(struct net_device *dev) 11955 { 11956 return TG3_REG_BLK_SIZE; 11957 } 11958 11959 static void tg3_get_regs(struct net_device *dev, 11960 struct ethtool_regs *regs, void *_p) 11961 { 11962 struct tg3 *tp = netdev_priv(dev); 11963 11964 regs->version = 0; 11965 11966 memset(_p, 0, TG3_REG_BLK_SIZE); 11967 11968 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 11969 return; 11970 11971 tg3_full_lock(tp, 0); 11972 11973 tg3_dump_legacy_regs(tp, (u32 *)_p); 11974 11975 tg3_full_unlock(tp); 11976 } 11977 11978 static int tg3_get_eeprom_len(struct net_device *dev) 11979 { 11980 struct tg3 *tp = netdev_priv(dev); 11981 11982 return tp->nvram_size; 11983 } 11984 11985 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) 11986 { 11987 struct tg3 *tp = netdev_priv(dev); 11988 int ret, cpmu_restore = 0; 11989 u8 *pd; 11990 u32 i, offset, len, 
b_offset, b_count, cpmu_val = 0; 11991 __be32 val; 11992 11993 if (tg3_flag(tp, NO_NVRAM)) 11994 return -EINVAL; 11995 11996 offset = eeprom->offset; 11997 len = eeprom->len; 11998 eeprom->len = 0; 11999 12000 eeprom->magic = TG3_EEPROM_MAGIC; 12001 12002 /* Override clock, link aware and link idle modes */ 12003 if (tg3_flag(tp, CPMU_PRESENT)) { 12004 cpmu_val = tr32(TG3_CPMU_CTRL); 12005 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE | 12006 CPMU_CTRL_LINK_IDLE_MODE)) { 12007 tw32(TG3_CPMU_CTRL, cpmu_val & 12008 ~(CPMU_CTRL_LINK_AWARE_MODE | 12009 CPMU_CTRL_LINK_IDLE_MODE)); 12010 cpmu_restore = 1; 12011 } 12012 } 12013 tg3_override_clk(tp); 12014 12015 if (offset & 3) { 12016 /* adjustments to start on required 4 byte boundary */ 12017 b_offset = offset & 3; 12018 b_count = 4 - b_offset; 12019 if (b_count > len) { 12020 /* i.e. offset=1 len=2 */ 12021 b_count = len; 12022 } 12023 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val); 12024 if (ret) 12025 goto eeprom_done; 12026 memcpy(data, ((char *)&val) + b_offset, b_count); 12027 len -= b_count; 12028 offset += b_count; 12029 eeprom->len += b_count; 12030 } 12031 12032 /* read bytes up to the last 4 byte boundary */ 12033 pd = &data[eeprom->len]; 12034 for (i = 0; i < (len - (len & 3)); i += 4) { 12035 ret = tg3_nvram_read_be32(tp, offset + i, &val); 12036 if (ret) { 12037 if (i) 12038 i -= 4; 12039 eeprom->len += i; 12040 goto eeprom_done; 12041 } 12042 memcpy(pd + i, &val, 4); 12043 if (need_resched()) { 12044 if (signal_pending(current)) { 12045 eeprom->len += i; 12046 ret = -EINTR; 12047 goto eeprom_done; 12048 } 12049 cond_resched(); 12050 } 12051 } 12052 eeprom->len += i; 12053 12054 if (len & 3) { 12055 /* read last bytes not ending on 4 byte boundary */ 12056 pd = &data[eeprom->len]; 12057 b_count = len & 3; 12058 b_offset = offset + len - b_count; 12059 ret = tg3_nvram_read_be32(tp, b_offset, &val); 12060 if (ret) 12061 goto eeprom_done; 12062 memcpy(pd, &val, b_count); 12063 eeprom->len += b_count; 12064 } 12065 ret = 0; 12066 12067 eeprom_done: 12068 /* Restore clock, link aware and link idle modes */ 12069 tg3_restore_clk(tp); 12070 if (cpmu_restore) 12071 tw32(TG3_CPMU_CTRL, cpmu_val); 12072 12073 return ret; 12074 } 12075 12076 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) 12077 { 12078 struct tg3 *tp = netdev_priv(dev); 12079 int ret; 12080 u32 offset, len, b_offset, odd_len; 12081 u8 *buf; 12082 __be32 start = 0, end; 12083 12084 if (tg3_flag(tp, NO_NVRAM) || 12085 eeprom->magic != TG3_EEPROM_MAGIC) 12086 return -EINVAL; 12087 12088 offset = eeprom->offset; 12089 len = eeprom->len; 12090 12091 if ((b_offset = (offset & 3))) { 12092 /* adjustments to start on required 4 byte boundary */ 12093 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start); 12094 if (ret) 12095 return ret; 12096 len += b_offset; 12097 offset &= ~3; 12098 if (len < 4) 12099 len = 4; 12100 } 12101 12102 odd_len = 0; 12103 if (len & 3) { 12104 /* adjustments to end on required 4 byte boundary */ 12105 odd_len = 1; 12106 len = (len + 3) & ~3; 12107 ret = tg3_nvram_read_be32(tp, offset+len-4, &end); 12108 if (ret) 12109 return ret; 12110 } 12111 12112 buf = data; 12113 if (b_offset || odd_len) { 12114 buf = kmalloc(len, GFP_KERNEL); 12115 if (!buf) 12116 return -ENOMEM; 12117 if (b_offset) 12118 memcpy(buf, &start, 4); 12119 if (odd_len) 12120 memcpy(buf+len-4, &end, 4); 12121 memcpy(buf + b_offset, data, eeprom->len); 12122 } 12123 12124 ret = tg3_nvram_write_block(tp, offset, len, buf); 12125 12126 if (buf != 
data) 12127 kfree(buf); 12128 12129 return ret; 12130 } 12131 12132 static int tg3_get_link_ksettings(struct net_device *dev, 12133 struct ethtool_link_ksettings *cmd) 12134 { 12135 struct tg3 *tp = netdev_priv(dev); 12136 u32 supported, advertising; 12137 12138 if (tg3_flag(tp, USE_PHYLIB)) { 12139 struct phy_device *phydev; 12140 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 12141 return -EAGAIN; 12142 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 12143 phy_ethtool_ksettings_get(phydev, cmd); 12144 12145 return 0; 12146 } 12147 12148 supported = (SUPPORTED_Autoneg); 12149 12150 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) 12151 supported |= (SUPPORTED_1000baseT_Half | 12152 SUPPORTED_1000baseT_Full); 12153 12154 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { 12155 supported |= (SUPPORTED_100baseT_Half | 12156 SUPPORTED_100baseT_Full | 12157 SUPPORTED_10baseT_Half | 12158 SUPPORTED_10baseT_Full | 12159 SUPPORTED_TP); 12160 cmd->base.port = PORT_TP; 12161 } else { 12162 supported |= SUPPORTED_FIBRE; 12163 cmd->base.port = PORT_FIBRE; 12164 } 12165 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, 12166 supported); 12167 12168 advertising = tp->link_config.advertising; 12169 if (tg3_flag(tp, PAUSE_AUTONEG)) { 12170 if (tp->link_config.flowctrl & FLOW_CTRL_RX) { 12171 if (tp->link_config.flowctrl & FLOW_CTRL_TX) { 12172 advertising |= ADVERTISED_Pause; 12173 } else { 12174 advertising |= ADVERTISED_Pause | 12175 ADVERTISED_Asym_Pause; 12176 } 12177 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) { 12178 advertising |= ADVERTISED_Asym_Pause; 12179 } 12180 } 12181 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, 12182 advertising); 12183 12184 if (netif_running(dev) && tp->link_up) { 12185 cmd->base.speed = tp->link_config.active_speed; 12186 cmd->base.duplex = tp->link_config.active_duplex; 12187 ethtool_convert_legacy_u32_to_link_mode( 12188 cmd->link_modes.lp_advertising, 12189 tp->link_config.rmt_adv); 12190 12191 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { 12192 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE) 12193 cmd->base.eth_tp_mdix = ETH_TP_MDI_X; 12194 else 12195 cmd->base.eth_tp_mdix = ETH_TP_MDI; 12196 } 12197 } else { 12198 cmd->base.speed = SPEED_UNKNOWN; 12199 cmd->base.duplex = DUPLEX_UNKNOWN; 12200 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; 12201 } 12202 cmd->base.phy_address = tp->phy_addr; 12203 cmd->base.autoneg = tp->link_config.autoneg; 12204 return 0; 12205 } 12206 12207 static int tg3_set_link_ksettings(struct net_device *dev, 12208 const struct ethtool_link_ksettings *cmd) 12209 { 12210 struct tg3 *tp = netdev_priv(dev); 12211 u32 speed = cmd->base.speed; 12212 u32 advertising; 12213 12214 if (tg3_flag(tp, USE_PHYLIB)) { 12215 struct phy_device *phydev; 12216 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 12217 return -EAGAIN; 12218 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 12219 return phy_ethtool_ksettings_set(phydev, cmd); 12220 } 12221 12222 if (cmd->base.autoneg != AUTONEG_ENABLE && 12223 cmd->base.autoneg != AUTONEG_DISABLE) 12224 return -EINVAL; 12225 12226 if (cmd->base.autoneg == AUTONEG_DISABLE && 12227 cmd->base.duplex != DUPLEX_FULL && 12228 cmd->base.duplex != DUPLEX_HALF) 12229 return -EINVAL; 12230 12231 ethtool_convert_link_mode_to_legacy_u32(&advertising, 12232 cmd->link_modes.advertising); 12233 12234 if (cmd->base.autoneg == AUTONEG_ENABLE) { 12235 u32 mask = ADVERTISED_Autoneg | 12236 ADVERTISED_Pause | 12237 ADVERTISED_Asym_Pause; 12238 12239 if (!(tp->phy_flags & 
TG3_PHYFLG_10_100_ONLY)) 12240 mask |= ADVERTISED_1000baseT_Half | 12241 ADVERTISED_1000baseT_Full; 12242 12243 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 12244 mask |= ADVERTISED_100baseT_Half | 12245 ADVERTISED_100baseT_Full | 12246 ADVERTISED_10baseT_Half | 12247 ADVERTISED_10baseT_Full | 12248 ADVERTISED_TP; 12249 else 12250 mask |= ADVERTISED_FIBRE; 12251 12252 if (advertising & ~mask) 12253 return -EINVAL; 12254 12255 mask &= (ADVERTISED_1000baseT_Half | 12256 ADVERTISED_1000baseT_Full | 12257 ADVERTISED_100baseT_Half | 12258 ADVERTISED_100baseT_Full | 12259 ADVERTISED_10baseT_Half | 12260 ADVERTISED_10baseT_Full); 12261 12262 advertising &= mask; 12263 } else { 12264 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) { 12265 if (speed != SPEED_1000) 12266 return -EINVAL; 12267 12268 if (cmd->base.duplex != DUPLEX_FULL) 12269 return -EINVAL; 12270 } else { 12271 if (speed != SPEED_100 && 12272 speed != SPEED_10) 12273 return -EINVAL; 12274 } 12275 } 12276 12277 tg3_full_lock(tp, 0); 12278 12279 tp->link_config.autoneg = cmd->base.autoneg; 12280 if (cmd->base.autoneg == AUTONEG_ENABLE) { 12281 tp->link_config.advertising = (advertising | 12282 ADVERTISED_Autoneg); 12283 tp->link_config.speed = SPEED_UNKNOWN; 12284 tp->link_config.duplex = DUPLEX_UNKNOWN; 12285 } else { 12286 tp->link_config.advertising = 0; 12287 tp->link_config.speed = speed; 12288 tp->link_config.duplex = cmd->base.duplex; 12289 } 12290 12291 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; 12292 12293 tg3_warn_mgmt_link_flap(tp); 12294 12295 if (netif_running(dev)) 12296 tg3_setup_phy(tp, true); 12297 12298 tg3_full_unlock(tp); 12299 12300 return 0; 12301 } 12302 12303 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 12304 { 12305 struct tg3 *tp = netdev_priv(dev); 12306 12307 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); 12308 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version)); 12309 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info)); 12310 } 12311 12312 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 12313 { 12314 struct tg3 *tp = netdev_priv(dev); 12315 12316 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev)) 12317 wol->supported = WAKE_MAGIC; 12318 else 12319 wol->supported = 0; 12320 wol->wolopts = 0; 12321 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev)) 12322 wol->wolopts = WAKE_MAGIC; 12323 memset(&wol->sopass, 0, sizeof(wol->sopass)); 12324 } 12325 12326 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 12327 { 12328 struct tg3 *tp = netdev_priv(dev); 12329 struct device *dp = &tp->pdev->dev; 12330 12331 if (wol->wolopts & ~WAKE_MAGIC) 12332 return -EINVAL; 12333 if ((wol->wolopts & WAKE_MAGIC) && 12334 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp))) 12335 return -EINVAL; 12336 12337 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC); 12338 12339 if (device_may_wakeup(dp)) 12340 tg3_flag_set(tp, WOL_ENABLE); 12341 else 12342 tg3_flag_clear(tp, WOL_ENABLE); 12343 12344 return 0; 12345 } 12346 12347 static u32 tg3_get_msglevel(struct net_device *dev) 12348 { 12349 struct tg3 *tp = netdev_priv(dev); 12350 return tp->msg_enable; 12351 } 12352 12353 static void tg3_set_msglevel(struct net_device *dev, u32 value) 12354 { 12355 struct tg3 *tp = netdev_priv(dev); 12356 tp->msg_enable = value; 12357 } 12358 12359 static int tg3_nway_reset(struct net_device *dev) 12360 { 12361 struct tg3 *tp = netdev_priv(dev); 12362 int r; 12363 12364 if 
(!netif_running(dev)) 12365 return -EAGAIN; 12366 12367 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 12368 return -EINVAL; 12369 12370 tg3_warn_mgmt_link_flap(tp); 12371 12372 if (tg3_flag(tp, USE_PHYLIB)) { 12373 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 12374 return -EAGAIN; 12375 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); 12376 } else { 12377 u32 bmcr; 12378 12379 spin_lock_bh(&tp->lock); 12380 r = -EINVAL; 12381 tg3_readphy(tp, MII_BMCR, &bmcr); 12382 if (!tg3_readphy(tp, MII_BMCR, &bmcr) && 12383 ((bmcr & BMCR_ANENABLE) || 12384 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) { 12385 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART | 12386 BMCR_ANENABLE); 12387 r = 0; 12388 } 12389 spin_unlock_bh(&tp->lock); 12390 } 12391 12392 return r; 12393 } 12394 12395 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) 12396 { 12397 struct tg3 *tp = netdev_priv(dev); 12398 12399 ering->rx_max_pending = tp->rx_std_ring_mask; 12400 if (tg3_flag(tp, JUMBO_RING_ENABLE)) 12401 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask; 12402 else 12403 ering->rx_jumbo_max_pending = 0; 12404 12405 ering->tx_max_pending = TG3_TX_RING_SIZE - 1; 12406 12407 ering->rx_pending = tp->rx_pending; 12408 if (tg3_flag(tp, JUMBO_RING_ENABLE)) 12409 ering->rx_jumbo_pending = tp->rx_jumbo_pending; 12410 else 12411 ering->rx_jumbo_pending = 0; 12412 12413 ering->tx_pending = tp->napi[0].tx_pending; 12414 } 12415 12416 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) 12417 { 12418 struct tg3 *tp = netdev_priv(dev); 12419 int i, irq_sync = 0, err = 0; 12420 bool reset_phy = false; 12421 12422 if ((ering->rx_pending > tp->rx_std_ring_mask) || 12423 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) || 12424 (ering->tx_pending > TG3_TX_RING_SIZE - 1) || 12425 (ering->tx_pending <= MAX_SKB_FRAGS) || 12426 (tg3_flag(tp, TSO_BUG) && 12427 (ering->tx_pending <= (MAX_SKB_FRAGS * 3)))) 12428 return -EINVAL; 12429 12430 if (netif_running(dev)) { 12431 tg3_phy_stop(tp); 12432 tg3_netif_stop(tp); 12433 irq_sync = 1; 12434 } 12435 12436 tg3_full_lock(tp, irq_sync); 12437 12438 tp->rx_pending = ering->rx_pending; 12439 12440 if (tg3_flag(tp, MAX_RXPEND_64) && 12441 tp->rx_pending > 63) 12442 tp->rx_pending = 63; 12443 12444 if (tg3_flag(tp, JUMBO_RING_ENABLE)) 12445 tp->rx_jumbo_pending = ering->rx_jumbo_pending; 12446 12447 for (i = 0; i < tp->irq_max; i++) 12448 tp->napi[i].tx_pending = ering->tx_pending; 12449 12450 if (netif_running(dev)) { 12451 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 12452 /* Reset PHY to avoid PHY lock up */ 12453 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 12454 tg3_asic_rev(tp) == ASIC_REV_5719 || 12455 tg3_asic_rev(tp) == ASIC_REV_5720) 12456 reset_phy = true; 12457 12458 err = tg3_restart_hw(tp, reset_phy); 12459 if (!err) 12460 tg3_netif_start(tp); 12461 } 12462 12463 tg3_full_unlock(tp); 12464 12465 if (irq_sync && !err) 12466 tg3_phy_start(tp); 12467 12468 return err; 12469 } 12470 12471 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) 12472 { 12473 struct tg3 *tp = netdev_priv(dev); 12474 12475 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG); 12476 12477 if (tp->link_config.flowctrl & FLOW_CTRL_RX) 12478 epause->rx_pause = 1; 12479 else 12480 epause->rx_pause = 0; 12481 12482 if (tp->link_config.flowctrl & FLOW_CTRL_TX) 12483 epause->tx_pause = 1; 12484 else 12485 epause->tx_pause = 0; 12486 } 12487 12488 static int tg3_set_pauseparam(struct net_device *dev, struct 
ethtool_pauseparam *epause) 12489 { 12490 struct tg3 *tp = netdev_priv(dev); 12491 int err = 0; 12492 bool reset_phy = false; 12493 12494 if (tp->link_config.autoneg == AUTONEG_ENABLE) 12495 tg3_warn_mgmt_link_flap(tp); 12496 12497 if (tg3_flag(tp, USE_PHYLIB)) { 12498 struct phy_device *phydev; 12499 12500 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 12501 12502 if (!phy_validate_pause(phydev, epause)) 12503 return -EINVAL; 12504 12505 tp->link_config.flowctrl = 0; 12506 phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause); 12507 if (epause->rx_pause) { 12508 tp->link_config.flowctrl |= FLOW_CTRL_RX; 12509 12510 if (epause->tx_pause) { 12511 tp->link_config.flowctrl |= FLOW_CTRL_TX; 12512 } 12513 } else if (epause->tx_pause) { 12514 tp->link_config.flowctrl |= FLOW_CTRL_TX; 12515 } 12516 12517 if (epause->autoneg) 12518 tg3_flag_set(tp, PAUSE_AUTONEG); 12519 else 12520 tg3_flag_clear(tp, PAUSE_AUTONEG); 12521 12522 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { 12523 if (phydev->autoneg) { 12524 /* phy_set_asym_pause() will 12525 * renegotiate the link to inform our 12526 * link partner of our flow control 12527 * settings, even if the flow control 12528 * is forced. Let tg3_adjust_link() 12529 * do the final flow control setup. 12530 */ 12531 return 0; 12532 } 12533 12534 if (!epause->autoneg) 12535 tg3_setup_flow_control(tp, 0, 0); 12536 } 12537 } else { 12538 int irq_sync = 0; 12539 12540 if (netif_running(dev)) { 12541 tg3_netif_stop(tp); 12542 irq_sync = 1; 12543 } 12544 12545 tg3_full_lock(tp, irq_sync); 12546 12547 if (epause->autoneg) 12548 tg3_flag_set(tp, PAUSE_AUTONEG); 12549 else 12550 tg3_flag_clear(tp, PAUSE_AUTONEG); 12551 if (epause->rx_pause) 12552 tp->link_config.flowctrl |= FLOW_CTRL_RX; 12553 else 12554 tp->link_config.flowctrl &= ~FLOW_CTRL_RX; 12555 if (epause->tx_pause) 12556 tp->link_config.flowctrl |= FLOW_CTRL_TX; 12557 else 12558 tp->link_config.flowctrl &= ~FLOW_CTRL_TX; 12559 12560 if (netif_running(dev)) { 12561 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 12562 /* Reset PHY to avoid PHY lock up */ 12563 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 12564 tg3_asic_rev(tp) == ASIC_REV_5719 || 12565 tg3_asic_rev(tp) == ASIC_REV_5720) 12566 reset_phy = true; 12567 12568 err = tg3_restart_hw(tp, reset_phy); 12569 if (!err) 12570 tg3_netif_start(tp); 12571 } 12572 12573 tg3_full_unlock(tp); 12574 } 12575 12576 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; 12577 12578 return err; 12579 } 12580 12581 static int tg3_get_sset_count(struct net_device *dev, int sset) 12582 { 12583 switch (sset) { 12584 case ETH_SS_TEST: 12585 return TG3_NUM_TEST; 12586 case ETH_SS_STATS: 12587 return TG3_NUM_STATS; 12588 default: 12589 return -EOPNOTSUPP; 12590 } 12591 } 12592 12593 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, 12594 u32 *rules __always_unused) 12595 { 12596 struct tg3 *tp = netdev_priv(dev); 12597 12598 if (!tg3_flag(tp, SUPPORT_MSIX)) 12599 return -EOPNOTSUPP; 12600 12601 switch (info->cmd) { 12602 case ETHTOOL_GRXRINGS: 12603 if (netif_running(tp->dev)) 12604 info->data = tp->rxq_cnt; 12605 else { 12606 info->data = num_online_cpus(); 12607 if (info->data > TG3_RSS_MAX_NUM_QS) 12608 info->data = TG3_RSS_MAX_NUM_QS; 12609 } 12610 12611 return 0; 12612 12613 default: 12614 return -EOPNOTSUPP; 12615 } 12616 } 12617 12618 static u32 tg3_get_rxfh_indir_size(struct net_device *dev) 12619 { 12620 u32 size = 0; 12621 struct tg3 *tp = netdev_priv(dev); 12622 12623 if (tg3_flag(tp, SUPPORT_MSIX)) 12624 size = TG3_RSS_INDIR_TBL_SIZE; 12625 
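/* A size of zero tells the ethtool core that no RSS indirection
 * table is exposed (non-MSI-X configurations).
 */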
12626 return size; 12627 } 12628 12629 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc) 12630 { 12631 struct tg3 *tp = netdev_priv(dev); 12632 int i; 12633 12634 if (hfunc) 12635 *hfunc = ETH_RSS_HASH_TOP; 12636 if (!indir) 12637 return 0; 12638 12639 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) 12640 indir[i] = tp->rss_ind_tbl[i]; 12641 12642 return 0; 12643 } 12644 12645 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, 12646 const u8 hfunc) 12647 { 12648 struct tg3 *tp = netdev_priv(dev); 12649 size_t i; 12650 12651 /* We require at least one supported parameter to be changed and no 12652 * change in any of the unsupported parameters 12653 */ 12654 if (key || 12655 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) 12656 return -EOPNOTSUPP; 12657 12658 if (!indir) 12659 return 0; 12660 12661 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) 12662 tp->rss_ind_tbl[i] = indir[i]; 12663 12664 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS)) 12665 return 0; 12666 12667 /* It is legal to write the indirection 12668 * table while the device is running. 12669 */ 12670 tg3_full_lock(tp, 0); 12671 tg3_rss_write_indir_tbl(tp); 12672 tg3_full_unlock(tp); 12673 12674 return 0; 12675 } 12676 12677 static void tg3_get_channels(struct net_device *dev, 12678 struct ethtool_channels *channel) 12679 { 12680 struct tg3 *tp = netdev_priv(dev); 12681 u32 deflt_qs = netif_get_num_default_rss_queues(); 12682 12683 channel->max_rx = tp->rxq_max; 12684 channel->max_tx = tp->txq_max; 12685 12686 if (netif_running(dev)) { 12687 channel->rx_count = tp->rxq_cnt; 12688 channel->tx_count = tp->txq_cnt; 12689 } else { 12690 if (tp->rxq_req) 12691 channel->rx_count = tp->rxq_req; 12692 else 12693 channel->rx_count = min(deflt_qs, tp->rxq_max); 12694 12695 if (tp->txq_req) 12696 channel->tx_count = tp->txq_req; 12697 else 12698 channel->tx_count = min(deflt_qs, tp->txq_max); 12699 } 12700 } 12701 12702 static int tg3_set_channels(struct net_device *dev, 12703 struct ethtool_channels *channel) 12704 { 12705 struct tg3 *tp = netdev_priv(dev); 12706 12707 if (!tg3_flag(tp, SUPPORT_MSIX)) 12708 return -EOPNOTSUPP; 12709 12710 if (channel->rx_count > tp->rxq_max || 12711 channel->tx_count > tp->txq_max) 12712 return -EINVAL; 12713 12714 tp->rxq_req = channel->rx_count; 12715 tp->txq_req = channel->tx_count; 12716 12717 if (!netif_running(dev)) 12718 return 0; 12719 12720 tg3_stop(tp); 12721 12722 tg3_carrier_off(tp); 12723 12724 tg3_start(tp, true, false, false); 12725 12726 return 0; 12727 } 12728 12729 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf) 12730 { 12731 switch (stringset) { 12732 case ETH_SS_STATS: 12733 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys)); 12734 break; 12735 case ETH_SS_TEST: 12736 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys)); 12737 break; 12738 default: 12739 WARN_ON(1); /* we need a WARN() */ 12740 break; 12741 } 12742 } 12743 12744 static int tg3_set_phys_id(struct net_device *dev, 12745 enum ethtool_phys_id_state state) 12746 { 12747 struct tg3 *tp = netdev_priv(dev); 12748 12749 switch (state) { 12750 case ETHTOOL_ID_ACTIVE: 12751 return 1; /* cycle on/off once per second */ 12752 12753 case ETHTOOL_ID_ON: 12754 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | 12755 LED_CTRL_1000MBPS_ON | 12756 LED_CTRL_100MBPS_ON | 12757 LED_CTRL_10MBPS_ON | 12758 LED_CTRL_TRAFFIC_OVERRIDE | 12759 LED_CTRL_TRAFFIC_BLINK | 12760 LED_CTRL_TRAFFIC_LED); 12761 break; 12762 12763 case
ETHTOOL_ID_OFF: 12764 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | 12765 LED_CTRL_TRAFFIC_OVERRIDE); 12766 break; 12767 12768 case ETHTOOL_ID_INACTIVE: 12769 tw32(MAC_LED_CTRL, tp->led_ctrl); 12770 break; 12771 } 12772 12773 return 0; 12774 } 12775 12776 static void tg3_get_ethtool_stats(struct net_device *dev, 12777 struct ethtool_stats *estats, u64 *tmp_stats) 12778 { 12779 struct tg3 *tp = netdev_priv(dev); 12780 12781 if (tp->hw_stats) 12782 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats); 12783 else 12784 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats)); 12785 } 12786 12787 static __be32 *tg3_vpd_readblock(struct tg3 *tp, unsigned int *vpdlen) 12788 { 12789 int i; 12790 __be32 *buf; 12791 u32 offset = 0, len = 0; 12792 u32 magic, val; 12793 12794 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic)) 12795 return NULL; 12796 12797 if (magic == TG3_EEPROM_MAGIC) { 12798 for (offset = TG3_NVM_DIR_START; 12799 offset < TG3_NVM_DIR_END; 12800 offset += TG3_NVM_DIRENT_SIZE) { 12801 if (tg3_nvram_read(tp, offset, &val)) 12802 return NULL; 12803 12804 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == 12805 TG3_NVM_DIRTYPE_EXTVPD) 12806 break; 12807 } 12808 12809 if (offset != TG3_NVM_DIR_END) { 12810 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4; 12811 if (tg3_nvram_read(tp, offset + 4, &offset)) 12812 return NULL; 12813 12814 offset = tg3_nvram_logical_addr(tp, offset); 12815 } 12816 12817 if (!offset || !len) { 12818 offset = TG3_NVM_VPD_OFF; 12819 len = TG3_NVM_VPD_LEN; 12820 } 12821 12822 buf = kmalloc(len, GFP_KERNEL); 12823 if (!buf) 12824 return NULL; 12825 12826 for (i = 0; i < len; i += 4) { 12827 /* The data is in little-endian format in NVRAM. 12828 * Use the big-endian read routines to preserve 12829 * the byte order as it exists in NVRAM. 
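* The callers treat this buffer as a raw byte stream (e.g. the
* pci_vpd_check_csum() call in tg3_test_nvram()), so no further
* swapping is wanted here.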
12830 */ 12831 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4])) 12832 goto error; 12833 } 12834 *vpdlen = len; 12835 } else { 12836 buf = pci_vpd_alloc(tp->pdev, vpdlen); 12837 if (IS_ERR(buf)) 12838 return NULL; 12839 } 12840 12841 return buf; 12842 12843 error: 12844 kfree(buf); 12845 return NULL; 12846 } 12847 12848 #define NVRAM_TEST_SIZE 0x100 12849 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14 12850 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18 12851 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c 12852 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20 12853 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24 12854 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50 12855 #define NVRAM_SELFBOOT_HW_SIZE 0x20 12856 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c 12857 12858 static int tg3_test_nvram(struct tg3 *tp) 12859 { 12860 u32 csum, magic; 12861 __be32 *buf; 12862 int i, j, k, err = 0, size; 12863 unsigned int len; 12864 12865 if (tg3_flag(tp, NO_NVRAM)) 12866 return 0; 12867 12868 if (tg3_nvram_read(tp, 0, &magic) != 0) 12869 return -EIO; 12870 12871 if (magic == TG3_EEPROM_MAGIC) 12872 size = NVRAM_TEST_SIZE; 12873 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) { 12874 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) == 12875 TG3_EEPROM_SB_FORMAT_1) { 12876 switch (magic & TG3_EEPROM_SB_REVISION_MASK) { 12877 case TG3_EEPROM_SB_REVISION_0: 12878 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE; 12879 break; 12880 case TG3_EEPROM_SB_REVISION_2: 12881 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE; 12882 break; 12883 case TG3_EEPROM_SB_REVISION_3: 12884 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE; 12885 break; 12886 case TG3_EEPROM_SB_REVISION_4: 12887 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE; 12888 break; 12889 case TG3_EEPROM_SB_REVISION_5: 12890 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE; 12891 break; 12892 case TG3_EEPROM_SB_REVISION_6: 12893 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE; 12894 break; 12895 default: 12896 return -EIO; 12897 } 12898 } else 12899 return 0; 12900 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW) 12901 size = NVRAM_SELFBOOT_HW_SIZE; 12902 else 12903 return -EIO; 12904 12905 buf = kmalloc(size, GFP_KERNEL); 12906 if (buf == NULL) 12907 return -ENOMEM; 12908 12909 err = -EIO; 12910 for (i = 0, j = 0; i < size; i += 4, j++) { 12911 err = tg3_nvram_read_be32(tp, i, &buf[j]); 12912 if (err) 12913 break; 12914 } 12915 if (i < size) 12916 goto out; 12917 12918 /* Selfboot format */ 12919 magic = be32_to_cpu(buf[0]); 12920 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == 12921 TG3_EEPROM_MAGIC_FW) { 12922 u8 *buf8 = (u8 *) buf, csum8 = 0; 12923 12924 if ((magic & TG3_EEPROM_SB_REVISION_MASK) == 12925 TG3_EEPROM_SB_REVISION_2) { 12926 /* For rev 2, the csum doesn't include the MBA. */ 12927 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++) 12928 csum8 += buf8[i]; 12929 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++) 12930 csum8 += buf8[i]; 12931 } else { 12932 for (i = 0; i < size; i++) 12933 csum8 += buf8[i]; 12934 } 12935 12936 if (csum8 == 0) { 12937 err = 0; 12938 goto out; 12939 } 12940 12941 err = -EIO; 12942 goto out; 12943 } 12944 12945 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == 12946 TG3_EEPROM_MAGIC_HW) { 12947 u8 data[NVRAM_SELFBOOT_DATA_SIZE]; 12948 u8 parity[NVRAM_SELFBOOT_DATA_SIZE]; 12949 u8 *buf8 = (u8 *) buf; 12950 12951 /* Separate the parity bits and the data bytes. 
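* Layout note: in the 0x20-byte selfboot image, bytes 0, 8, 16
* and 17 carry the parity bits (7 + 7 + 6 + 8 = 28) for the 28
* remaining data bytes; each data byte plus its parity bit must
* have odd overall parity, which is what the loop further below
* verifies.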
*/ 12952 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) { 12953 if ((i == 0) || (i == 8)) { 12954 int l; 12955 u8 msk; 12956 12957 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1) 12958 parity[k++] = buf8[i] & msk; 12959 i++; 12960 } else if (i == 16) { 12961 int l; 12962 u8 msk; 12963 12964 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1) 12965 parity[k++] = buf8[i] & msk; 12966 i++; 12967 12968 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1) 12969 parity[k++] = buf8[i] & msk; 12970 i++; 12971 } 12972 data[j++] = buf8[i]; 12973 } 12974 12975 err = -EIO; 12976 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) { 12977 u8 hw8 = hweight8(data[i]); 12978 12979 if ((hw8 & 0x1) && parity[i]) 12980 goto out; 12981 else if (!(hw8 & 0x1) && !parity[i]) 12982 goto out; 12983 } 12984 err = 0; 12985 goto out; 12986 } 12987 12988 err = -EIO; 12989 12990 /* Bootstrap checksum at offset 0x10 */ 12991 csum = calc_crc((unsigned char *) buf, 0x10); 12992 if (csum != le32_to_cpu(buf[0x10/4])) 12993 goto out; 12994 12995 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */ 12996 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88); 12997 if (csum != le32_to_cpu(buf[0xfc/4])) 12998 goto out; 12999 13000 kfree(buf); 13001 13002 buf = tg3_vpd_readblock(tp, &len); 13003 if (!buf) 13004 return -ENOMEM; 13005 13006 err = pci_vpd_check_csum(buf, len); 13007 /* go on if no checksum found */ 13008 if (err == 1) 13009 err = 0; 13010 out: 13011 kfree(buf); 13012 return err; 13013 } 13014 13015 #define TG3_SERDES_TIMEOUT_SEC 2 13016 #define TG3_COPPER_TIMEOUT_SEC 6 13017 13018 static int tg3_test_link(struct tg3 *tp) 13019 { 13020 int i, max; 13021 13022 if (!netif_running(tp->dev)) 13023 return -ENODEV; 13024 13025 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) 13026 max = TG3_SERDES_TIMEOUT_SEC; 13027 else 13028 max = TG3_COPPER_TIMEOUT_SEC; 13029 13030 for (i = 0; i < max; i++) { 13031 if (tp->link_up) 13032 return 0; 13033 13034 if (msleep_interruptible(1000)) 13035 break; 13036 } 13037 13038 return -EIO; 13039 } 13040 13041 /* Only test the commonly used registers */ 13042 static int tg3_test_registers(struct tg3 *tp) 13043 { 13044 int i, is_5705, is_5750; 13045 u32 offset, read_mask, write_mask, val, save_val, read_val; 13046 static struct { 13047 u16 offset; 13048 u16 flags; 13049 #define TG3_FL_5705 0x1 13050 #define TG3_FL_NOT_5705 0x2 13051 #define TG3_FL_NOT_5788 0x4 13052 #define TG3_FL_NOT_5750 0x8 13053 u32 read_mask; 13054 u32 write_mask; 13055 } reg_tbl[] = { 13056 /* MAC Control Registers */ 13057 { MAC_MODE, TG3_FL_NOT_5705, 13058 0x00000000, 0x00ef6f8c }, 13059 { MAC_MODE, TG3_FL_5705, 13060 0x00000000, 0x01ef6b8c }, 13061 { MAC_STATUS, TG3_FL_NOT_5705, 13062 0x03800107, 0x00000000 }, 13063 { MAC_STATUS, TG3_FL_5705, 13064 0x03800100, 0x00000000 }, 13065 { MAC_ADDR_0_HIGH, 0x0000, 13066 0x00000000, 0x0000ffff }, 13067 { MAC_ADDR_0_LOW, 0x0000, 13068 0x00000000, 0xffffffff }, 13069 { MAC_RX_MTU_SIZE, 0x0000, 13070 0x00000000, 0x0000ffff }, 13071 { MAC_TX_MODE, 0x0000, 13072 0x00000000, 0x00000070 }, 13073 { MAC_TX_LENGTHS, 0x0000, 13074 0x00000000, 0x00003fff }, 13075 { MAC_RX_MODE, TG3_FL_NOT_5705, 13076 0x00000000, 0x000007fc }, 13077 { MAC_RX_MODE, TG3_FL_5705, 13078 0x00000000, 0x000007dc }, 13079 { MAC_HASH_REG_0, 0x0000, 13080 0x00000000, 0xffffffff }, 13081 { MAC_HASH_REG_1, 0x0000, 13082 0x00000000, 0xffffffff }, 13083 { MAC_HASH_REG_2, 0x0000, 13084 0x00000000, 0xffffffff }, 13085 { MAC_HASH_REG_3, 0x0000, 13086 0x00000000, 0xffffffff }, 13087 13088 /* Receive Data and Receive BD 
Initiator Control Registers. */ 13089 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705, 13090 0x00000000, 0xffffffff }, 13091 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705, 13092 0x00000000, 0xffffffff }, 13093 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705, 13094 0x00000000, 0x00000003 }, 13095 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705, 13096 0x00000000, 0xffffffff }, 13097 { RCVDBDI_STD_BD+0, 0x0000, 13098 0x00000000, 0xffffffff }, 13099 { RCVDBDI_STD_BD+4, 0x0000, 13100 0x00000000, 0xffffffff }, 13101 { RCVDBDI_STD_BD+8, 0x0000, 13102 0x00000000, 0xffff0002 }, 13103 { RCVDBDI_STD_BD+0xc, 0x0000, 13104 0x00000000, 0xffffffff }, 13105 13106 /* Receive BD Initiator Control Registers. */ 13107 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705, 13108 0x00000000, 0xffffffff }, 13109 { RCVBDI_STD_THRESH, TG3_FL_5705, 13110 0x00000000, 0x000003ff }, 13111 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705, 13112 0x00000000, 0xffffffff }, 13113 13114 /* Host Coalescing Control Registers. */ 13115 { HOSTCC_MODE, TG3_FL_NOT_5705, 13116 0x00000000, 0x00000004 }, 13117 { HOSTCC_MODE, TG3_FL_5705, 13118 0x00000000, 0x000000f6 }, 13119 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705, 13120 0x00000000, 0xffffffff }, 13121 { HOSTCC_RXCOL_TICKS, TG3_FL_5705, 13122 0x00000000, 0x000003ff }, 13123 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705, 13124 0x00000000, 0xffffffff }, 13125 { HOSTCC_TXCOL_TICKS, TG3_FL_5705, 13126 0x00000000, 0x000003ff }, 13127 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705, 13128 0x00000000, 0xffffffff }, 13129 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, 13130 0x00000000, 0x000000ff }, 13131 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705, 13132 0x00000000, 0xffffffff }, 13133 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, 13134 0x00000000, 0x000000ff }, 13135 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705, 13136 0x00000000, 0xffffffff }, 13137 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705, 13138 0x00000000, 0xffffffff }, 13139 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705, 13140 0x00000000, 0xffffffff }, 13141 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, 13142 0x00000000, 0x000000ff }, 13143 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705, 13144 0x00000000, 0xffffffff }, 13145 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, 13146 0x00000000, 0x000000ff }, 13147 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705, 13148 0x00000000, 0xffffffff }, 13149 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705, 13150 0x00000000, 0xffffffff }, 13151 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705, 13152 0x00000000, 0xffffffff }, 13153 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000, 13154 0x00000000, 0xffffffff }, 13155 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000, 13156 0x00000000, 0xffffffff }, 13157 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000, 13158 0xffffffff, 0x00000000 }, 13159 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000, 13160 0xffffffff, 0x00000000 }, 13161 13162 /* Buffer Manager Control Registers. 
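* (Entries flagged TG3_FL_NOT_5750 below are skipped on
* 5750-class hardware; see the is_5750 handling further down.)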
*/ 13163 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750, 13164 0x00000000, 0x007fff80 }, 13165 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750, 13166 0x00000000, 0x007fffff }, 13167 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000, 13168 0x00000000, 0x0000003f }, 13169 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000, 13170 0x00000000, 0x000001ff }, 13171 { BUFMGR_MB_HIGH_WATER, 0x0000, 13172 0x00000000, 0x000001ff }, 13173 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705, 13174 0xffffffff, 0x00000000 }, 13175 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705, 13176 0xffffffff, 0x00000000 }, 13177 13178 /* Mailbox Registers */ 13179 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000, 13180 0x00000000, 0x000001ff }, 13181 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705, 13182 0x00000000, 0x000001ff }, 13183 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000, 13184 0x00000000, 0x000007ff }, 13185 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000, 13186 0x00000000, 0x000001ff }, 13187 13188 { 0xffff, 0x0000, 0x00000000, 0x00000000 }, 13189 }; 13190 13191 is_5705 = is_5750 = 0; 13192 if (tg3_flag(tp, 5705_PLUS)) { 13193 is_5705 = 1; 13194 if (tg3_flag(tp, 5750_PLUS)) 13195 is_5750 = 1; 13196 } 13197 13198 for (i = 0; reg_tbl[i].offset != 0xffff; i++) { 13199 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705)) 13200 continue; 13201 13202 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705)) 13203 continue; 13204 13205 if (tg3_flag(tp, IS_5788) && 13206 (reg_tbl[i].flags & TG3_FL_NOT_5788)) 13207 continue; 13208 13209 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750)) 13210 continue; 13211 13212 offset = (u32) reg_tbl[i].offset; 13213 read_mask = reg_tbl[i].read_mask; 13214 write_mask = reg_tbl[i].write_mask; 13215 13216 /* Save the original register content */ 13217 save_val = tr32(offset); 13218 13219 /* Determine the read-only value. */ 13220 read_val = save_val & read_mask; 13221 13222 /* Write zero to the register, then make sure the read-only bits 13223 * are not changed and the read/write bits are all zeros. 13224 */ 13225 tw32(offset, 0); 13226 13227 val = tr32(offset); 13228 13229 /* Test the read-only and read/write bits. */ 13230 if (((val & read_mask) != read_val) || (val & write_mask)) 13231 goto out; 13232 13233 /* Write ones to all the bits defined by RdMask and WrMask, then 13234 * make sure the read-only bits are not changed and the 13235 * read/write bits are all ones. 13236 */ 13237 tw32(offset, read_mask | write_mask); 13238 13239 val = tr32(offset); 13240 13241 /* Test the read-only bits. */ 13242 if ((val & read_mask) != read_val) 13243 goto out; 13244 13245 /* Test the read/write bits. 
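* Every bit named in write_mask must now read back as one.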
*/ 13246 if ((val & write_mask) != write_mask) 13247 goto out; 13248 13249 tw32(offset, save_val); 13250 } 13251 13252 return 0; 13253 13254 out: 13255 if (netif_msg_hw(tp)) 13256 netdev_err(tp->dev, 13257 "Register test failed at offset %x\n", offset); 13258 tw32(offset, save_val); 13259 return -EIO; 13260 } 13261 13262 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len) 13263 { 13264 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a }; 13265 int i; 13266 u32 j; 13267 13268 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) { 13269 for (j = 0; j < len; j += 4) { 13270 u32 val; 13271 13272 tg3_write_mem(tp, offset + j, test_pattern[i]); 13273 tg3_read_mem(tp, offset + j, &val); 13274 if (val != test_pattern[i]) 13275 return -EIO; 13276 } 13277 } 13278 return 0; 13279 } 13280 13281 static int tg3_test_memory(struct tg3 *tp) 13282 { 13283 static struct mem_entry { 13284 u32 offset; 13285 u32 len; 13286 } mem_tbl_570x[] = { 13287 { 0x00000000, 0x00b50}, 13288 { 0x00002000, 0x1c000}, 13289 { 0xffffffff, 0x00000} 13290 }, mem_tbl_5705[] = { 13291 { 0x00000100, 0x0000c}, 13292 { 0x00000200, 0x00008}, 13293 { 0x00004000, 0x00800}, 13294 { 0x00006000, 0x01000}, 13295 { 0x00008000, 0x02000}, 13296 { 0x00010000, 0x0e000}, 13297 { 0xffffffff, 0x00000} 13298 }, mem_tbl_5755[] = { 13299 { 0x00000200, 0x00008}, 13300 { 0x00004000, 0x00800}, 13301 { 0x00006000, 0x00800}, 13302 { 0x00008000, 0x02000}, 13303 { 0x00010000, 0x0c000}, 13304 { 0xffffffff, 0x00000} 13305 }, mem_tbl_5906[] = { 13306 { 0x00000200, 0x00008}, 13307 { 0x00004000, 0x00400}, 13308 { 0x00006000, 0x00400}, 13309 { 0x00008000, 0x01000}, 13310 { 0x00010000, 0x01000}, 13311 { 0xffffffff, 0x00000} 13312 }, mem_tbl_5717[] = { 13313 { 0x00000200, 0x00008}, 13314 { 0x00010000, 0x0a000}, 13315 { 0x00020000, 0x13c00}, 13316 { 0xffffffff, 0x00000} 13317 }, mem_tbl_57765[] = { 13318 { 0x00000200, 0x00008}, 13319 { 0x00004000, 0x00800}, 13320 { 0x00006000, 0x09800}, 13321 { 0x00010000, 0x0a000}, 13322 { 0xffffffff, 0x00000} 13323 }; 13324 struct mem_entry *mem_tbl; 13325 int err = 0; 13326 int i; 13327 13328 if (tg3_flag(tp, 5717_PLUS)) 13329 mem_tbl = mem_tbl_5717; 13330 else if (tg3_flag(tp, 57765_CLASS) || 13331 tg3_asic_rev(tp) == ASIC_REV_5762) 13332 mem_tbl = mem_tbl_57765; 13333 else if (tg3_flag(tp, 5755_PLUS)) 13334 mem_tbl = mem_tbl_5755; 13335 else if (tg3_asic_rev(tp) == ASIC_REV_5906) 13336 mem_tbl = mem_tbl_5906; 13337 else if (tg3_flag(tp, 5705_PLUS)) 13338 mem_tbl = mem_tbl_5705; 13339 else 13340 mem_tbl = mem_tbl_570x; 13341 13342 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) { 13343 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len); 13344 if (err) 13345 break; 13346 } 13347 13348 return err; 13349 } 13350 13351 #define TG3_TSO_MSS 500 13352 13353 #define TG3_TSO_IP_HDR_LEN 20 13354 #define TG3_TSO_TCP_HDR_LEN 20 13355 #define TG3_TSO_TCP_OPT_LEN 12 13356 13357 static const u8 tg3_tso_header[] = { 13358 0x08, 0x00, 13359 0x45, 0x00, 0x00, 0x00, 13360 0x00, 0x00, 0x40, 0x00, 13361 0x40, 0x06, 0x00, 0x00, 13362 0x0a, 0x00, 0x00, 0x01, 13363 0x0a, 0x00, 0x00, 0x02, 13364 0x0d, 0x00, 0xe0, 0x00, 13365 0x00, 0x00, 0x01, 0x00, 13366 0x00, 0x00, 0x02, 0x00, 13367 0x80, 0x10, 0x10, 0x00, 13368 0x14, 0x09, 0x00, 0x00, 13369 0x01, 0x01, 0x08, 0x0a, 13370 0x11, 0x11, 0x11, 0x11, 13371 0x11, 0x11, 0x11, 0x11, 13372 }; 13373 13374 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback) 13375 { 13376 u32 rx_start_idx, rx_idx, tx_idx, opaque_key; 13377 u32 base_flags = 0, mss = 0, 
desc_idx, coal_now, data_off, val; 13378 u32 budget; 13379 struct sk_buff *skb; 13380 u8 *tx_data, *rx_data; 13381 dma_addr_t map; 13382 int num_pkts, tx_len, rx_len, i, err; 13383 struct tg3_rx_buffer_desc *desc; 13384 struct tg3_napi *tnapi, *rnapi; 13385 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring; 13386 13387 tnapi = &tp->napi[0]; 13388 rnapi = &tp->napi[0]; 13389 if (tp->irq_cnt > 1) { 13390 if (tg3_flag(tp, ENABLE_RSS)) 13391 rnapi = &tp->napi[1]; 13392 if (tg3_flag(tp, ENABLE_TSS)) 13393 tnapi = &tp->napi[1]; 13394 } 13395 coal_now = tnapi->coal_now | rnapi->coal_now; 13396 13397 err = -EIO; 13398 13399 tx_len = pktsz; 13400 skb = netdev_alloc_skb(tp->dev, tx_len); 13401 if (!skb) 13402 return -ENOMEM; 13403 13404 tx_data = skb_put(skb, tx_len); 13405 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN); 13406 memset(tx_data + ETH_ALEN, 0x0, 8); 13407 13408 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN); 13409 13410 if (tso_loopback) { 13411 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN]; 13412 13413 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN + 13414 TG3_TSO_TCP_OPT_LEN; 13415 13416 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header, 13417 sizeof(tg3_tso_header)); 13418 mss = TG3_TSO_MSS; 13419 13420 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header); 13421 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS); 13422 13423 /* Set the total length field in the IP header */ 13424 iph->tot_len = htons((u16)(mss + hdr_len)); 13425 13426 base_flags = (TXD_FLAG_CPU_PRE_DMA | 13427 TXD_FLAG_CPU_POST_DMA); 13428 13429 if (tg3_flag(tp, HW_TSO_1) || 13430 tg3_flag(tp, HW_TSO_2) || 13431 tg3_flag(tp, HW_TSO_3)) { 13432 struct tcphdr *th; 13433 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN; 13434 th = (struct tcphdr *)&tx_data[val]; 13435 th->check = 0; 13436 } else 13437 base_flags |= TXD_FLAG_TCPUDP_CSUM; 13438 13439 if (tg3_flag(tp, HW_TSO_3)) { 13440 mss |= (hdr_len & 0xc) << 12; 13441 if (hdr_len & 0x10) 13442 base_flags |= 0x00000010; 13443 base_flags |= (hdr_len & 0x3e0) << 5; 13444 } else if (tg3_flag(tp, HW_TSO_2)) 13445 mss |= hdr_len << 9; 13446 else if (tg3_flag(tp, HW_TSO_1) || 13447 tg3_asic_rev(tp) == ASIC_REV_5705) { 13448 mss |= (TG3_TSO_TCP_OPT_LEN << 9); 13449 } else { 13450 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10); 13451 } 13452 13453 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header); 13454 } else { 13455 num_pkts = 1; 13456 data_off = ETH_HLEN; 13457 13458 if (tg3_flag(tp, USE_JUMBO_BDFLAG) && 13459 tx_len > VLAN_ETH_FRAME_LEN) 13460 base_flags |= TXD_FLAG_JMB_PKT; 13461 } 13462 13463 for (i = data_off; i < tx_len; i++) 13464 tx_data[i] = (u8) (i & 0xff); 13465 13466 map = dma_map_single(&tp->pdev->dev, skb->data, tx_len, DMA_TO_DEVICE); 13467 if (dma_mapping_error(&tp->pdev->dev, map)) { 13468 dev_kfree_skb(skb); 13469 return -EIO; 13470 } 13471 13472 val = tnapi->tx_prod; 13473 tnapi->tx_buffers[val].skb = skb; 13474 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map); 13475 13476 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 13477 rnapi->coal_now); 13478 13479 udelay(10); 13480 13481 rx_start_idx = rnapi->hw_status->idx[0].rx_producer; 13482 13483 budget = tg3_tx_avail(tnapi); 13484 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len, 13485 base_flags | TXD_FLAG_END, mss, 0)) { 13486 tnapi->tx_buffers[val].skb = NULL; 13487 dev_kfree_skb(skb); 13488 return -EIO; 13489 } 13490 13491 tnapi->tx_prod++; 13492 13493 /* Sync BD data before updating mailbox */ 13494 wmb(); 13495 13496 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod); 13497 
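/* Reading the mailbox back flushes the posted PCI write, making
 * sure the new producer index has reached the chip before we
 * start polling for completion.
 */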
tr32_mailbox(tnapi->prodmbox); 13498 13499 udelay(10); 13500 13501 /* 350 usec to allow enough time on some 10/100 Mbps devices. */ 13502 for (i = 0; i < 35; i++) { 13503 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 13504 coal_now); 13505 13506 udelay(10); 13507 13508 tx_idx = tnapi->hw_status->idx[0].tx_consumer; 13509 rx_idx = rnapi->hw_status->idx[0].rx_producer; 13510 if ((tx_idx == tnapi->tx_prod) && 13511 (rx_idx == (rx_start_idx + num_pkts))) 13512 break; 13513 } 13514 13515 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1); 13516 dev_kfree_skb(skb); 13517 13518 if (tx_idx != tnapi->tx_prod) 13519 goto out; 13520 13521 if (rx_idx != rx_start_idx + num_pkts) 13522 goto out; 13523 13524 val = data_off; 13525 while (rx_idx != rx_start_idx) { 13526 desc = &rnapi->rx_rcb[rx_start_idx++]; 13527 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; 13528 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; 13529 13530 if ((desc->err_vlan & RXD_ERR_MASK) != 0 && 13531 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) 13532 goto out; 13533 13534 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) 13535 - ETH_FCS_LEN; 13536 13537 if (!tso_loopback) { 13538 if (rx_len != tx_len) 13539 goto out; 13540 13541 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) { 13542 if (opaque_key != RXD_OPAQUE_RING_STD) 13543 goto out; 13544 } else { 13545 if (opaque_key != RXD_OPAQUE_RING_JUMBO) 13546 goto out; 13547 } 13548 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && 13549 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK) 13550 >> RXD_TCPCSUM_SHIFT != 0xffff) { 13551 goto out; 13552 } 13553 13554 if (opaque_key == RXD_OPAQUE_RING_STD) { 13555 rx_data = tpr->rx_std_buffers[desc_idx].data; 13556 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], 13557 mapping); 13558 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { 13559 rx_data = tpr->rx_jmb_buffers[desc_idx].data; 13560 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx], 13561 mapping); 13562 } else 13563 goto out; 13564 13565 dma_sync_single_for_cpu(&tp->pdev->dev, map, rx_len, 13566 DMA_FROM_DEVICE); 13567 13568 rx_data += TG3_RX_OFFSET(tp); 13569 for (i = data_off; i < rx_len; i++, val++) { 13570 if (*(rx_data + i) != (u8) (val & 0xff)) 13571 goto out; 13572 } 13573 } 13574 13575 err = 0; 13576 13577 /* tg3_free_rings will unmap and free the rx_data */ 13578 out: 13579 return err; 13580 } 13581 13582 #define TG3_STD_LOOPBACK_FAILED 1 13583 #define TG3_JMB_LOOPBACK_FAILED 2 13584 #define TG3_TSO_LOOPBACK_FAILED 4 13585 #define TG3_LOOPBACK_FAILED \ 13586 (TG3_STD_LOOPBACK_FAILED | \ 13587 TG3_JMB_LOOPBACK_FAILED | \ 13588 TG3_TSO_LOOPBACK_FAILED) 13589 13590 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk) 13591 { 13592 int err = -EIO; 13593 u32 eee_cap; 13594 u32 jmb_pkt_sz = 9000; 13595 13596 if (tp->dma_limit) 13597 jmb_pkt_sz = tp->dma_limit - ETH_HLEN; 13598 13599 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP; 13600 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP; 13601 13602 if (!netif_running(tp->dev)) { 13603 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13604 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13605 if (do_extlpbk) 13606 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13607 goto done; 13608 } 13609 13610 err = tg3_reset_hw(tp, true); 13611 if (err) { 13612 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13613 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13614 if (do_extlpbk) 13615 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13616 goto done; 13617 } 13618 13619 if (tg3_flag(tp, ENABLE_RSS)) { 13620 int 
i; 13621 13622 /* Reroute all rx packets to the 1st queue */ 13623 for (i = MAC_RSS_INDIR_TBL_0; 13624 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4) 13625 tw32(i, 0x0); 13626 } 13627 13628 /* HW errata - mac loopback fails in some cases on 5780. 13629 * Normal traffic and PHY loopback are not affected by 13630 * errata. Also, the MAC loopback test is deprecated for 13631 * all newer ASIC revisions. 13632 */ 13633 if (tg3_asic_rev(tp) != ASIC_REV_5780 && 13634 !tg3_flag(tp, CPMU_PRESENT)) { 13635 tg3_mac_loopback(tp, true); 13636 13637 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) 13638 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED; 13639 13640 if (tg3_flag(tp, JUMBO_RING_ENABLE) && 13641 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false)) 13642 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED; 13643 13644 tg3_mac_loopback(tp, false); 13645 } 13646 13647 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 13648 !tg3_flag(tp, USE_PHYLIB)) { 13649 int i; 13650 13651 tg3_phy_lpbk_set(tp, 0, false); 13652 13653 /* Wait for link */ 13654 for (i = 0; i < 100; i++) { 13655 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) 13656 break; 13657 mdelay(1); 13658 } 13659 13660 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) 13661 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED; 13662 if (tg3_flag(tp, TSO_CAPABLE) && 13663 tg3_run_loopback(tp, ETH_FRAME_LEN, true)) 13664 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED; 13665 if (tg3_flag(tp, JUMBO_RING_ENABLE) && 13666 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false)) 13667 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED; 13668 13669 if (do_extlpbk) { 13670 tg3_phy_lpbk_set(tp, 0, true); 13671 13672 /* All link indications report up, but the hardware 13673 * isn't really ready for about 20 msec. Double it 13674 * to be sure. 13675 */ 13676 mdelay(40); 13677 13678 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) 13679 data[TG3_EXT_LOOPB_TEST] |= 13680 TG3_STD_LOOPBACK_FAILED; 13681 if (tg3_flag(tp, TSO_CAPABLE) && 13682 tg3_run_loopback(tp, ETH_FRAME_LEN, true)) 13683 data[TG3_EXT_LOOPB_TEST] |= 13684 TG3_TSO_LOOPBACK_FAILED; 13685 if (tg3_flag(tp, JUMBO_RING_ENABLE) && 13686 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false)) 13687 data[TG3_EXT_LOOPB_TEST] |= 13688 TG3_JMB_LOOPBACK_FAILED; 13689 } 13690 13691 /* Re-enable gphy autopowerdown. */ 13692 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) 13693 tg3_phy_toggle_apd(tp, true); 13694 } 13695 13696 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] | 13697 data[TG3_EXT_LOOPB_TEST]) ? 
-EIO : 0; 13698 13699 done: 13700 tp->phy_flags |= eee_cap; 13701 13702 return err; 13703 } 13704 13705 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, 13706 u64 *data) 13707 { 13708 struct tg3 *tp = netdev_priv(dev); 13709 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB; 13710 13711 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { 13712 if (tg3_power_up(tp)) { 13713 etest->flags |= ETH_TEST_FL_FAILED; 13714 memset(data, 1, sizeof(u64) * TG3_NUM_TEST); 13715 return; 13716 } 13717 tg3_ape_driver_state_change(tp, RESET_KIND_INIT); 13718 } 13719 13720 memset(data, 0, sizeof(u64) * TG3_NUM_TEST); 13721 13722 if (tg3_test_nvram(tp) != 0) { 13723 etest->flags |= ETH_TEST_FL_FAILED; 13724 data[TG3_NVRAM_TEST] = 1; 13725 } 13726 if (!doextlpbk && tg3_test_link(tp)) { 13727 etest->flags |= ETH_TEST_FL_FAILED; 13728 data[TG3_LINK_TEST] = 1; 13729 } 13730 if (etest->flags & ETH_TEST_FL_OFFLINE) { 13731 int err, err2 = 0, irq_sync = 0; 13732 13733 if (netif_running(dev)) { 13734 tg3_phy_stop(tp); 13735 tg3_netif_stop(tp); 13736 irq_sync = 1; 13737 } 13738 13739 tg3_full_lock(tp, irq_sync); 13740 tg3_halt(tp, RESET_KIND_SUSPEND, 1); 13741 err = tg3_nvram_lock(tp); 13742 tg3_halt_cpu(tp, RX_CPU_BASE); 13743 if (!tg3_flag(tp, 5705_PLUS)) 13744 tg3_halt_cpu(tp, TX_CPU_BASE); 13745 if (!err) 13746 tg3_nvram_unlock(tp); 13747 13748 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 13749 tg3_phy_reset(tp); 13750 13751 if (tg3_test_registers(tp) != 0) { 13752 etest->flags |= ETH_TEST_FL_FAILED; 13753 data[TG3_REGISTER_TEST] = 1; 13754 } 13755 13756 if (tg3_test_memory(tp) != 0) { 13757 etest->flags |= ETH_TEST_FL_FAILED; 13758 data[TG3_MEMORY_TEST] = 1; 13759 } 13760 13761 if (doextlpbk) 13762 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; 13763 13764 if (tg3_test_loopback(tp, data, doextlpbk)) 13765 etest->flags |= ETH_TEST_FL_FAILED; 13766 13767 tg3_full_unlock(tp); 13768 13769 if (tg3_test_interrupt(tp) != 0) { 13770 etest->flags |= ETH_TEST_FL_FAILED; 13771 data[TG3_INTERRUPT_TEST] = 1; 13772 } 13773 13774 tg3_full_lock(tp, 0); 13775 13776 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 13777 if (netif_running(dev)) { 13778 tg3_flag_set(tp, INIT_COMPLETE); 13779 err2 = tg3_restart_hw(tp, true); 13780 if (!err2) 13781 tg3_netif_start(tp); 13782 } 13783 13784 tg3_full_unlock(tp); 13785 13786 if (irq_sync && !err2) 13787 tg3_phy_start(tp); 13788 } 13789 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 13790 tg3_power_down_prepare(tp); 13791 13792 } 13793 13794 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) 13795 { 13796 struct tg3 *tp = netdev_priv(dev); 13797 struct hwtstamp_config stmpconf; 13798 13799 if (!tg3_flag(tp, PTP_CAPABLE)) 13800 return -EOPNOTSUPP; 13801 13802 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf))) 13803 return -EFAULT; 13804 13805 if (stmpconf.flags) 13806 return -EINVAL; 13807 13808 if (stmpconf.tx_type != HWTSTAMP_TX_ON && 13809 stmpconf.tx_type != HWTSTAMP_TX_OFF) 13810 return -ERANGE; 13811 13812 switch (stmpconf.rx_filter) { 13813 case HWTSTAMP_FILTER_NONE: 13814 tp->rxptpctl = 0; 13815 break; 13816 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 13817 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | 13818 TG3_RX_PTP_CTL_ALL_V1_EVENTS; 13819 break; 13820 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 13821 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | 13822 TG3_RX_PTP_CTL_SYNC_EVNT; 13823 break; 13824 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 13825 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | 13826 TG3_RX_PTP_CTL_DELAY_REQ; 13827 break; 13828 case 
HWTSTAMP_FILTER_PTP_V2_EVENT: 13829 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN | 13830 TG3_RX_PTP_CTL_ALL_V2_EVENTS; 13831 break; 13832 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 13833 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | 13834 TG3_RX_PTP_CTL_ALL_V2_EVENTS; 13835 break; 13836 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 13837 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | 13838 TG3_RX_PTP_CTL_ALL_V2_EVENTS; 13839 break; 13840 case HWTSTAMP_FILTER_PTP_V2_SYNC: 13841 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN | 13842 TG3_RX_PTP_CTL_SYNC_EVNT; 13843 break; 13844 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 13845 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | 13846 TG3_RX_PTP_CTL_SYNC_EVNT; 13847 break; 13848 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 13849 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | 13850 TG3_RX_PTP_CTL_SYNC_EVNT; 13851 break; 13852 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 13853 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN | 13854 TG3_RX_PTP_CTL_DELAY_REQ; 13855 break; 13856 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 13857 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | 13858 TG3_RX_PTP_CTL_DELAY_REQ; 13859 break; 13860 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 13861 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | 13862 TG3_RX_PTP_CTL_DELAY_REQ; 13863 break; 13864 default: 13865 return -ERANGE; 13866 } 13867 13868 if (netif_running(dev) && tp->rxptpctl) 13869 tw32(TG3_RX_PTP_CTL, 13870 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK); 13871 13872 if (stmpconf.tx_type == HWTSTAMP_TX_ON) 13873 tg3_flag_set(tp, TX_TSTAMP_EN); 13874 else 13875 tg3_flag_clear(tp, TX_TSTAMP_EN); 13876 13877 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ? 13878 -EFAULT : 0; 13879 } 13880 13881 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) 13882 { 13883 struct tg3 *tp = netdev_priv(dev); 13884 struct hwtstamp_config stmpconf; 13885 13886 if (!tg3_flag(tp, PTP_CAPABLE)) 13887 return -EOPNOTSUPP; 13888 13889 stmpconf.flags = 0; 13890 stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ? 
13891 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF); 13892 13893 switch (tp->rxptpctl) { 13894 case 0: 13895 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE; 13896 break; 13897 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS: 13898 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; 13899 break; 13900 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT: 13901 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC; 13902 break; 13903 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ: 13904 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ; 13905 break; 13906 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS: 13907 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; 13908 break; 13909 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS: 13910 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; 13911 break; 13912 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS: 13913 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; 13914 break; 13915 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT: 13916 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC; 13917 break; 13918 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT: 13919 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC; 13920 break; 13921 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT: 13922 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC; 13923 break; 13924 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ: 13925 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ; 13926 break; 13927 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ: 13928 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ; 13929 break; 13930 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ: 13931 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ; 13932 break; 13933 default: 13934 WARN_ON_ONCE(1); 13935 return -ERANGE; 13936 } 13937 13938 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ? 
13939 -EFAULT : 0; 13940 } 13941 13942 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 13943 { 13944 struct mii_ioctl_data *data = if_mii(ifr); 13945 struct tg3 *tp = netdev_priv(dev); 13946 int err; 13947 13948 if (tg3_flag(tp, USE_PHYLIB)) { 13949 struct phy_device *phydev; 13950 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 13951 return -EAGAIN; 13952 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 13953 return phy_mii_ioctl(phydev, ifr, cmd); 13954 } 13955 13956 switch (cmd) { 13957 case SIOCGMIIPHY: 13958 data->phy_id = tp->phy_addr; 13959 13960 fallthrough; 13961 case SIOCGMIIREG: { 13962 u32 mii_regval; 13963 13964 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 13965 break; /* We have no PHY */ 13966 13967 if (!netif_running(dev)) 13968 return -EAGAIN; 13969 13970 spin_lock_bh(&tp->lock); 13971 err = __tg3_readphy(tp, data->phy_id & 0x1f, 13972 data->reg_num & 0x1f, &mii_regval); 13973 spin_unlock_bh(&tp->lock); 13974 13975 data->val_out = mii_regval; 13976 13977 return err; 13978 } 13979 13980 case SIOCSMIIREG: 13981 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 13982 break; /* We have no PHY */ 13983 13984 if (!netif_running(dev)) 13985 return -EAGAIN; 13986 13987 spin_lock_bh(&tp->lock); 13988 err = __tg3_writephy(tp, data->phy_id & 0x1f, 13989 data->reg_num & 0x1f, data->val_in); 13990 spin_unlock_bh(&tp->lock); 13991 13992 return err; 13993 13994 case SIOCSHWTSTAMP: 13995 return tg3_hwtstamp_set(dev, ifr); 13996 13997 case SIOCGHWTSTAMP: 13998 return tg3_hwtstamp_get(dev, ifr); 13999 14000 default: 14001 /* do nothing */ 14002 break; 14003 } 14004 return -EOPNOTSUPP; 14005 } 14006 14007 static int tg3_get_coalesce(struct net_device *dev, 14008 struct ethtool_coalesce *ec, 14009 struct kernel_ethtool_coalesce *kernel_coal, 14010 struct netlink_ext_ack *extack) 14011 { 14012 struct tg3 *tp = netdev_priv(dev); 14013 14014 memcpy(ec, &tp->coal, sizeof(*ec)); 14015 return 0; 14016 } 14017 14018 static int tg3_set_coalesce(struct net_device *dev, 14019 struct ethtool_coalesce *ec, 14020 struct kernel_ethtool_coalesce *kernel_coal, 14021 struct netlink_ext_ack *extack) 14022 { 14023 struct tg3 *tp = netdev_priv(dev); 14024 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0; 14025 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0; 14026 14027 if (!tg3_flag(tp, 5705_PLUS)) { 14028 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT; 14029 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT; 14030 max_stat_coal_ticks = MAX_STAT_COAL_TICKS; 14031 min_stat_coal_ticks = MIN_STAT_COAL_TICKS; 14032 } 14033 14034 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) || 14035 (!ec->rx_coalesce_usecs) || 14036 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) || 14037 (!ec->tx_coalesce_usecs) || 14038 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) || 14039 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) || 14040 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) || 14041 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) || 14042 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) || 14043 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) || 14044 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) || 14045 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks)) 14046 return -EINVAL; 14047 14048 /* Only copy relevant parameters, ignore all others. 
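* The ethtool core has already rejected any attempt to change a
* field outside supported_coalesce_params, so the fields below
* can simply be copied verbatim.
*/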
*/ 14049 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs; 14050 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs; 14051 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames; 14052 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames; 14053 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq; 14054 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq; 14055 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq; 14056 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq; 14057 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs; 14058 14059 if (netif_running(dev)) { 14060 tg3_full_lock(tp, 0); 14061 __tg3_set_coalesce(tp, &tp->coal); 14062 tg3_full_unlock(tp); 14063 } 14064 return 0; 14065 } 14066 14067 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata) 14068 { 14069 struct tg3 *tp = netdev_priv(dev); 14070 14071 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { 14072 netdev_warn(tp->dev, "Board does not support EEE!\n"); 14073 return -EOPNOTSUPP; 14074 } 14075 14076 if (edata->advertised != tp->eee.advertised) { 14077 netdev_warn(tp->dev, 14078 "Direct manipulation of EEE advertisement is not supported\n"); 14079 return -EINVAL; 14080 } 14081 14082 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) { 14083 netdev_warn(tp->dev, 14084 "Maximal Tx Lpi timer supported is %#x(u)\n", 14085 TG3_CPMU_DBTMR1_LNKIDLE_MAX); 14086 return -EINVAL; 14087 } 14088 14089 tp->eee = *edata; 14090 14091 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; 14092 tg3_warn_mgmt_link_flap(tp); 14093 14094 if (netif_running(tp->dev)) { 14095 tg3_full_lock(tp, 0); 14096 tg3_setup_eee(tp); 14097 tg3_phy_reset(tp); 14098 tg3_full_unlock(tp); 14099 } 14100 14101 return 0; 14102 } 14103 14104 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata) 14105 { 14106 struct tg3 *tp = netdev_priv(dev); 14107 14108 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { 14109 netdev_warn(tp->dev, 14110 "Board does not support EEE!\n"); 14111 return -EOPNOTSUPP; 14112 } 14113 14114 *edata = tp->eee; 14115 return 0; 14116 } 14117 14118 static const struct ethtool_ops tg3_ethtool_ops = { 14119 .supported_coalesce_params = ETHTOOL_COALESCE_USECS | 14120 ETHTOOL_COALESCE_MAX_FRAMES | 14121 ETHTOOL_COALESCE_USECS_IRQ | 14122 ETHTOOL_COALESCE_MAX_FRAMES_IRQ | 14123 ETHTOOL_COALESCE_STATS_BLOCK_USECS, 14124 .get_drvinfo = tg3_get_drvinfo, 14125 .get_regs_len = tg3_get_regs_len, 14126 .get_regs = tg3_get_regs, 14127 .get_wol = tg3_get_wol, 14128 .set_wol = tg3_set_wol, 14129 .get_msglevel = tg3_get_msglevel, 14130 .set_msglevel = tg3_set_msglevel, 14131 .nway_reset = tg3_nway_reset, 14132 .get_link = ethtool_op_get_link, 14133 .get_eeprom_len = tg3_get_eeprom_len, 14134 .get_eeprom = tg3_get_eeprom, 14135 .set_eeprom = tg3_set_eeprom, 14136 .get_ringparam = tg3_get_ringparam, 14137 .set_ringparam = tg3_set_ringparam, 14138 .get_pauseparam = tg3_get_pauseparam, 14139 .set_pauseparam = tg3_set_pauseparam, 14140 .self_test = tg3_self_test, 14141 .get_strings = tg3_get_strings, 14142 .set_phys_id = tg3_set_phys_id, 14143 .get_ethtool_stats = tg3_get_ethtool_stats, 14144 .get_coalesce = tg3_get_coalesce, 14145 .set_coalesce = tg3_set_coalesce, 14146 .get_sset_count = tg3_get_sset_count, 14147 .get_rxnfc = tg3_get_rxnfc, 14148 .get_rxfh_indir_size = tg3_get_rxfh_indir_size, 14149 .get_rxfh = tg3_get_rxfh, 14150 .set_rxfh = tg3_set_rxfh, 14151 .get_channels = tg3_get_channels, 14152 .set_channels = tg3_set_channels, 14153 .get_ts_info 
= tg3_get_ts_info, 14154 .get_eee = tg3_get_eee, 14155 .set_eee = tg3_set_eee, 14156 .get_link_ksettings = tg3_get_link_ksettings, 14157 .set_link_ksettings = tg3_set_link_ksettings, 14158 }; 14159 14160 static void tg3_get_stats64(struct net_device *dev, 14161 struct rtnl_link_stats64 *stats) 14162 { 14163 struct tg3 *tp = netdev_priv(dev); 14164 14165 spin_lock_bh(&tp->lock); 14166 if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) { 14167 *stats = tp->net_stats_prev; 14168 spin_unlock_bh(&tp->lock); 14169 return; 14170 } 14171 14172 tg3_get_nstats(tp, stats); 14173 spin_unlock_bh(&tp->lock); 14174 } 14175 14176 static void tg3_set_rx_mode(struct net_device *dev) 14177 { 14178 struct tg3 *tp = netdev_priv(dev); 14179 14180 if (!netif_running(dev)) 14181 return; 14182 14183 tg3_full_lock(tp, 0); 14184 __tg3_set_rx_mode(dev); 14185 tg3_full_unlock(tp); 14186 } 14187 14188 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp, 14189 int new_mtu) 14190 { 14191 dev->mtu = new_mtu; 14192 14193 if (new_mtu > ETH_DATA_LEN) { 14194 if (tg3_flag(tp, 5780_CLASS)) { 14195 netdev_update_features(dev); 14196 tg3_flag_clear(tp, TSO_CAPABLE); 14197 } else { 14198 tg3_flag_set(tp, JUMBO_RING_ENABLE); 14199 } 14200 } else { 14201 if (tg3_flag(tp, 5780_CLASS)) { 14202 tg3_flag_set(tp, TSO_CAPABLE); 14203 netdev_update_features(dev); 14204 } 14205 tg3_flag_clear(tp, JUMBO_RING_ENABLE); 14206 } 14207 } 14208 14209 static int tg3_change_mtu(struct net_device *dev, int new_mtu) 14210 { 14211 struct tg3 *tp = netdev_priv(dev); 14212 int err; 14213 bool reset_phy = false; 14214 14215 if (!netif_running(dev)) { 14216 /* We'll just catch it later when the 14217 * device is up'd. 14218 */ 14219 tg3_set_mtu(dev, tp, new_mtu); 14220 return 0; 14221 } 14222 14223 tg3_phy_stop(tp); 14224 14225 tg3_netif_stop(tp); 14226 14227 tg3_set_mtu(dev, tp, new_mtu); 14228 14229 tg3_full_lock(tp, 1); 14230 14231 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 14232 14233 /* Reset PHY, otherwise the read DMA engine will be in a mode that 14234 * breaks all requests to 256 bytes. 
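 * A full PHY reset is therefore forced below on the 57766, 5717,
 * 5719 and 5720, the chips for which this read DMA mode issue is
 * handled here.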
14235 */ 14236 if (tg3_asic_rev(tp) == ASIC_REV_57766 || 14237 tg3_asic_rev(tp) == ASIC_REV_5717 || 14238 tg3_asic_rev(tp) == ASIC_REV_5719 || 14239 tg3_asic_rev(tp) == ASIC_REV_5720) 14240 reset_phy = true; 14241 14242 err = tg3_restart_hw(tp, reset_phy); 14243 14244 if (!err) 14245 tg3_netif_start(tp); 14246 14247 tg3_full_unlock(tp); 14248 14249 if (!err) 14250 tg3_phy_start(tp); 14251 14252 return err; 14253 } 14254 14255 static const struct net_device_ops tg3_netdev_ops = { 14256 .ndo_open = tg3_open, 14257 .ndo_stop = tg3_close, 14258 .ndo_start_xmit = tg3_start_xmit, 14259 .ndo_get_stats64 = tg3_get_stats64, 14260 .ndo_validate_addr = eth_validate_addr, 14261 .ndo_set_rx_mode = tg3_set_rx_mode, 14262 .ndo_set_mac_address = tg3_set_mac_addr, 14263 .ndo_eth_ioctl = tg3_ioctl, 14264 .ndo_tx_timeout = tg3_tx_timeout, 14265 .ndo_change_mtu = tg3_change_mtu, 14266 .ndo_fix_features = tg3_fix_features, 14267 .ndo_set_features = tg3_set_features, 14268 #ifdef CONFIG_NET_POLL_CONTROLLER 14269 .ndo_poll_controller = tg3_poll_controller, 14270 #endif 14271 }; 14272 14273 static void tg3_get_eeprom_size(struct tg3 *tp) 14274 { 14275 u32 cursize, val, magic; 14276 14277 tp->nvram_size = EEPROM_CHIP_SIZE; 14278 14279 if (tg3_nvram_read(tp, 0, &magic) != 0) 14280 return; 14281 14282 if ((magic != TG3_EEPROM_MAGIC) && 14283 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) && 14284 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW)) 14285 return; 14286 14287 /* 14288 * Size the chip by reading offsets at increasing powers of two. 14289 * When we encounter our validation signature, we know the addressing 14290 * has wrapped around, and thus have our chip size. 14291 */ 14292 cursize = 0x10; 14293 14294 while (cursize < tp->nvram_size) { 14295 if (tg3_nvram_read(tp, cursize, &val) != 0) 14296 return; 14297 14298 if (val == magic) 14299 break; 14300 14301 cursize <<= 1; 14302 } 14303 14304 tp->nvram_size = cursize; 14305 } 14306 14307 static void tg3_get_nvram_size(struct tg3 *tp) 14308 { 14309 u32 val; 14310 14311 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0) 14312 return; 14313 14314 /* Selfboot format */ 14315 if (val != TG3_EEPROM_MAGIC) { 14316 tg3_get_eeprom_size(tp); 14317 return; 14318 } 14319 14320 if (tg3_nvram_read(tp, 0xf0, &val) == 0) { 14321 if (val != 0) { 14322 /* This is confusing. We want to operate on the 14323 * 16-bit value at offset 0xf2. The tg3_nvram_read() 14324 * call will read from NVRAM and byteswap the data 14325 * according to the byteswapping settings for all 14326 * other register accesses. This ensures the data we 14327 * want will always reside in the lower 16-bits. 14328 * However, the data in NVRAM is in LE format, which 14329 * means the data from the NVRAM read will always be 14330 * opposite the endianness of the CPU. The 16-bit 14331 * byteswap then brings the data to CPU endianness. 
14332 */ 14333 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024; 14334 return; 14335 } 14336 } 14337 tp->nvram_size = TG3_NVRAM_SIZE_512KB; 14338 } 14339 14340 static void tg3_get_nvram_info(struct tg3 *tp) 14341 { 14342 u32 nvcfg1; 14343 14344 nvcfg1 = tr32(NVRAM_CFG1); 14345 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) { 14346 tg3_flag_set(tp, FLASH); 14347 } else { 14348 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 14349 tw32(NVRAM_CFG1, nvcfg1); 14350 } 14351 14352 if (tg3_asic_rev(tp) == ASIC_REV_5750 || 14353 tg3_flag(tp, 5780_CLASS)) { 14354 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) { 14355 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED: 14356 tp->nvram_jedecnum = JEDEC_ATMEL; 14357 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; 14358 tg3_flag_set(tp, NVRAM_BUFFERED); 14359 break; 14360 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED: 14361 tp->nvram_jedecnum = JEDEC_ATMEL; 14362 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE; 14363 break; 14364 case FLASH_VENDOR_ATMEL_EEPROM: 14365 tp->nvram_jedecnum = JEDEC_ATMEL; 14366 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 14367 tg3_flag_set(tp, NVRAM_BUFFERED); 14368 break; 14369 case FLASH_VENDOR_ST: 14370 tp->nvram_jedecnum = JEDEC_ST; 14371 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE; 14372 tg3_flag_set(tp, NVRAM_BUFFERED); 14373 break; 14374 case FLASH_VENDOR_SAIFUN: 14375 tp->nvram_jedecnum = JEDEC_SAIFUN; 14376 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE; 14377 break; 14378 case FLASH_VENDOR_SST_SMALL: 14379 case FLASH_VENDOR_SST_LARGE: 14380 tp->nvram_jedecnum = JEDEC_SST; 14381 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE; 14382 break; 14383 } 14384 } else { 14385 tp->nvram_jedecnum = JEDEC_ATMEL; 14386 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; 14387 tg3_flag_set(tp, NVRAM_BUFFERED); 14388 } 14389 } 14390 14391 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1) 14392 { 14393 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) { 14394 case FLASH_5752PAGE_SIZE_256: 14395 tp->nvram_pagesize = 256; 14396 break; 14397 case FLASH_5752PAGE_SIZE_512: 14398 tp->nvram_pagesize = 512; 14399 break; 14400 case FLASH_5752PAGE_SIZE_1K: 14401 tp->nvram_pagesize = 1024; 14402 break; 14403 case FLASH_5752PAGE_SIZE_2K: 14404 tp->nvram_pagesize = 2048; 14405 break; 14406 case FLASH_5752PAGE_SIZE_4K: 14407 tp->nvram_pagesize = 4096; 14408 break; 14409 case FLASH_5752PAGE_SIZE_264: 14410 tp->nvram_pagesize = 264; 14411 break; 14412 case FLASH_5752PAGE_SIZE_528: 14413 tp->nvram_pagesize = 528; 14414 break; 14415 } 14416 } 14417 14418 static void tg3_get_5752_nvram_info(struct tg3 *tp) 14419 { 14420 u32 nvcfg1; 14421 14422 nvcfg1 = tr32(NVRAM_CFG1); 14423 14424 /* NVRAM protection for TPM */ 14425 if (nvcfg1 & (1 << 27)) 14426 tg3_flag_set(tp, PROTECTED_NVRAM); 14427 14428 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14429 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ: 14430 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ: 14431 tp->nvram_jedecnum = JEDEC_ATMEL; 14432 tg3_flag_set(tp, NVRAM_BUFFERED); 14433 break; 14434 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: 14435 tp->nvram_jedecnum = JEDEC_ATMEL; 14436 tg3_flag_set(tp, NVRAM_BUFFERED); 14437 tg3_flag_set(tp, FLASH); 14438 break; 14439 case FLASH_5752VENDOR_ST_M45PE10: 14440 case FLASH_5752VENDOR_ST_M45PE20: 14441 case FLASH_5752VENDOR_ST_M45PE40: 14442 tp->nvram_jedecnum = JEDEC_ST; 14443 tg3_flag_set(tp, NVRAM_BUFFERED); 14444 tg3_flag_set(tp, FLASH); 14445 break; 14446 } 14447 14448 if (tg3_flag(tp, FLASH)) { 14449 tg3_nvram_get_pagesize(tp, nvcfg1); 14450 } else { 14451 /* For eeprom, set pagesize to maximum 
eeprom size */ 14452 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 14453 14454 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 14455 tw32(NVRAM_CFG1, nvcfg1); 14456 } 14457 } 14458 14459 static void tg3_get_5755_nvram_info(struct tg3 *tp) 14460 { 14461 u32 nvcfg1, protect = 0; 14462 14463 nvcfg1 = tr32(NVRAM_CFG1); 14464 14465 /* NVRAM protection for TPM */ 14466 if (nvcfg1 & (1 << 27)) { 14467 tg3_flag_set(tp, PROTECTED_NVRAM); 14468 protect = 1; 14469 } 14470 14471 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; 14472 switch (nvcfg1) { 14473 case FLASH_5755VENDOR_ATMEL_FLASH_1: 14474 case FLASH_5755VENDOR_ATMEL_FLASH_2: 14475 case FLASH_5755VENDOR_ATMEL_FLASH_3: 14476 case FLASH_5755VENDOR_ATMEL_FLASH_5: 14477 tp->nvram_jedecnum = JEDEC_ATMEL; 14478 tg3_flag_set(tp, NVRAM_BUFFERED); 14479 tg3_flag_set(tp, FLASH); 14480 tp->nvram_pagesize = 264; 14481 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 || 14482 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5) 14483 tp->nvram_size = (protect ? 0x3e200 : 14484 TG3_NVRAM_SIZE_512KB); 14485 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2) 14486 tp->nvram_size = (protect ? 0x1f200 : 14487 TG3_NVRAM_SIZE_256KB); 14488 else 14489 tp->nvram_size = (protect ? 0x1f200 : 14490 TG3_NVRAM_SIZE_128KB); 14491 break; 14492 case FLASH_5752VENDOR_ST_M45PE10: 14493 case FLASH_5752VENDOR_ST_M45PE20: 14494 case FLASH_5752VENDOR_ST_M45PE40: 14495 tp->nvram_jedecnum = JEDEC_ST; 14496 tg3_flag_set(tp, NVRAM_BUFFERED); 14497 tg3_flag_set(tp, FLASH); 14498 tp->nvram_pagesize = 256; 14499 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10) 14500 tp->nvram_size = (protect ? 14501 TG3_NVRAM_SIZE_64KB : 14502 TG3_NVRAM_SIZE_128KB); 14503 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20) 14504 tp->nvram_size = (protect ? 14505 TG3_NVRAM_SIZE_64KB : 14506 TG3_NVRAM_SIZE_256KB); 14507 else 14508 tp->nvram_size = (protect ? 
14509 TG3_NVRAM_SIZE_128KB : 14510 TG3_NVRAM_SIZE_512KB); 14511 break; 14512 } 14513 } 14514 14515 static void tg3_get_5787_nvram_info(struct tg3 *tp) 14516 { 14517 u32 nvcfg1; 14518 14519 nvcfg1 = tr32(NVRAM_CFG1); 14520 14521 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14522 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ: 14523 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: 14524 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ: 14525 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: 14526 tp->nvram_jedecnum = JEDEC_ATMEL; 14527 tg3_flag_set(tp, NVRAM_BUFFERED); 14528 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 14529 14530 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 14531 tw32(NVRAM_CFG1, nvcfg1); 14532 break; 14533 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: 14534 case FLASH_5755VENDOR_ATMEL_FLASH_1: 14535 case FLASH_5755VENDOR_ATMEL_FLASH_2: 14536 case FLASH_5755VENDOR_ATMEL_FLASH_3: 14537 tp->nvram_jedecnum = JEDEC_ATMEL; 14538 tg3_flag_set(tp, NVRAM_BUFFERED); 14539 tg3_flag_set(tp, FLASH); 14540 tp->nvram_pagesize = 264; 14541 break; 14542 case FLASH_5752VENDOR_ST_M45PE10: 14543 case FLASH_5752VENDOR_ST_M45PE20: 14544 case FLASH_5752VENDOR_ST_M45PE40: 14545 tp->nvram_jedecnum = JEDEC_ST; 14546 tg3_flag_set(tp, NVRAM_BUFFERED); 14547 tg3_flag_set(tp, FLASH); 14548 tp->nvram_pagesize = 256; 14549 break; 14550 } 14551 } 14552 14553 static void tg3_get_5761_nvram_info(struct tg3 *tp) 14554 { 14555 u32 nvcfg1, protect = 0; 14556 14557 nvcfg1 = tr32(NVRAM_CFG1); 14558 14559 /* NVRAM protection for TPM */ 14560 if (nvcfg1 & (1 << 27)) { 14561 tg3_flag_set(tp, PROTECTED_NVRAM); 14562 protect = 1; 14563 } 14564 14565 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; 14566 switch (nvcfg1) { 14567 case FLASH_5761VENDOR_ATMEL_ADB021D: 14568 case FLASH_5761VENDOR_ATMEL_ADB041D: 14569 case FLASH_5761VENDOR_ATMEL_ADB081D: 14570 case FLASH_5761VENDOR_ATMEL_ADB161D: 14571 case FLASH_5761VENDOR_ATMEL_MDB021D: 14572 case FLASH_5761VENDOR_ATMEL_MDB041D: 14573 case FLASH_5761VENDOR_ATMEL_MDB081D: 14574 case FLASH_5761VENDOR_ATMEL_MDB161D: 14575 tp->nvram_jedecnum = JEDEC_ATMEL; 14576 tg3_flag_set(tp, NVRAM_BUFFERED); 14577 tg3_flag_set(tp, FLASH); 14578 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); 14579 tp->nvram_pagesize = 256; 14580 break; 14581 case FLASH_5761VENDOR_ST_A_M45PE20: 14582 case FLASH_5761VENDOR_ST_A_M45PE40: 14583 case FLASH_5761VENDOR_ST_A_M45PE80: 14584 case FLASH_5761VENDOR_ST_A_M45PE16: 14585 case FLASH_5761VENDOR_ST_M_M45PE20: 14586 case FLASH_5761VENDOR_ST_M_M45PE40: 14587 case FLASH_5761VENDOR_ST_M_M45PE80: 14588 case FLASH_5761VENDOR_ST_M_M45PE16: 14589 tp->nvram_jedecnum = JEDEC_ST; 14590 tg3_flag_set(tp, NVRAM_BUFFERED); 14591 tg3_flag_set(tp, FLASH); 14592 tp->nvram_pagesize = 256; 14593 break; 14594 } 14595 14596 if (protect) { 14597 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT); 14598 } else { 14599 switch (nvcfg1) { 14600 case FLASH_5761VENDOR_ATMEL_ADB161D: 14601 case FLASH_5761VENDOR_ATMEL_MDB161D: 14602 case FLASH_5761VENDOR_ST_A_M45PE16: 14603 case FLASH_5761VENDOR_ST_M_M45PE16: 14604 tp->nvram_size = TG3_NVRAM_SIZE_2MB; 14605 break; 14606 case FLASH_5761VENDOR_ATMEL_ADB081D: 14607 case FLASH_5761VENDOR_ATMEL_MDB081D: 14608 case FLASH_5761VENDOR_ST_A_M45PE80: 14609 case FLASH_5761VENDOR_ST_M_M45PE80: 14610 tp->nvram_size = TG3_NVRAM_SIZE_1MB; 14611 break; 14612 case FLASH_5761VENDOR_ATMEL_ADB041D: 14613 case FLASH_5761VENDOR_ATMEL_MDB041D: 14614 case FLASH_5761VENDOR_ST_A_M45PE40: 14615 case FLASH_5761VENDOR_ST_M_M45PE40: 14616 tp->nvram_size = TG3_NVRAM_SIZE_512KB; 14617 break; 14618 case 
FLASH_5761VENDOR_ATMEL_ADB021D: 14619 case FLASH_5761VENDOR_ATMEL_MDB021D: 14620 case FLASH_5761VENDOR_ST_A_M45PE20: 14621 case FLASH_5761VENDOR_ST_M_M45PE20: 14622 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 14623 break; 14624 } 14625 } 14626 } 14627 14628 static void tg3_get_5906_nvram_info(struct tg3 *tp) 14629 { 14630 tp->nvram_jedecnum = JEDEC_ATMEL; 14631 tg3_flag_set(tp, NVRAM_BUFFERED); 14632 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 14633 } 14634 14635 static void tg3_get_57780_nvram_info(struct tg3 *tp) 14636 { 14637 u32 nvcfg1; 14638 14639 nvcfg1 = tr32(NVRAM_CFG1); 14640 14641 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14642 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: 14643 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: 14644 tp->nvram_jedecnum = JEDEC_ATMEL; 14645 tg3_flag_set(tp, NVRAM_BUFFERED); 14646 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 14647 14648 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 14649 tw32(NVRAM_CFG1, nvcfg1); 14650 return; 14651 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: 14652 case FLASH_57780VENDOR_ATMEL_AT45DB011D: 14653 case FLASH_57780VENDOR_ATMEL_AT45DB011B: 14654 case FLASH_57780VENDOR_ATMEL_AT45DB021D: 14655 case FLASH_57780VENDOR_ATMEL_AT45DB021B: 14656 case FLASH_57780VENDOR_ATMEL_AT45DB041D: 14657 case FLASH_57780VENDOR_ATMEL_AT45DB041B: 14658 tp->nvram_jedecnum = JEDEC_ATMEL; 14659 tg3_flag_set(tp, NVRAM_BUFFERED); 14660 tg3_flag_set(tp, FLASH); 14661 14662 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14663 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: 14664 case FLASH_57780VENDOR_ATMEL_AT45DB011D: 14665 case FLASH_57780VENDOR_ATMEL_AT45DB011B: 14666 tp->nvram_size = TG3_NVRAM_SIZE_128KB; 14667 break; 14668 case FLASH_57780VENDOR_ATMEL_AT45DB021D: 14669 case FLASH_57780VENDOR_ATMEL_AT45DB021B: 14670 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 14671 break; 14672 case FLASH_57780VENDOR_ATMEL_AT45DB041D: 14673 case FLASH_57780VENDOR_ATMEL_AT45DB041B: 14674 tp->nvram_size = TG3_NVRAM_SIZE_512KB; 14675 break; 14676 } 14677 break; 14678 case FLASH_5752VENDOR_ST_M45PE10: 14679 case FLASH_5752VENDOR_ST_M45PE20: 14680 case FLASH_5752VENDOR_ST_M45PE40: 14681 tp->nvram_jedecnum = JEDEC_ST; 14682 tg3_flag_set(tp, NVRAM_BUFFERED); 14683 tg3_flag_set(tp, FLASH); 14684 14685 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14686 case FLASH_5752VENDOR_ST_M45PE10: 14687 tp->nvram_size = TG3_NVRAM_SIZE_128KB; 14688 break; 14689 case FLASH_5752VENDOR_ST_M45PE20: 14690 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 14691 break; 14692 case FLASH_5752VENDOR_ST_M45PE40: 14693 tp->nvram_size = TG3_NVRAM_SIZE_512KB; 14694 break; 14695 } 14696 break; 14697 default: 14698 tg3_flag_set(tp, NO_NVRAM); 14699 return; 14700 } 14701 14702 tg3_nvram_get_pagesize(tp, nvcfg1); 14703 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) 14704 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); 14705 } 14706 14707 14708 static void tg3_get_5717_nvram_info(struct tg3 *tp) 14709 { 14710 u32 nvcfg1; 14711 14712 nvcfg1 = tr32(NVRAM_CFG1); 14713 14714 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14715 case FLASH_5717VENDOR_ATMEL_EEPROM: 14716 case FLASH_5717VENDOR_MICRO_EEPROM: 14717 tp->nvram_jedecnum = JEDEC_ATMEL; 14718 tg3_flag_set(tp, NVRAM_BUFFERED); 14719 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 14720 14721 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 14722 tw32(NVRAM_CFG1, nvcfg1); 14723 return; 14724 case FLASH_5717VENDOR_ATMEL_MDB011D: 14725 case FLASH_5717VENDOR_ATMEL_ADB011B: 14726 case FLASH_5717VENDOR_ATMEL_ADB011D: 14727 case FLASH_5717VENDOR_ATMEL_MDB021D: 14728 case 
FLASH_5717VENDOR_ATMEL_ADB021B: 14729 case FLASH_5717VENDOR_ATMEL_ADB021D: 14730 case FLASH_5717VENDOR_ATMEL_45USPT: 14731 tp->nvram_jedecnum = JEDEC_ATMEL; 14732 tg3_flag_set(tp, NVRAM_BUFFERED); 14733 tg3_flag_set(tp, FLASH); 14734 14735 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14736 case FLASH_5717VENDOR_ATMEL_MDB021D: 14737 /* Detect size with tg3_get_nvram_size() */ 14738 break; 14739 case FLASH_5717VENDOR_ATMEL_ADB021B: 14740 case FLASH_5717VENDOR_ATMEL_ADB021D: 14741 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 14742 break; 14743 default: 14744 tp->nvram_size = TG3_NVRAM_SIZE_128KB; 14745 break; 14746 } 14747 break; 14748 case FLASH_5717VENDOR_ST_M_M25PE10: 14749 case FLASH_5717VENDOR_ST_A_M25PE10: 14750 case FLASH_5717VENDOR_ST_M_M45PE10: 14751 case FLASH_5717VENDOR_ST_A_M45PE10: 14752 case FLASH_5717VENDOR_ST_M_M25PE20: 14753 case FLASH_5717VENDOR_ST_A_M25PE20: 14754 case FLASH_5717VENDOR_ST_M_M45PE20: 14755 case FLASH_5717VENDOR_ST_A_M45PE20: 14756 case FLASH_5717VENDOR_ST_25USPT: 14757 case FLASH_5717VENDOR_ST_45USPT: 14758 tp->nvram_jedecnum = JEDEC_ST; 14759 tg3_flag_set(tp, NVRAM_BUFFERED); 14760 tg3_flag_set(tp, FLASH); 14761 14762 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14763 case FLASH_5717VENDOR_ST_M_M25PE20: 14764 case FLASH_5717VENDOR_ST_M_M45PE20: 14765 /* Detect size with tg3_get_nvram_size() */ 14766 break; 14767 case FLASH_5717VENDOR_ST_A_M25PE20: 14768 case FLASH_5717VENDOR_ST_A_M45PE20: 14769 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 14770 break; 14771 default: 14772 tp->nvram_size = TG3_NVRAM_SIZE_128KB; 14773 break; 14774 } 14775 break; 14776 default: 14777 tg3_flag_set(tp, NO_NVRAM); 14778 return; 14779 } 14780 14781 tg3_nvram_get_pagesize(tp, nvcfg1); 14782 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) 14783 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); 14784 } 14785 14786 static void tg3_get_5720_nvram_info(struct tg3 *tp) 14787 { 14788 u32 nvcfg1, nvmpinstrp, nv_status; 14789 14790 nvcfg1 = tr32(NVRAM_CFG1); 14791 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK; 14792 14793 if (tg3_asic_rev(tp) == ASIC_REV_5762) { 14794 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) { 14795 tg3_flag_set(tp, NO_NVRAM); 14796 return; 14797 } 14798 14799 switch (nvmpinstrp) { 14800 case FLASH_5762_MX25L_100: 14801 case FLASH_5762_MX25L_200: 14802 case FLASH_5762_MX25L_400: 14803 case FLASH_5762_MX25L_800: 14804 case FLASH_5762_MX25L_160_320: 14805 tp->nvram_pagesize = 4096; 14806 tp->nvram_jedecnum = JEDEC_MACRONIX; 14807 tg3_flag_set(tp, NVRAM_BUFFERED); 14808 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); 14809 tg3_flag_set(tp, FLASH); 14810 nv_status = tr32(NVRAM_AUTOSENSE_STATUS); 14811 tp->nvram_size = 14812 (1 << (nv_status >> AUTOSENSE_DEVID & 14813 AUTOSENSE_DEVID_MASK) 14814 << AUTOSENSE_SIZE_IN_MB); 14815 return; 14816 14817 case FLASH_5762_EEPROM_HD: 14818 nvmpinstrp = FLASH_5720_EEPROM_HD; 14819 break; 14820 case FLASH_5762_EEPROM_LD: 14821 nvmpinstrp = FLASH_5720_EEPROM_LD; 14822 break; 14823 case FLASH_5720VENDOR_M_ST_M45PE20: 14824 /* This pinstrap supports multiple sizes, so force it 14825 * to read the actual size from location 0xf0.
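 * Leaving nvram_size at zero here lets tg3_nvram_init() fall
 * through to tg3_get_nvram_size(), which reads the size word
 * at 0xf0.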
14826 */ 14827 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT; 14828 break; 14829 } 14830 } 14831 14832 switch (nvmpinstrp) { 14833 case FLASH_5720_EEPROM_HD: 14834 case FLASH_5720_EEPROM_LD: 14835 tp->nvram_jedecnum = JEDEC_ATMEL; 14836 tg3_flag_set(tp, NVRAM_BUFFERED); 14837 14838 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 14839 tw32(NVRAM_CFG1, nvcfg1); 14840 if (nvmpinstrp == FLASH_5720_EEPROM_HD) 14841 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 14842 else 14843 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE; 14844 return; 14845 case FLASH_5720VENDOR_M_ATMEL_DB011D: 14846 case FLASH_5720VENDOR_A_ATMEL_DB011B: 14847 case FLASH_5720VENDOR_A_ATMEL_DB011D: 14848 case FLASH_5720VENDOR_M_ATMEL_DB021D: 14849 case FLASH_5720VENDOR_A_ATMEL_DB021B: 14850 case FLASH_5720VENDOR_A_ATMEL_DB021D: 14851 case FLASH_5720VENDOR_M_ATMEL_DB041D: 14852 case FLASH_5720VENDOR_A_ATMEL_DB041B: 14853 case FLASH_5720VENDOR_A_ATMEL_DB041D: 14854 case FLASH_5720VENDOR_M_ATMEL_DB081D: 14855 case FLASH_5720VENDOR_A_ATMEL_DB081D: 14856 case FLASH_5720VENDOR_ATMEL_45USPT: 14857 tp->nvram_jedecnum = JEDEC_ATMEL; 14858 tg3_flag_set(tp, NVRAM_BUFFERED); 14859 tg3_flag_set(tp, FLASH); 14860 14861 switch (nvmpinstrp) { 14862 case FLASH_5720VENDOR_M_ATMEL_DB021D: 14863 case FLASH_5720VENDOR_A_ATMEL_DB021B: 14864 case FLASH_5720VENDOR_A_ATMEL_DB021D: 14865 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 14866 break; 14867 case FLASH_5720VENDOR_M_ATMEL_DB041D: 14868 case FLASH_5720VENDOR_A_ATMEL_DB041B: 14869 case FLASH_5720VENDOR_A_ATMEL_DB041D: 14870 tp->nvram_size = TG3_NVRAM_SIZE_512KB; 14871 break; 14872 case FLASH_5720VENDOR_M_ATMEL_DB081D: 14873 case FLASH_5720VENDOR_A_ATMEL_DB081D: 14874 tp->nvram_size = TG3_NVRAM_SIZE_1MB; 14875 break; 14876 default: 14877 if (tg3_asic_rev(tp) != ASIC_REV_5762) 14878 tp->nvram_size = TG3_NVRAM_SIZE_128KB; 14879 break; 14880 } 14881 break; 14882 case FLASH_5720VENDOR_M_ST_M25PE10: 14883 case FLASH_5720VENDOR_M_ST_M45PE10: 14884 case FLASH_5720VENDOR_A_ST_M25PE10: 14885 case FLASH_5720VENDOR_A_ST_M45PE10: 14886 case FLASH_5720VENDOR_M_ST_M25PE20: 14887 case FLASH_5720VENDOR_M_ST_M45PE20: 14888 case FLASH_5720VENDOR_A_ST_M25PE20: 14889 case FLASH_5720VENDOR_A_ST_M45PE20: 14890 case FLASH_5720VENDOR_M_ST_M25PE40: 14891 case FLASH_5720VENDOR_M_ST_M45PE40: 14892 case FLASH_5720VENDOR_A_ST_M25PE40: 14893 case FLASH_5720VENDOR_A_ST_M45PE40: 14894 case FLASH_5720VENDOR_M_ST_M25PE80: 14895 case FLASH_5720VENDOR_M_ST_M45PE80: 14896 case FLASH_5720VENDOR_A_ST_M25PE80: 14897 case FLASH_5720VENDOR_A_ST_M45PE80: 14898 case FLASH_5720VENDOR_ST_25USPT: 14899 case FLASH_5720VENDOR_ST_45USPT: 14900 tp->nvram_jedecnum = JEDEC_ST; 14901 tg3_flag_set(tp, NVRAM_BUFFERED); 14902 tg3_flag_set(tp, FLASH); 14903 14904 switch (nvmpinstrp) { 14905 case FLASH_5720VENDOR_M_ST_M25PE20: 14906 case FLASH_5720VENDOR_M_ST_M45PE20: 14907 case FLASH_5720VENDOR_A_ST_M25PE20: 14908 case FLASH_5720VENDOR_A_ST_M45PE20: 14909 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 14910 break; 14911 case FLASH_5720VENDOR_M_ST_M25PE40: 14912 case FLASH_5720VENDOR_M_ST_M45PE40: 14913 case FLASH_5720VENDOR_A_ST_M25PE40: 14914 case FLASH_5720VENDOR_A_ST_M45PE40: 14915 tp->nvram_size = TG3_NVRAM_SIZE_512KB; 14916 break; 14917 case FLASH_5720VENDOR_M_ST_M25PE80: 14918 case FLASH_5720VENDOR_M_ST_M45PE80: 14919 case FLASH_5720VENDOR_A_ST_M25PE80: 14920 case FLASH_5720VENDOR_A_ST_M45PE80: 14921 tp->nvram_size = TG3_NVRAM_SIZE_1MB; 14922 break; 14923 default: 14924 if (tg3_asic_rev(tp) != ASIC_REV_5762) 14925 tp->nvram_size = TG3_NVRAM_SIZE_128KB; 14926 break; 14927 } 
14928 break; 14929 default: 14930 tg3_flag_set(tp, NO_NVRAM); 14931 return; 14932 } 14933 14934 tg3_nvram_get_pagesize(tp, nvcfg1); 14935 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) 14936 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); 14937 14938 if (tg3_asic_rev(tp) == ASIC_REV_5762) { 14939 u32 val; 14940 14941 if (tg3_nvram_read(tp, 0, &val)) 14942 return; 14943 14944 if (val != TG3_EEPROM_MAGIC && 14945 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) 14946 tg3_flag_set(tp, NO_NVRAM); 14947 } 14948 } 14949 14950 /* Chips other than 5700/5701 use the NVRAM for fetching info. */ 14951 static void tg3_nvram_init(struct tg3 *tp) 14952 { 14953 if (tg3_flag(tp, IS_SSB_CORE)) { 14954 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */ 14955 tg3_flag_clear(tp, NVRAM); 14956 tg3_flag_clear(tp, NVRAM_BUFFERED); 14957 tg3_flag_set(tp, NO_NVRAM); 14958 return; 14959 } 14960 14961 tw32_f(GRC_EEPROM_ADDR, 14962 (EEPROM_ADDR_FSM_RESET | 14963 (EEPROM_DEFAULT_CLOCK_PERIOD << 14964 EEPROM_ADDR_CLKPERD_SHIFT))); 14965 14966 msleep(1); 14967 14968 /* Enable seeprom accesses. */ 14969 tw32_f(GRC_LOCAL_CTRL, 14970 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM); 14971 udelay(100); 14972 14973 if (tg3_asic_rev(tp) != ASIC_REV_5700 && 14974 tg3_asic_rev(tp) != ASIC_REV_5701) { 14975 tg3_flag_set(tp, NVRAM); 14976 14977 if (tg3_nvram_lock(tp)) { 14978 netdev_warn(tp->dev, 14979 "Cannot get nvram lock, %s failed\n", 14980 __func__); 14981 return; 14982 } 14983 tg3_enable_nvram_access(tp); 14984 14985 tp->nvram_size = 0; 14986 14987 if (tg3_asic_rev(tp) == ASIC_REV_5752) 14988 tg3_get_5752_nvram_info(tp); 14989 else if (tg3_asic_rev(tp) == ASIC_REV_5755) 14990 tg3_get_5755_nvram_info(tp); 14991 else if (tg3_asic_rev(tp) == ASIC_REV_5787 || 14992 tg3_asic_rev(tp) == ASIC_REV_5784 || 14993 tg3_asic_rev(tp) == ASIC_REV_5785) 14994 tg3_get_5787_nvram_info(tp); 14995 else if (tg3_asic_rev(tp) == ASIC_REV_5761) 14996 tg3_get_5761_nvram_info(tp); 14997 else if (tg3_asic_rev(tp) == ASIC_REV_5906) 14998 tg3_get_5906_nvram_info(tp); 14999 else if (tg3_asic_rev(tp) == ASIC_REV_57780 || 15000 tg3_flag(tp, 57765_CLASS)) 15001 tg3_get_57780_nvram_info(tp); 15002 else if (tg3_asic_rev(tp) == ASIC_REV_5717 || 15003 tg3_asic_rev(tp) == ASIC_REV_5719) 15004 tg3_get_5717_nvram_info(tp); 15005 else if (tg3_asic_rev(tp) == ASIC_REV_5720 || 15006 tg3_asic_rev(tp) == ASIC_REV_5762) 15007 tg3_get_5720_nvram_info(tp); 15008 else 15009 tg3_get_nvram_info(tp); 15010 15011 if (tp->nvram_size == 0) 15012 tg3_get_nvram_size(tp); 15013 15014 tg3_disable_nvram_access(tp); 15015 tg3_nvram_unlock(tp); 15016 15017 } else { 15018 tg3_flag_clear(tp, NVRAM); 15019 tg3_flag_clear(tp, NVRAM_BUFFERED); 15020 15021 tg3_get_eeprom_size(tp); 15022 } 15023 } 15024 15025 struct subsys_tbl_ent { 15026 u16 subsys_vendor, subsys_devid; 15027 u32 phy_id; 15028 }; 15029 15030 static struct subsys_tbl_ent subsys_id_to_phy_id[] = { 15031 /* Broadcom boards. 
*/ 15032 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15033 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 }, 15034 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15035 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 }, 15036 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15037 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 }, 15038 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15039 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 }, 15040 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15041 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 }, 15042 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15043 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 }, 15044 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15045 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 }, 15046 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15047 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 }, 15048 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15049 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 }, 15050 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15051 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 }, 15052 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15053 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 }, 15054 15055 /* 3com boards. */ 15056 { TG3PCI_SUBVENDOR_ID_3COM, 15057 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 }, 15058 { TG3PCI_SUBVENDOR_ID_3COM, 15059 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 }, 15060 { TG3PCI_SUBVENDOR_ID_3COM, 15061 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 }, 15062 { TG3PCI_SUBVENDOR_ID_3COM, 15063 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 }, 15064 { TG3PCI_SUBVENDOR_ID_3COM, 15065 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 }, 15066 15067 /* DELL boards. */ 15068 { TG3PCI_SUBVENDOR_ID_DELL, 15069 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 }, 15070 { TG3PCI_SUBVENDOR_ID_DELL, 15071 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 }, 15072 { TG3PCI_SUBVENDOR_ID_DELL, 15073 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 }, 15074 { TG3PCI_SUBVENDOR_ID_DELL, 15075 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 }, 15076 15077 /* Compaq boards. */ 15078 { TG3PCI_SUBVENDOR_ID_COMPAQ, 15079 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 }, 15080 { TG3PCI_SUBVENDOR_ID_COMPAQ, 15081 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 }, 15082 { TG3PCI_SUBVENDOR_ID_COMPAQ, 15083 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 }, 15084 { TG3PCI_SUBVENDOR_ID_COMPAQ, 15085 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 }, 15086 { TG3PCI_SUBVENDOR_ID_COMPAQ, 15087 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 }, 15088 15089 /* IBM boards. */ 15090 { TG3PCI_SUBVENDOR_ID_IBM, 15091 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 } 15092 }; 15093 15094 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp) 15095 { 15096 int i; 15097 15098 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) { 15099 if ((subsys_id_to_phy_id[i].subsys_vendor == 15100 tp->pdev->subsystem_vendor) && 15101 (subsys_id_to_phy_id[i].subsys_devid == 15102 tp->pdev->subsystem_device)) 15103 return &subsys_id_to_phy_id[i]; 15104 } 15105 return NULL; 15106 } 15107 15108 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp) 15109 { 15110 u32 val; 15111 15112 tp->phy_id = TG3_PHY_ID_INVALID; 15113 tp->led_ctrl = LED_CTRL_MODE_PHY_1; 15114 15115 /* Assume an onboard device and WOL capable by default. 
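 * Both assumptions are revisited below once the NIC SRAM config
 * words have been read.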
*/ 15116 tg3_flag_set(tp, EEPROM_WRITE_PROT); 15117 tg3_flag_set(tp, WOL_CAP); 15118 15119 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 15120 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) { 15121 tg3_flag_clear(tp, EEPROM_WRITE_PROT); 15122 tg3_flag_set(tp, IS_NIC); 15123 } 15124 val = tr32(VCPU_CFGSHDW); 15125 if (val & VCPU_CFGSHDW_ASPM_DBNC) 15126 tg3_flag_set(tp, ASPM_WORKAROUND); 15127 if ((val & VCPU_CFGSHDW_WOL_ENABLE) && 15128 (val & VCPU_CFGSHDW_WOL_MAGPKT)) { 15129 tg3_flag_set(tp, WOL_ENABLE); 15130 device_set_wakeup_enable(&tp->pdev->dev, true); 15131 } 15132 goto done; 15133 } 15134 15135 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); 15136 if (val == NIC_SRAM_DATA_SIG_MAGIC) { 15137 u32 nic_cfg, led_cfg; 15138 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0; 15139 u32 nic_phy_id, ver, eeprom_phy_id; 15140 int eeprom_phy_serdes = 0; 15141 15142 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); 15143 tp->nic_sram_data_cfg = nic_cfg; 15144 15145 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver); 15146 ver >>= NIC_SRAM_DATA_VER_SHIFT; 15147 if (tg3_asic_rev(tp) != ASIC_REV_5700 && 15148 tg3_asic_rev(tp) != ASIC_REV_5701 && 15149 tg3_asic_rev(tp) != ASIC_REV_5703 && 15150 (ver > 0) && (ver < 0x100)) 15151 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2); 15152 15153 if (tg3_asic_rev(tp) == ASIC_REV_5785) 15154 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4); 15155 15156 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 15157 tg3_asic_rev(tp) == ASIC_REV_5719 || 15158 tg3_asic_rev(tp) == ASIC_REV_5720) 15159 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5); 15160 15161 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) == 15162 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER) 15163 eeprom_phy_serdes = 1; 15164 15165 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id); 15166 if (nic_phy_id != 0) { 15167 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK; 15168 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK; 15169 15170 eeprom_phy_id = (id1 >> 16) << 10; 15171 eeprom_phy_id |= (id2 & 0xfc00) << 16; 15172 eeprom_phy_id |= (id2 & 0x03ff) << 0; 15173 } else 15174 eeprom_phy_id = 0; 15175 15176 tp->phy_id = eeprom_phy_id; 15177 if (eeprom_phy_serdes) { 15178 if (!tg3_flag(tp, 5705_PLUS)) 15179 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; 15180 else 15181 tp->phy_flags |= TG3_PHYFLG_MII_SERDES; 15182 } 15183 15184 if (tg3_flag(tp, 5750_PLUS)) 15185 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK | 15186 SHASTA_EXT_LED_MODE_MASK); 15187 else 15188 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK; 15189 15190 switch (led_cfg) { 15191 default: 15192 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1: 15193 tp->led_ctrl = LED_CTRL_MODE_PHY_1; 15194 break; 15195 15196 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2: 15197 tp->led_ctrl = LED_CTRL_MODE_PHY_2; 15198 break; 15199 15200 case NIC_SRAM_DATA_CFG_LED_MODE_MAC: 15201 tp->led_ctrl = LED_CTRL_MODE_MAC; 15202 15203 /* Default to PHY_1_MODE if 0 (MAC_MODE) is 15204 * read on some older 5700/5701 bootcode. 
15205 */ 15206 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 15207 tg3_asic_rev(tp) == ASIC_REV_5701) 15208 tp->led_ctrl = LED_CTRL_MODE_PHY_1; 15209 15210 break; 15211 15212 case SHASTA_EXT_LED_SHARED: 15213 tp->led_ctrl = LED_CTRL_MODE_SHARED; 15214 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 && 15215 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1) 15216 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 | 15217 LED_CTRL_MODE_PHY_2); 15218 15219 if (tg3_flag(tp, 5717_PLUS) || 15220 tg3_asic_rev(tp) == ASIC_REV_5762) 15221 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE | 15222 LED_CTRL_BLINK_RATE_MASK; 15223 15224 break; 15225 15226 case SHASTA_EXT_LED_MAC: 15227 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC; 15228 break; 15229 15230 case SHASTA_EXT_LED_COMBO: 15231 tp->led_ctrl = LED_CTRL_MODE_COMBO; 15232 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) 15233 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 | 15234 LED_CTRL_MODE_PHY_2); 15235 break; 15236 15237 } 15238 15239 if ((tg3_asic_rev(tp) == ASIC_REV_5700 || 15240 tg3_asic_rev(tp) == ASIC_REV_5701) && 15241 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL) 15242 tp->led_ctrl = LED_CTRL_MODE_PHY_2; 15243 15244 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) 15245 tp->led_ctrl = LED_CTRL_MODE_PHY_1; 15246 15247 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) { 15248 tg3_flag_set(tp, EEPROM_WRITE_PROT); 15249 if ((tp->pdev->subsystem_vendor == 15250 PCI_VENDOR_ID_ARIMA) && 15251 (tp->pdev->subsystem_device == 0x205a || 15252 tp->pdev->subsystem_device == 0x2063)) 15253 tg3_flag_clear(tp, EEPROM_WRITE_PROT); 15254 } else { 15255 tg3_flag_clear(tp, EEPROM_WRITE_PROT); 15256 tg3_flag_set(tp, IS_NIC); 15257 } 15258 15259 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { 15260 tg3_flag_set(tp, ENABLE_ASF); 15261 if (tg3_flag(tp, 5750_PLUS)) 15262 tg3_flag_set(tp, ASF_NEW_HANDSHAKE); 15263 } 15264 15265 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) && 15266 tg3_flag(tp, 5750_PLUS)) 15267 tg3_flag_set(tp, ENABLE_APE); 15268 15269 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES && 15270 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)) 15271 tg3_flag_clear(tp, WOL_CAP); 15272 15273 if (tg3_flag(tp, WOL_CAP) && 15274 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) { 15275 tg3_flag_set(tp, WOL_ENABLE); 15276 device_set_wakeup_enable(&tp->pdev->dev, true); 15277 } 15278 15279 if (cfg2 & (1 << 17)) 15280 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING; 15281 15282 /* serdes signal pre-emphasis in register 0x590 set by */ 15283 /* bootcode if bit 18 is set */ 15284 if (cfg2 & (1 << 18)) 15285 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS; 15286 15287 if ((tg3_flag(tp, 57765_PLUS) || 15288 (tg3_asic_rev(tp) == ASIC_REV_5784 && 15289 tg3_chip_rev(tp) != CHIPREV_5784_AX)) && 15290 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN)) 15291 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD; 15292 15293 if (tg3_flag(tp, PCI_EXPRESS)) { 15294 u32 cfg3; 15295 15296 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3); 15297 if (tg3_asic_rev(tp) != ASIC_REV_5785 && 15298 !tg3_flag(tp, 57765_PLUS) && 15299 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)) 15300 tg3_flag_set(tp, ASPM_WORKAROUND); 15301 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID) 15302 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN; 15303 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK) 15304 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK; 15305 } 15306 15307 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE) 15308 tg3_flag_set(tp, RGMII_INBAND_DISABLE); 15309 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN) 15310 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN); 15311 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN) 15312 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN); 15313 15314 if 
(cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV) 15315 tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV; 15316 } 15317 done: 15318 if (tg3_flag(tp, WOL_CAP)) 15319 device_set_wakeup_enable(&tp->pdev->dev, 15320 tg3_flag(tp, WOL_ENABLE)); 15321 else 15322 device_set_wakeup_capable(&tp->pdev->dev, false); 15323 } 15324 15325 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val) 15326 { 15327 int i, err; 15328 u32 val2, off = offset * 8; 15329 15330 err = tg3_nvram_lock(tp); 15331 if (err) 15332 return err; 15333 15334 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE); 15335 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN | 15336 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START); 15337 tg3_ape_read32(tp, TG3_APE_OTP_CTRL); 15338 udelay(10); 15339 15340 for (i = 0; i < 100; i++) { 15341 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS); 15342 if (val2 & APE_OTP_STATUS_CMD_DONE) { 15343 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA); 15344 break; 15345 } 15346 udelay(10); 15347 } 15348 15349 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0); 15350 15351 tg3_nvram_unlock(tp); 15352 if (val2 & APE_OTP_STATUS_CMD_DONE) 15353 return 0; 15354 15355 return -EBUSY; 15356 } 15357 15358 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd) 15359 { 15360 int i; 15361 u32 val; 15362 15363 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START); 15364 tw32(OTP_CTRL, cmd); 15365 15366 /* Wait for up to 1 ms for command to execute. */ 15367 for (i = 0; i < 100; i++) { 15368 val = tr32(OTP_STATUS); 15369 if (val & OTP_STATUS_CMD_DONE) 15370 break; 15371 udelay(10); 15372 } 15373 15374 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY; 15375 } 15376 15377 /* Read the gphy configuration from the OTP region of the chip. The gphy 15378 * configuration is a 32-bit value that straddles the alignment boundary. 15379 * We do two 32-bit reads and then shift and merge the results. 
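 * The upper half of the result comes from the low word of the MAGIC1
 * read and the lower half from the high word of the MAGIC2 read.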
15380 */ 15381 static u32 tg3_read_otp_phycfg(struct tg3 *tp) 15382 { 15383 u32 bhalf_otp, thalf_otp; 15384 15385 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC); 15386 15387 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT)) 15388 return 0; 15389 15390 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1); 15391 15392 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ)) 15393 return 0; 15394 15395 thalf_otp = tr32(OTP_READ_DATA); 15396 15397 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2); 15398 15399 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ)) 15400 return 0; 15401 15402 bhalf_otp = tr32(OTP_READ_DATA); 15403 15404 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16); 15405 } 15406 15407 static void tg3_phy_init_link_config(struct tg3 *tp) 15408 { 15409 u32 adv = ADVERTISED_Autoneg; 15410 15411 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 15412 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV)) 15413 adv |= ADVERTISED_1000baseT_Half; 15414 adv |= ADVERTISED_1000baseT_Full; 15415 } 15416 15417 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 15418 adv |= ADVERTISED_100baseT_Half | 15419 ADVERTISED_100baseT_Full | 15420 ADVERTISED_10baseT_Half | 15421 ADVERTISED_10baseT_Full | 15422 ADVERTISED_TP; 15423 else 15424 adv |= ADVERTISED_FIBRE; 15425 15426 tp->link_config.advertising = adv; 15427 tp->link_config.speed = SPEED_UNKNOWN; 15428 tp->link_config.duplex = DUPLEX_UNKNOWN; 15429 tp->link_config.autoneg = AUTONEG_ENABLE; 15430 tp->link_config.active_speed = SPEED_UNKNOWN; 15431 tp->link_config.active_duplex = DUPLEX_UNKNOWN; 15432 15433 tp->old_link = -1; 15434 } 15435 15436 static int tg3_phy_probe(struct tg3 *tp) 15437 { 15438 u32 hw_phy_id_1, hw_phy_id_2; 15439 u32 hw_phy_id, hw_phy_id_masked; 15440 int err; 15441 15442 /* flow control autonegotiation is default behavior */ 15443 tg3_flag_set(tp, PAUSE_AUTONEG); 15444 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; 15445 15446 if (tg3_flag(tp, ENABLE_APE)) { 15447 switch (tp->pci_fn) { 15448 case 0: 15449 tp->phy_ape_lock = TG3_APE_LOCK_PHY0; 15450 break; 15451 case 1: 15452 tp->phy_ape_lock = TG3_APE_LOCK_PHY1; 15453 break; 15454 case 2: 15455 tp->phy_ape_lock = TG3_APE_LOCK_PHY2; 15456 break; 15457 case 3: 15458 tp->phy_ape_lock = TG3_APE_LOCK_PHY3; 15459 break; 15460 } 15461 } 15462 15463 if (!tg3_flag(tp, ENABLE_ASF) && 15464 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && 15465 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) 15466 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK | 15467 TG3_PHYFLG_KEEP_LINK_ON_PWRDN); 15468 15469 if (tg3_flag(tp, USE_PHYLIB)) 15470 return tg3_phy_init(tp); 15471 15472 /* Reading the PHY ID register can conflict with ASF 15473 * firmware access to the PHY hardware. 15474 */ 15475 err = 0; 15476 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) { 15477 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID; 15478 } else { 15479 /* Now read the physical PHY_ID from the chip and verify 15480 * that it is sane. If it doesn't look good, we fall back 15481 * to either the hard-coded table based PHY_ID or, failing 15482 * that, the value found in the eeprom area.
*/ 15484 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1); 15485 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2); 15486 15487 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10; 15488 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16; 15489 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0; 15490 15491 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK; 15492 } 15493 15494 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) { 15495 tp->phy_id = hw_phy_id; 15496 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002) 15497 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; 15498 else 15499 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES; 15500 } else { 15501 if (tp->phy_id != TG3_PHY_ID_INVALID) { 15502 /* Do nothing, phy ID already set up in 15503 * tg3_get_eeprom_hw_cfg(). 15504 */ 15505 } else { 15506 struct subsys_tbl_ent *p; 15507 15508 /* No eeprom signature? Try the hardcoded 15509 * subsys device table. 15510 */ 15511 p = tg3_lookup_by_subsys(tp); 15512 if (p) { 15513 tp->phy_id = p->phy_id; 15514 } else if (!tg3_flag(tp, IS_SSB_CORE)) { 15515 /* For now we saw the IDs 0xbc050cd0, 15516 * 0xbc050f80 and 0xbc050c30 on devices 15517 * connected to a BCM4785 and there are 15518 * probably more. Just assume that the phy is 15519 * supported when it is connected to an SSB core 15520 * for now. 15521 */ 15522 return -ENODEV; 15523 } 15524 15525 if (!tp->phy_id || 15526 tp->phy_id == TG3_PHY_ID_BCM8002) 15527 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; 15528 } 15529 } 15530 15531 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && 15532 (tg3_asic_rev(tp) == ASIC_REV_5719 || 15533 tg3_asic_rev(tp) == ASIC_REV_5720 || 15534 tg3_asic_rev(tp) == ASIC_REV_57766 || 15535 tg3_asic_rev(tp) == ASIC_REV_5762 || 15536 (tg3_asic_rev(tp) == ASIC_REV_5717 && 15537 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) || 15538 (tg3_asic_rev(tp) == ASIC_REV_57765 && 15539 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) { 15540 tp->phy_flags |= TG3_PHYFLG_EEE_CAP; 15541 15542 tp->eee.supported = SUPPORTED_100baseT_Full | 15543 SUPPORTED_1000baseT_Full; 15544 tp->eee.advertised = ADVERTISED_100baseT_Full | 15545 ADVERTISED_1000baseT_Full; 15546 tp->eee.eee_enabled = 1; 15547 tp->eee.tx_lpi_enabled = 1; 15548 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US; 15549 } 15550 15551 tg3_phy_init_link_config(tp); 15552 15553 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && 15554 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && 15555 !tg3_flag(tp, ENABLE_APE) && 15556 !tg3_flag(tp, ENABLE_ASF)) { 15557 u32 bmsr, dummy; 15558 15559 tg3_readphy(tp, MII_BMSR, &bmsr); 15560 if (!tg3_readphy(tp, MII_BMSR, &bmsr) && 15561 (bmsr & BMSR_LSTATUS)) 15562 goto skip_phy_reset; 15563 15564 err = tg3_phy_reset(tp); 15565 if (err) 15566 return err; 15567 15568 tg3_phy_set_wirespeed(tp); 15569 15570 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) { 15571 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising, 15572 tp->link_config.flowctrl); 15573 15574 tg3_writephy(tp, MII_BMCR, 15575 BMCR_ANENABLE | BMCR_ANRESTART); 15576 } 15577 } 15578 15579 skip_phy_reset: 15580 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { 15581 err = tg3_init_5401phy_dsp(tp); 15582 if (err) 15583 return err; 15584 15585 err = tg3_init_5401phy_dsp(tp); 15586 } 15587 15588 return err; 15589 } 15590 15591 static void tg3_read_vpd(struct tg3 *tp) 15592 { 15593 u8 *vpd_data; 15594 unsigned int len, vpdlen; 15595 int i; 15596 15597 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen); 15598 if (!vpd_data) 15599 goto out_no_vpd; 15600 15601 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen, 15602 PCI_VPD_RO_KEYWORD_MFR_ID, &len);
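	/* A manufacturer ID of "1028" (the PCI vendor ID of Dell in
	 * ASCII) means the VENDOR0 keyword carries a boot code version
	 * string, which is prepended to fw_ver below.
	 */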
15603 if (i < 0) 15604 goto partno; 15605 15606 if (len != 4 || memcmp(vpd_data + i, "1028", 4)) 15607 goto partno; 15608 15609 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen, 15610 PCI_VPD_RO_KEYWORD_VENDOR0, &len); 15611 if (i < 0) 15612 goto partno; 15613 15614 memset(tp->fw_ver, 0, sizeof(tp->fw_ver)); 15615 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, vpd_data + i); 15616 15617 partno: 15618 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen, 15619 PCI_VPD_RO_KEYWORD_PARTNO, &len); 15620 if (i < 0) 15621 goto out_not_found; 15622 15623 if (len > TG3_BPN_SIZE) 15624 goto out_not_found; 15625 15626 memcpy(tp->board_part_number, &vpd_data[i], len); 15627 15628 out_not_found: 15629 kfree(vpd_data); 15630 if (tp->board_part_number[0]) 15631 return; 15632 15633 out_no_vpd: 15634 if (tg3_asic_rev(tp) == ASIC_REV_5717) { 15635 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || 15636 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C) 15637 strcpy(tp->board_part_number, "BCM5717"); 15638 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718) 15639 strcpy(tp->board_part_number, "BCM5718"); 15640 else 15641 goto nomatch; 15642 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) { 15643 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780) 15644 strcpy(tp->board_part_number, "BCM57780"); 15645 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760) 15646 strcpy(tp->board_part_number, "BCM57760"); 15647 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790) 15648 strcpy(tp->board_part_number, "BCM57790"); 15649 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788) 15650 strcpy(tp->board_part_number, "BCM57788"); 15651 else 15652 goto nomatch; 15653 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) { 15654 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761) 15655 strcpy(tp->board_part_number, "BCM57761"); 15656 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765) 15657 strcpy(tp->board_part_number, "BCM57765"); 15658 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781) 15659 strcpy(tp->board_part_number, "BCM57781"); 15660 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785) 15661 strcpy(tp->board_part_number, "BCM57785"); 15662 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791) 15663 strcpy(tp->board_part_number, "BCM57791"); 15664 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795) 15665 strcpy(tp->board_part_number, "BCM57795"); 15666 else 15667 goto nomatch; 15668 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) { 15669 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762) 15670 strcpy(tp->board_part_number, "BCM57762"); 15671 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766) 15672 strcpy(tp->board_part_number, "BCM57766"); 15673 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782) 15674 strcpy(tp->board_part_number, "BCM57782"); 15675 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786) 15676 strcpy(tp->board_part_number, "BCM57786"); 15677 else 15678 goto nomatch; 15679 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) { 15680 strcpy(tp->board_part_number, "BCM95906"); 15681 } else { 15682 nomatch: 15683 strcpy(tp->board_part_number, "none"); 15684 } 15685 } 15686 15687 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset) 15688 { 15689 u32 val; 15690 15691 if (tg3_nvram_read(tp, offset, &val) || 15692 (val & 0xfc000000) != 0x0c000000 || 15693 tg3_nvram_read(tp, offset + 4, &val) || 15694 val != 0) 15695 return 0; 15696 15697 return 1; 15698 } 15699 15700 static void tg3_read_bc_ver(struct tg3 *tp) 15701 { 15702 u32 val, offset, start, ver_offset; 
15703 int i, dst_off; 15704 bool newver = false; 15705 15706 if (tg3_nvram_read(tp, 0xc, &offset) || 15707 tg3_nvram_read(tp, 0x4, &start)) 15708 return; 15709 15710 offset = tg3_nvram_logical_addr(tp, offset); 15711 15712 if (tg3_nvram_read(tp, offset, &val)) 15713 return; 15714 15715 if ((val & 0xfc000000) == 0x0c000000) { 15716 if (tg3_nvram_read(tp, offset + 4, &val)) 15717 return; 15718 15719 if (val == 0) 15720 newver = true; 15721 } 15722 15723 dst_off = strlen(tp->fw_ver); 15724 15725 if (newver) { 15726 if (TG3_VER_SIZE - dst_off < 16 || 15727 tg3_nvram_read(tp, offset + 8, &ver_offset)) 15728 return; 15729 15730 offset = offset + ver_offset - start; 15731 for (i = 0; i < 16; i += 4) { 15732 __be32 v; 15733 if (tg3_nvram_read_be32(tp, offset + i, &v)) 15734 return; 15735 15736 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v)); 15737 } 15738 } else { 15739 u32 major, minor; 15740 15741 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset)) 15742 return; 15743 15744 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >> 15745 TG3_NVM_BCVER_MAJSFT; 15746 minor = ver_offset & TG3_NVM_BCVER_MINMSK; 15747 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off, 15748 "v%d.%02d", major, minor); 15749 } 15750 } 15751 15752 static void tg3_read_hwsb_ver(struct tg3 *tp) 15753 { 15754 u32 val, major, minor; 15755 15756 /* Use native endian representation */ 15757 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val)) 15758 return; 15759 15760 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >> 15761 TG3_NVM_HWSB_CFG1_MAJSFT; 15762 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >> 15763 TG3_NVM_HWSB_CFG1_MINSFT; 15764 15765 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor); 15766 } 15767 15768 static void tg3_read_sb_ver(struct tg3 *tp, u32 val) 15769 { 15770 u32 offset, major, minor, build; 15771 15772 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1); 15773 15774 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1) 15775 return; 15776 15777 switch (val & TG3_EEPROM_SB_REVISION_MASK) { 15778 case TG3_EEPROM_SB_REVISION_0: 15779 offset = TG3_EEPROM_SB_F1R0_EDH_OFF; 15780 break; 15781 case TG3_EEPROM_SB_REVISION_2: 15782 offset = TG3_EEPROM_SB_F1R2_EDH_OFF; 15783 break; 15784 case TG3_EEPROM_SB_REVISION_3: 15785 offset = TG3_EEPROM_SB_F1R3_EDH_OFF; 15786 break; 15787 case TG3_EEPROM_SB_REVISION_4: 15788 offset = TG3_EEPROM_SB_F1R4_EDH_OFF; 15789 break; 15790 case TG3_EEPROM_SB_REVISION_5: 15791 offset = TG3_EEPROM_SB_F1R5_EDH_OFF; 15792 break; 15793 case TG3_EEPROM_SB_REVISION_6: 15794 offset = TG3_EEPROM_SB_F1R6_EDH_OFF; 15795 break; 15796 default: 15797 return; 15798 } 15799 15800 if (tg3_nvram_read(tp, offset, &val)) 15801 return; 15802 15803 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >> 15804 TG3_EEPROM_SB_EDH_BLD_SHFT; 15805 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >> 15806 TG3_EEPROM_SB_EDH_MAJ_SHFT; 15807 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK; 15808 15809 if (minor > 99 || build > 26) 15810 return; 15811 15812 offset = strlen(tp->fw_ver); 15813 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset, 15814 " v%d.%02d", major, minor); 15815 15816 if (build > 0) { 15817 offset = strlen(tp->fw_ver); 15818 if (offset < TG3_VER_SIZE - 1) 15819 tp->fw_ver[offset] = 'a' + build - 1; 15820 } 15821 } 15822 15823 static void tg3_read_mgmtfw_ver(struct tg3 *tp) 15824 { 15825 u32 val, offset, start; 15826 int i, vlen; 15827 15828 for (offset = TG3_NVM_DIR_START; 15829 offset < TG3_NVM_DIR_END; 15830 offset += TG3_NVM_DIRENT_SIZE) { 15831 if (tg3_nvram_read(tp, offset, &val)) 15832 return; 
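		/* Stop at the ASF init directory entry; it points at the
		 * management firmware image whose version string we want.
		 */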
15833 15834 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI) 15835 break; 15836 } 15837 15838 if (offset == TG3_NVM_DIR_END) 15839 return; 15840 15841 if (!tg3_flag(tp, 5705_PLUS)) 15842 start = 0x08000000; 15843 else if (tg3_nvram_read(tp, offset - 4, &start)) 15844 return; 15845 15846 if (tg3_nvram_read(tp, offset + 4, &offset) || 15847 !tg3_fw_img_is_valid(tp, offset) || 15848 tg3_nvram_read(tp, offset + 8, &val)) 15849 return; 15850 15851 offset += val - start; 15852 15853 vlen = strlen(tp->fw_ver); 15854 15855 tp->fw_ver[vlen++] = ','; 15856 tp->fw_ver[vlen++] = ' '; 15857 15858 for (i = 0; i < 4; i++) { 15859 __be32 v; 15860 if (tg3_nvram_read_be32(tp, offset, &v)) 15861 return; 15862 15863 offset += sizeof(v); 15864 15865 if (vlen > TG3_VER_SIZE - sizeof(v)) { 15866 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen); 15867 break; 15868 } 15869 15870 memcpy(&tp->fw_ver[vlen], &v, sizeof(v)); 15871 vlen += sizeof(v); 15872 } 15873 } 15874 15875 static void tg3_probe_ncsi(struct tg3 *tp) 15876 { 15877 u32 apedata; 15878 15879 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); 15880 if (apedata != APE_SEG_SIG_MAGIC) 15881 return; 15882 15883 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS); 15884 if (!(apedata & APE_FW_STATUS_READY)) 15885 return; 15886 15887 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) 15888 tg3_flag_set(tp, APE_HAS_NCSI); 15889 } 15890 15891 static void tg3_read_dash_ver(struct tg3 *tp) 15892 { 15893 int vlen; 15894 u32 apedata; 15895 char *fwtype; 15896 15897 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION); 15898 15899 if (tg3_flag(tp, APE_HAS_NCSI)) 15900 fwtype = "NCSI"; 15901 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725) 15902 fwtype = "SMASH"; 15903 else 15904 fwtype = "DASH"; 15905 15906 vlen = strlen(tp->fw_ver); 15907 15908 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d", 15909 fwtype, 15910 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT, 15911 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT, 15912 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT, 15913 (apedata & APE_FW_VERSION_BLDMSK)); 15914 } 15915 15916 static void tg3_read_otp_ver(struct tg3 *tp) 15917 { 15918 u32 val, val2; 15919 15920 if (tg3_asic_rev(tp) != ASIC_REV_5762) 15921 return; 15922 15923 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) && 15924 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) && 15925 TG3_OTP_MAGIC0_VALID(val)) { 15926 u64 val64 = (u64) val << 32 | val2; 15927 u32 ver = 0; 15928 int i, vlen; 15929 15930 for (i = 0; i < 7; i++) { 15931 if ((val64 & 0xff) == 0) 15932 break; 15933 ver = val64 & 0xff; 15934 val64 >>= 8; 15935 } 15936 vlen = strlen(tp->fw_ver); 15937 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver); 15938 } 15939 } 15940 15941 static void tg3_read_fw_ver(struct tg3 *tp) 15942 { 15943 u32 val; 15944 bool vpd_vers = false; 15945 15946 if (tp->fw_ver[0] != 0) 15947 vpd_vers = true; 15948 15949 if (tg3_flag(tp, NO_NVRAM)) { 15950 strcat(tp->fw_ver, "sb"); 15951 tg3_read_otp_ver(tp); 15952 return; 15953 } 15954 15955 if (tg3_nvram_read(tp, 0, &val)) 15956 return; 15957 15958 if (val == TG3_EEPROM_MAGIC) 15959 tg3_read_bc_ver(tp); 15960 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) 15961 tg3_read_sb_ver(tp, val); 15962 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW) 15963 tg3_read_hwsb_ver(tp); 15964 15965 if (tg3_flag(tp, ENABLE_ASF)) { 15966 if (tg3_flag(tp, ENABLE_APE)) { 15967 tg3_probe_ncsi(tp); 15968 if 
(!vpd_vers) 15969 tg3_read_dash_ver(tp); 15970 } else if (!vpd_vers) { 15971 tg3_read_mgmtfw_ver(tp); 15972 } 15973 } 15974 15975 tp->fw_ver[TG3_VER_SIZE - 1] = 0; 15976 } 15977 15978 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp) 15979 { 15980 if (tg3_flag(tp, LRG_PROD_RING_CAP)) 15981 return TG3_RX_RET_MAX_SIZE_5717; 15982 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) 15983 return TG3_RX_RET_MAX_SIZE_5700; 15984 else 15985 return TG3_RX_RET_MAX_SIZE_5705; 15986 } 15987 15988 static const struct pci_device_id tg3_write_reorder_chipsets[] = { 15989 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) }, 15990 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) }, 15991 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) }, 15992 { }, 15993 }; 15994 15995 static struct pci_dev *tg3_find_peer(struct tg3 *tp) 15996 { 15997 struct pci_dev *peer; 15998 unsigned int func, devnr = tp->pdev->devfn & ~7; 15999 16000 for (func = 0; func < 8; func++) { 16001 peer = pci_get_slot(tp->pdev->bus, devnr | func); 16002 if (peer && peer != tp->pdev) 16003 break; 16004 pci_dev_put(peer); 16005 } 16006 /* 5704 can be configured in single-port mode, set peer to 16007 * tp->pdev in that case. 16008 */ 16009 if (!peer) { 16010 peer = tp->pdev; 16011 return peer; 16012 } 16013 16014 /* 16015 * We don't need to keep the refcount elevated; there's no way 16016 * to remove one half of this device without removing the other 16017 */ 16018 pci_dev_put(peer); 16019 16020 return peer; 16021 } 16022 16023 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg) 16024 { 16025 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT; 16026 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) { 16027 u32 reg; 16028 16029 /* All devices that use the alternate 16030 * ASIC REV location have a CPMU. 16031 */ 16032 tg3_flag_set(tp, CPMU_PRESENT); 16033 16034 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || 16035 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C || 16036 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || 16037 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || 16038 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 || 16039 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 || 16040 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 || 16041 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 || 16042 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 || 16043 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 || 16044 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) 16045 reg = TG3PCI_GEN2_PRODID_ASICREV; 16046 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 || 16047 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 || 16048 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 || 16049 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 || 16050 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 || 16051 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 || 16052 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 || 16053 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 || 16054 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 || 16055 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786) 16056 reg = TG3PCI_GEN15_PRODID_ASICREV; 16057 else 16058 reg = TG3PCI_PRODID_ASICREV; 16059 16060 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id); 16061 } 16062 16063 /* Wrong chip ID in 5752 A0. This code can be removed later 16064 * as A0 is not in production. 
16065 */ 16066 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW) 16067 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0; 16068 16069 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0) 16070 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0; 16071 16072 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 16073 tg3_asic_rev(tp) == ASIC_REV_5719 || 16074 tg3_asic_rev(tp) == ASIC_REV_5720) 16075 tg3_flag_set(tp, 5717_PLUS); 16076 16077 if (tg3_asic_rev(tp) == ASIC_REV_57765 || 16078 tg3_asic_rev(tp) == ASIC_REV_57766) 16079 tg3_flag_set(tp, 57765_CLASS); 16080 16081 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) || 16082 tg3_asic_rev(tp) == ASIC_REV_5762) 16083 tg3_flag_set(tp, 57765_PLUS); 16084 16085 /* Intentionally exclude ASIC_REV_5906 */ 16086 if (tg3_asic_rev(tp) == ASIC_REV_5755 || 16087 tg3_asic_rev(tp) == ASIC_REV_5787 || 16088 tg3_asic_rev(tp) == ASIC_REV_5784 || 16089 tg3_asic_rev(tp) == ASIC_REV_5761 || 16090 tg3_asic_rev(tp) == ASIC_REV_5785 || 16091 tg3_asic_rev(tp) == ASIC_REV_57780 || 16092 tg3_flag(tp, 57765_PLUS)) 16093 tg3_flag_set(tp, 5755_PLUS); 16094 16095 if (tg3_asic_rev(tp) == ASIC_REV_5780 || 16096 tg3_asic_rev(tp) == ASIC_REV_5714) 16097 tg3_flag_set(tp, 5780_CLASS); 16098 16099 if (tg3_asic_rev(tp) == ASIC_REV_5750 || 16100 tg3_asic_rev(tp) == ASIC_REV_5752 || 16101 tg3_asic_rev(tp) == ASIC_REV_5906 || 16102 tg3_flag(tp, 5755_PLUS) || 16103 tg3_flag(tp, 5780_CLASS)) 16104 tg3_flag_set(tp, 5750_PLUS); 16105 16106 if (tg3_asic_rev(tp) == ASIC_REV_5705 || 16107 tg3_flag(tp, 5750_PLUS)) 16108 tg3_flag_set(tp, 5705_PLUS); 16109 } 16110 16111 static bool tg3_10_100_only_device(struct tg3 *tp, 16112 const struct pci_device_id *ent) 16113 { 16114 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK; 16115 16116 if ((tg3_asic_rev(tp) == ASIC_REV_5703 && 16117 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) || 16118 (tp->phy_flags & TG3_PHYFLG_IS_FET)) 16119 return true; 16120 16121 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) { 16122 if (tg3_asic_rev(tp) == ASIC_REV_5705) { 16123 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100) 16124 return true; 16125 } else { 16126 return true; 16127 } 16128 } 16129 16130 return false; 16131 } 16132 16133 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) 16134 { 16135 u32 misc_ctrl_reg; 16136 u32 pci_state_reg, grc_misc_cfg; 16137 u32 val; 16138 u16 pci_cmd; 16139 int err; 16140 16141 /* Force memory write invalidate off. If we leave it on, 16142 * then on 5700_BX chips we have to enable a workaround. 16143 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary 16144 * to match the cacheline size. The Broadcom driver has this 16145 * workaround but turns MWI off at all times and so never uses 16146 * it. This seems to suggest that the workaround is insufficient. 16147 */ 16148 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); 16149 pci_cmd &= ~PCI_COMMAND_INVALIDATE; 16150 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); 16151 16152 /* Important! -- Make sure register accesses are byteswapped 16153 * correctly. Also, for those chips that require it, make 16154 * sure that indirect register accesses are enabled before 16155 * the first operation.
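* tp->misc_host_ctrl was seeded with the byte-swap and indirect-access bits in tg3_init_one(); only the chip revision field read back from the device is merged in here before the register is rewritten.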
16156 */ 16157 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, 16158 &misc_ctrl_reg); 16159 tp->misc_host_ctrl |= (misc_ctrl_reg & 16160 MISC_HOST_CTRL_CHIPREV); 16161 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, 16162 tp->misc_host_ctrl); 16163 16164 tg3_detect_asic_rev(tp, misc_ctrl_reg); 16165 16166 /* If we have 5702/03 A1 or A2 on certain ICH chipsets, 16167 * we need to disable memory and use config. cycles 16168 * only to access all registers. The 5702/03 chips 16169 * can mistakenly decode the special cycles from the 16170 * ICH chipsets as memory write cycles, causing corruption 16171 * of register and memory space. Only certain ICH bridges 16172 * will drive special cycles with non-zero data during the 16173 * address phase which can fall within the 5703's address 16174 * range. This is not an ICH bug as the PCI spec allows 16175 * non-zero address during special cycles. However, only 16176 * these ICH bridges are known to drive non-zero addresses 16177 * during special cycles. 16178 * 16179 * Since special cycles do not cross PCI bridges, we only 16180 * enable this workaround if the 5703 is on the secondary 16181 * bus of these ICH bridges. 16182 */ 16183 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) || 16184 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) { 16185 static struct tg3_dev_id { 16186 u32 vendor; 16187 u32 device; 16188 u32 rev; 16189 } ich_chipsets[] = { 16190 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8, 16191 PCI_ANY_ID }, 16192 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8, 16193 PCI_ANY_ID }, 16194 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11, 16195 0xa }, 16196 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6, 16197 PCI_ANY_ID }, 16198 { }, 16199 }; 16200 struct tg3_dev_id *pci_id = &ich_chipsets[0]; 16201 struct pci_dev *bridge = NULL; 16202 16203 while (pci_id->vendor != 0) { 16204 bridge = pci_get_device(pci_id->vendor, pci_id->device, 16205 bridge); 16206 if (!bridge) { 16207 pci_id++; 16208 continue; 16209 } 16210 if (pci_id->rev != PCI_ANY_ID) { 16211 if (bridge->revision > pci_id->rev) 16212 continue; 16213 } 16214 if (bridge->subordinate && 16215 (bridge->subordinate->number == 16216 tp->pdev->bus->number)) { 16217 tg3_flag_set(tp, ICH_WORKAROUND); 16218 pci_dev_put(bridge); 16219 break; 16220 } 16221 } 16222 } 16223 16224 if (tg3_asic_rev(tp) == ASIC_REV_5701) { 16225 static struct tg3_dev_id { 16226 u32 vendor; 16227 u32 device; 16228 } bridge_chipsets[] = { 16229 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 }, 16230 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 }, 16231 { }, 16232 }; 16233 struct tg3_dev_id *pci_id = &bridge_chipsets[0]; 16234 struct pci_dev *bridge = NULL; 16235 16236 while (pci_id->vendor != 0) { 16237 bridge = pci_get_device(pci_id->vendor, 16238 pci_id->device, 16239 bridge); 16240 if (!bridge) { 16241 pci_id++; 16242 continue; 16243 } 16244 if (bridge->subordinate && 16245 (bridge->subordinate->number <= 16246 tp->pdev->bus->number) && 16247 (bridge->subordinate->busn_res.end >= 16248 tp->pdev->bus->number)) { 16249 tg3_flag_set(tp, 5701_DMA_BUG); 16250 pci_dev_put(bridge); 16251 break; 16252 } 16253 } 16254 } 16255 16256 /* The EPB bridge inside 5714, 5715, and 5780 cannot support 16257 * DMA addresses > 40-bit. This bridge may have other additional 16258 * 57xx devices behind it in some 4-port NIC designs for example. 16259 * Any tg3 device found behind the bridge will also need the 40-bit 16260 * DMA workaround. 
16261 */ 16262 if (tg3_flag(tp, 5780_CLASS)) { 16263 tg3_flag_set(tp, 40BIT_DMA_BUG); 16264 tp->msi_cap = tp->pdev->msi_cap; 16265 } else { 16266 struct pci_dev *bridge = NULL; 16267 16268 do { 16269 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS, 16270 PCI_DEVICE_ID_SERVERWORKS_EPB, 16271 bridge); 16272 if (bridge && bridge->subordinate && 16273 (bridge->subordinate->number <= 16274 tp->pdev->bus->number) && 16275 (bridge->subordinate->busn_res.end >= 16276 tp->pdev->bus->number)) { 16277 tg3_flag_set(tp, 40BIT_DMA_BUG); 16278 pci_dev_put(bridge); 16279 break; 16280 } 16281 } while (bridge); 16282 } 16283 16284 if (tg3_asic_rev(tp) == ASIC_REV_5704 || 16285 tg3_asic_rev(tp) == ASIC_REV_5714) 16286 tp->pdev_peer = tg3_find_peer(tp); 16287 16288 /* Determine TSO capabilities */ 16289 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0) 16290 ; /* Do nothing. HW bug. */ 16291 else if (tg3_flag(tp, 57765_PLUS)) 16292 tg3_flag_set(tp, HW_TSO_3); 16293 else if (tg3_flag(tp, 5755_PLUS) || 16294 tg3_asic_rev(tp) == ASIC_REV_5906) 16295 tg3_flag_set(tp, HW_TSO_2); 16296 else if (tg3_flag(tp, 5750_PLUS)) { 16297 tg3_flag_set(tp, HW_TSO_1); 16298 tg3_flag_set(tp, TSO_BUG); 16299 if (tg3_asic_rev(tp) == ASIC_REV_5750 && 16300 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2) 16301 tg3_flag_clear(tp, TSO_BUG); 16302 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 && 16303 tg3_asic_rev(tp) != ASIC_REV_5701 && 16304 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) { 16305 tg3_flag_set(tp, FW_TSO); 16306 tg3_flag_set(tp, TSO_BUG); 16307 if (tg3_asic_rev(tp) == ASIC_REV_5705) 16308 tp->fw_needed = FIRMWARE_TG3TSO5; 16309 else 16310 tp->fw_needed = FIRMWARE_TG3TSO; 16311 } 16312 16313 /* Selectively allow TSO based on operating conditions */ 16314 if (tg3_flag(tp, HW_TSO_1) || 16315 tg3_flag(tp, HW_TSO_2) || 16316 tg3_flag(tp, HW_TSO_3) || 16317 tg3_flag(tp, FW_TSO)) { 16318 /* For firmware TSO, assume ASF is disabled. 16319 * We'll disable TSO later if we discover ASF 16320 * is enabled in tg3_get_eeprom_hw_cfg(). 
16321 */ 16322 tg3_flag_set(tp, TSO_CAPABLE); 16323 } else { 16324 tg3_flag_clear(tp, TSO_CAPABLE); 16325 tg3_flag_clear(tp, TSO_BUG); 16326 tp->fw_needed = NULL; 16327 } 16328 16329 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) 16330 tp->fw_needed = FIRMWARE_TG3; 16331 16332 if (tg3_asic_rev(tp) == ASIC_REV_57766) 16333 tp->fw_needed = FIRMWARE_TG357766; 16334 16335 tp->irq_max = 1; 16336 16337 if (tg3_flag(tp, 5750_PLUS)) { 16338 tg3_flag_set(tp, SUPPORT_MSI); 16339 if (tg3_chip_rev(tp) == CHIPREV_5750_AX || 16340 tg3_chip_rev(tp) == CHIPREV_5750_BX || 16341 (tg3_asic_rev(tp) == ASIC_REV_5714 && 16342 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 && 16343 tp->pdev_peer == tp->pdev)) 16344 tg3_flag_clear(tp, SUPPORT_MSI); 16345 16346 if (tg3_flag(tp, 5755_PLUS) || 16347 tg3_asic_rev(tp) == ASIC_REV_5906) { 16348 tg3_flag_set(tp, 1SHOT_MSI); 16349 } 16350 16351 if (tg3_flag(tp, 57765_PLUS)) { 16352 tg3_flag_set(tp, SUPPORT_MSIX); 16353 tp->irq_max = TG3_IRQ_MAX_VECS; 16354 } 16355 } 16356 16357 tp->txq_max = 1; 16358 tp->rxq_max = 1; 16359 if (tp->irq_max > 1) { 16360 tp->rxq_max = TG3_RSS_MAX_NUM_QS; 16361 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS); 16362 16363 if (tg3_asic_rev(tp) == ASIC_REV_5719 || 16364 tg3_asic_rev(tp) == ASIC_REV_5720) 16365 tp->txq_max = tp->irq_max - 1; 16366 } 16367 16368 if (tg3_flag(tp, 5755_PLUS) || 16369 tg3_asic_rev(tp) == ASIC_REV_5906) 16370 tg3_flag_set(tp, SHORT_DMA_BUG); 16371 16372 if (tg3_asic_rev(tp) == ASIC_REV_5719) 16373 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K; 16374 16375 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 16376 tg3_asic_rev(tp) == ASIC_REV_5719 || 16377 tg3_asic_rev(tp) == ASIC_REV_5720 || 16378 tg3_asic_rev(tp) == ASIC_REV_5762) 16379 tg3_flag_set(tp, LRG_PROD_RING_CAP); 16380 16381 if (tg3_flag(tp, 57765_PLUS) && 16382 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0) 16383 tg3_flag_set(tp, USE_JUMBO_BDFLAG); 16384 16385 if (!tg3_flag(tp, 5705_PLUS) || 16386 tg3_flag(tp, 5780_CLASS) || 16387 tg3_flag(tp, USE_JUMBO_BDFLAG)) 16388 tg3_flag_set(tp, JUMBO_CAPABLE); 16389 16390 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, 16391 &pci_state_reg); 16392 16393 if (pci_is_pcie(tp->pdev)) { 16394 u16 lnkctl; 16395 16396 tg3_flag_set(tp, PCI_EXPRESS); 16397 16398 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl); 16399 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) { 16400 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 16401 tg3_flag_clear(tp, HW_TSO_2); 16402 tg3_flag_clear(tp, TSO_CAPABLE); 16403 } 16404 if (tg3_asic_rev(tp) == ASIC_REV_5784 || 16405 tg3_asic_rev(tp) == ASIC_REV_5761 || 16406 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 || 16407 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1) 16408 tg3_flag_set(tp, CLKREQ_BUG); 16409 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) { 16410 tg3_flag_set(tp, L1PLLPD_EN); 16411 } 16412 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) { 16413 /* BCM5785 devices are effectively PCIe devices, and should 16414 * follow PCIe codepaths, but do not have a PCIe capabilities 16415 * section. 
16416 */ 16417 tg3_flag_set(tp, PCI_EXPRESS); 16418 } else if (!tg3_flag(tp, 5705_PLUS) || 16419 tg3_flag(tp, 5780_CLASS)) { 16420 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX); 16421 if (!tp->pcix_cap) { 16422 dev_err(&tp->pdev->dev, 16423 "Cannot find PCI-X capability, aborting\n"); 16424 return -EIO; 16425 } 16426 16427 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE)) 16428 tg3_flag_set(tp, PCIX_MODE); 16429 } 16430 16431 /* If we have an AMD 762 or VIA K8T800 chipset, write 16432 * reordering to the mailbox registers done by the host 16433 * controller can cause major troubles. We read back from 16434 * every mailbox register write to force the writes to be 16435 * posted to the chip in order. 16436 */ 16437 if (pci_dev_present(tg3_write_reorder_chipsets) && 16438 !tg3_flag(tp, PCI_EXPRESS)) 16439 tg3_flag_set(tp, MBOX_WRITE_REORDER); 16440 16441 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, 16442 &tp->pci_cacheline_sz); 16443 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER, 16444 &tp->pci_lat_timer); 16445 if (tg3_asic_rev(tp) == ASIC_REV_5703 && 16446 tp->pci_lat_timer < 64) { 16447 tp->pci_lat_timer = 64; 16448 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, 16449 tp->pci_lat_timer); 16450 } 16451 16452 /* Important! -- It is critical that the PCI-X hw workaround 16453 * situation is decided before the first MMIO register access. 16454 */ 16455 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) { 16456 /* 5700 BX chips need to have their TX producer index 16457 * mailboxes written twice to work around a bug. 16458 */ 16459 tg3_flag_set(tp, TXD_MBOX_HWBUG); 16460 16461 /* If we are in PCI-X mode, enable register write workaround. 16462 * 16463 * The workaround is to use indirect register accesses 16464 * for all chip writes not to mailbox registers. 16465 */ 16466 if (tg3_flag(tp, PCIX_MODE)) { 16467 u32 pm_reg; 16468 16469 tg3_flag_set(tp, PCIX_TARGET_HWBUG); 16470 16471 /* The chip can have its power management PCI config 16472 * space registers clobbered due to this bug. 16473 * So explicitly force the chip into D0 here. 16474 */ 16475 pci_read_config_dword(tp->pdev, 16476 tp->pdev->pm_cap + PCI_PM_CTRL, 16477 &pm_reg); 16478 pm_reg &= ~PCI_PM_CTRL_STATE_MASK; 16479 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */; 16480 pci_write_config_dword(tp->pdev, 16481 tp->pdev->pm_cap + PCI_PM_CTRL, 16482 pm_reg); 16483 16484 /* Also, force SERR#/PERR# in PCI command.
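* (PCI_COMMAND_PARITY and PCI_COMMAND_SERR) so that bus parity errors are reported rather than silently dropped.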
*/ 16485 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); 16486 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR; 16487 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); 16488 } 16489 } 16490 16491 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0) 16492 tg3_flag_set(tp, PCI_HIGH_SPEED); 16493 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0) 16494 tg3_flag_set(tp, PCI_32BIT); 16495 16496 /* Chip-specific fixup from Broadcom driver */ 16497 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) && 16498 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) { 16499 pci_state_reg |= PCISTATE_RETRY_SAME_DMA; 16500 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg); 16501 } 16502 16503 /* Default fast path register access methods */ 16504 tp->read32 = tg3_read32; 16505 tp->write32 = tg3_write32; 16506 tp->read32_mbox = tg3_read32; 16507 tp->write32_mbox = tg3_write32; 16508 tp->write32_tx_mbox = tg3_write32; 16509 tp->write32_rx_mbox = tg3_write32; 16510 16511 /* Various workaround register access methods */ 16512 if (tg3_flag(tp, PCIX_TARGET_HWBUG)) 16513 tp->write32 = tg3_write_indirect_reg32; 16514 else if (tg3_asic_rev(tp) == ASIC_REV_5701 || 16515 (tg3_flag(tp, PCI_EXPRESS) && 16516 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) { 16517 /* 16518 * Back to back register writes can cause problems on these 16519 * chips, the workaround is to read back all reg writes 16520 * except those to mailbox regs. 16521 * 16522 * See tg3_write_indirect_reg32(). 16523 */ 16524 tp->write32 = tg3_write_flush_reg32; 16525 } 16526 16527 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) { 16528 tp->write32_tx_mbox = tg3_write32_tx_mbox; 16529 if (tg3_flag(tp, MBOX_WRITE_REORDER)) 16530 tp->write32_rx_mbox = tg3_write_flush_reg32; 16531 } 16532 16533 if (tg3_flag(tp, ICH_WORKAROUND)) { 16534 tp->read32 = tg3_read_indirect_reg32; 16535 tp->write32 = tg3_write_indirect_reg32; 16536 tp->read32_mbox = tg3_read_indirect_mbox; 16537 tp->write32_mbox = tg3_write_indirect_mbox; 16538 tp->write32_tx_mbox = tg3_write_indirect_mbox; 16539 tp->write32_rx_mbox = tg3_write_indirect_mbox; 16540 16541 iounmap(tp->regs); 16542 tp->regs = NULL; 16543 16544 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); 16545 pci_cmd &= ~PCI_COMMAND_MEMORY; 16546 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); 16547 } 16548 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 16549 tp->read32_mbox = tg3_read32_mbox_5906; 16550 tp->write32_mbox = tg3_write32_mbox_5906; 16551 tp->write32_tx_mbox = tg3_write32_mbox_5906; 16552 tp->write32_rx_mbox = tg3_write32_mbox_5906; 16553 } 16554 16555 if (tp->write32 == tg3_write_indirect_reg32 || 16556 (tg3_flag(tp, PCIX_MODE) && 16557 (tg3_asic_rev(tp) == ASIC_REV_5700 || 16558 tg3_asic_rev(tp) == ASIC_REV_5701))) 16559 tg3_flag_set(tp, SRAM_USE_CONFIG); 16560 16561 /* The memory arbiter has to be enabled in order for SRAM accesses 16562 * to succeed. Normally on powerup the tg3 chip firmware will make 16563 * sure it is enabled, but other entities such as system netboot 16564 * code might disable it. 
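* Re-enable it unconditionally here; the SRAM reads just below (e.g. NIC_SRAM_CPMU_STATUS) depend on it.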
16565 */ 16566 val = tr32(MEMARB_MODE); 16567 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); 16568 16569 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3; 16570 if (tg3_asic_rev(tp) == ASIC_REV_5704 || 16571 tg3_flag(tp, 5780_CLASS)) { 16572 if (tg3_flag(tp, PCIX_MODE)) { 16573 pci_read_config_dword(tp->pdev, 16574 tp->pcix_cap + PCI_X_STATUS, 16575 &val); 16576 tp->pci_fn = val & 0x7; 16577 } 16578 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 || 16579 tg3_asic_rev(tp) == ASIC_REV_5719 || 16580 tg3_asic_rev(tp) == ASIC_REV_5720) { 16581 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val); 16582 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG) 16583 val = tr32(TG3_CPMU_STATUS); 16584 16585 if (tg3_asic_rev(tp) == ASIC_REV_5717) 16586 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0; 16587 else 16588 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >> 16589 TG3_CPMU_STATUS_FSHFT_5719; 16590 } 16591 16592 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) { 16593 tp->write32_tx_mbox = tg3_write_flush_reg32; 16594 tp->write32_rx_mbox = tg3_write_flush_reg32; 16595 } 16596 16597 /* Get eeprom hw config before calling tg3_set_power_state(). 16598 * In particular, the TG3_FLAG_IS_NIC flag must be 16599 * determined before calling tg3_set_power_state() so that 16600 * we know whether or not to switch out of Vaux power. 16601 * When the flag is set, it means that GPIO1 is used for eeprom 16602 * write protect and also implies that it is a LOM where GPIOs 16603 * are not used to switch power. 16604 */ 16605 tg3_get_eeprom_hw_cfg(tp); 16606 16607 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) { 16608 tg3_flag_clear(tp, TSO_CAPABLE); 16609 tg3_flag_clear(tp, TSO_BUG); 16610 tp->fw_needed = NULL; 16611 } 16612 16613 if (tg3_flag(tp, ENABLE_APE)) { 16614 /* Allow reads and writes to the 16615 * APE register and memory space. 16616 */ 16617 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR | 16618 PCISTATE_ALLOW_APE_SHMEM_WR | 16619 PCISTATE_ALLOW_APE_PSPACE_WR; 16620 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, 16621 pci_state_reg); 16622 16623 tg3_ape_lock_init(tp); 16624 tp->ape_hb_interval = 16625 msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC); 16626 } 16627 16628 /* Set up tp->grc_local_ctrl before calling 16629 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high 16630 * will bring 5700's external PHY out of reset. 16631 * It is also used as eeprom write protect on LOMs. 16632 */ 16633 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM; 16634 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 16635 tg3_flag(tp, EEPROM_WRITE_PROT)) 16636 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | 16637 GRC_LCLCTRL_GPIO_OUTPUT1); 16638 /* Unused GPIO3 must be driven as output on 5752 because there 16639 * are no pull-up resistors on unused GPIO pins. 16640 */ 16641 else if (tg3_asic_rev(tp) == ASIC_REV_5752) 16642 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; 16643 16644 if (tg3_asic_rev(tp) == ASIC_REV_5755 || 16645 tg3_asic_rev(tp) == ASIC_REV_57780 || 16646 tg3_flag(tp, 57765_CLASS)) 16647 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; 16648 16649 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || 16650 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { 16651 /* Turn off the debug UART. */ 16652 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; 16653 if (tg3_flag(tp, IS_NIC)) 16654 /* Keep VMain power. 
*/ 16655 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | 16656 GRC_LCLCTRL_GPIO_OUTPUT0; 16657 } 16658 16659 if (tg3_asic_rev(tp) == ASIC_REV_5762) 16660 tp->grc_local_ctrl |= 16661 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL; 16662 16663 /* Switch out of Vaux if it is a NIC */ 16664 tg3_pwrsrc_switch_to_vmain(tp); 16665 16666 /* Derive initial jumbo mode from MTU assigned in 16667 * ether_setup() via the alloc_etherdev() call 16668 */ 16669 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS)) 16670 tg3_flag_set(tp, JUMBO_RING_ENABLE); 16671 16672 /* Determine WakeOnLan speed to use. */ 16673 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 16674 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || 16675 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 || 16676 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) { 16677 tg3_flag_clear(tp, WOL_SPEED_100MB); 16678 } else { 16679 tg3_flag_set(tp, WOL_SPEED_100MB); 16680 } 16681 16682 if (tg3_asic_rev(tp) == ASIC_REV_5906) 16683 tp->phy_flags |= TG3_PHYFLG_IS_FET; 16684 16685 /* A few boards don't want Ethernet@WireSpeed phy feature */ 16686 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 16687 (tg3_asic_rev(tp) == ASIC_REV_5705 && 16688 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) && 16689 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) || 16690 (tp->phy_flags & TG3_PHYFLG_IS_FET) || 16691 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 16692 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED; 16693 16694 if (tg3_chip_rev(tp) == CHIPREV_5703_AX || 16695 tg3_chip_rev(tp) == CHIPREV_5704_AX) 16696 tp->phy_flags |= TG3_PHYFLG_ADC_BUG; 16697 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) 16698 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG; 16699 16700 if (tg3_flag(tp, 5705_PLUS) && 16701 !(tp->phy_flags & TG3_PHYFLG_IS_FET) && 16702 tg3_asic_rev(tp) != ASIC_REV_5785 && 16703 tg3_asic_rev(tp) != ASIC_REV_57780 && 16704 !tg3_flag(tp, 57765_PLUS)) { 16705 if (tg3_asic_rev(tp) == ASIC_REV_5755 || 16706 tg3_asic_rev(tp) == ASIC_REV_5787 || 16707 tg3_asic_rev(tp) == ASIC_REV_5784 || 16708 tg3_asic_rev(tp) == ASIC_REV_5761) { 16709 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 && 16710 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722) 16711 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG; 16712 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M) 16713 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM; 16714 } else 16715 tp->phy_flags |= TG3_PHYFLG_BER_BUG; 16716 } 16717 16718 if (tg3_asic_rev(tp) == ASIC_REV_5784 && 16719 tg3_chip_rev(tp) != CHIPREV_5784_AX) { 16720 tp->phy_otp = tg3_read_otp_phycfg(tp); 16721 if (tp->phy_otp == 0) 16722 tp->phy_otp = TG3_OTP_DEFAULT; 16723 } 16724 16725 if (tg3_flag(tp, CPMU_PRESENT)) 16726 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST; 16727 else 16728 tp->mi_mode = MAC_MI_MODE_BASE; 16729 16730 tp->coalesce_mode = 0; 16731 if (tg3_chip_rev(tp) != CHIPREV_5700_AX && 16732 tg3_chip_rev(tp) != CHIPREV_5700_BX) 16733 tp->coalesce_mode |= HOSTCC_MODE_32BYTE; 16734 16735 /* Set these bits to enable statistics workaround. */ 16736 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 16737 tg3_asic_rev(tp) == ASIC_REV_5762 || 16738 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || 16739 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) { 16740 tp->coalesce_mode |= HOSTCC_MODE_ATTN; 16741 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN; 16742 } 16743 16744 if (tg3_asic_rev(tp) == ASIC_REV_5785 || 16745 tg3_asic_rev(tp) == ASIC_REV_57780) 16746 tg3_flag_set(tp, USE_PHYLIB); 16747 16748 err = tg3_mdio_init(tp); 16749 if (err) 16750 return err; 16751 16752 /* Initialize data/descriptor byte/word swapping. 
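* On the 5720 and 5762 the B2HRX/HTX2B and HOST_STACKUP bits are carried over from the current GRC_MODE value; on other chips only HOST_STACKUP survives, and tp->grc_mode supplies the rest.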
*/ 16753 val = tr32(GRC_MODE); 16754 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 16755 tg3_asic_rev(tp) == ASIC_REV_5762) 16756 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA | 16757 GRC_MODE_WORD_SWAP_B2HRX_DATA | 16758 GRC_MODE_B2HRX_ENABLE | 16759 GRC_MODE_HTX2B_ENABLE | 16760 GRC_MODE_HOST_STACKUP); 16761 else 16762 val &= GRC_MODE_HOST_STACKUP; 16763 16764 tw32(GRC_MODE, val | tp->grc_mode); 16765 16766 tg3_switch_clocks(tp); 16767 16768 /* Clear this out for sanity. */ 16769 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); 16770 16771 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */ 16772 tw32(TG3PCI_REG_BASE_ADDR, 0); 16773 16774 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, 16775 &pci_state_reg); 16776 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 && 16777 !tg3_flag(tp, PCIX_TARGET_HWBUG)) { 16778 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || 16779 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 || 16780 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 || 16781 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) { 16782 void __iomem *sram_base; 16783 16784 /* Write some dummy words into the SRAM status block 16785 * area, see if it reads back correctly. If the return 16786 * value is bad, force enable the PCIX workaround. 16787 */ 16788 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK; 16789 16790 writel(0x00000000, sram_base); 16791 writel(0x00000000, sram_base + 4); 16792 writel(0xffffffff, sram_base + 4); 16793 if (readl(sram_base) != 0x00000000) 16794 tg3_flag_set(tp, PCIX_TARGET_HWBUG); 16795 } 16796 } 16797 16798 udelay(50); 16799 tg3_nvram_init(tp); 16800 16801 /* If the device has an NVRAM, no need to load patch firmware */ 16802 if (tg3_asic_rev(tp) == ASIC_REV_57766 && 16803 !tg3_flag(tp, NO_NVRAM)) 16804 tp->fw_needed = NULL; 16805 16806 grc_misc_cfg = tr32(GRC_MISC_CFG); 16807 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK; 16808 16809 if (tg3_asic_rev(tp) == ASIC_REV_5705 && 16810 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 || 16811 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M)) 16812 tg3_flag_set(tp, IS_5788); 16813 16814 if (!tg3_flag(tp, IS_5788) && 16815 tg3_asic_rev(tp) != ASIC_REV_5700) 16816 tg3_flag_set(tp, TAGGED_STATUS); 16817 if (tg3_flag(tp, TAGGED_STATUS)) { 16818 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD | 16819 HOSTCC_MODE_CLRTICK_TXBD); 16820 16821 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS; 16822 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, 16823 tp->misc_host_ctrl); 16824 } 16825 16826 /* Preserve the APE MAC_MODE bits */ 16827 if (tg3_flag(tp, ENABLE_APE)) 16828 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; 16829 else 16830 tp->mac_mode = 0; 16831 16832 if (tg3_10_100_only_device(tp, ent)) 16833 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY; 16834 16835 err = tg3_phy_probe(tp); 16836 if (err) { 16837 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err); 16838 /* ... but do not return immediately ... */ 16839 tg3_mdio_fini(tp); 16840 } 16841 16842 tg3_read_vpd(tp); 16843 tg3_read_fw_ver(tp); 16844 16845 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 16846 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT; 16847 } else { 16848 if (tg3_asic_rev(tp) == ASIC_REV_5700) 16849 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT; 16850 else 16851 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT; 16852 } 16853 16854 /* 5700 {AX,BX} chips have a broken status block link 16855 * change bit implementation, so we must use the 16856 * status register in those cases. 
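* (With USE_LINKCHG_REG set, the driver polls the MAC status register for link changes instead of trusting the status block bit.)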
16857 */ 16858 if (tg3_asic_rev(tp) == ASIC_REV_5700) 16859 tg3_flag_set(tp, USE_LINKCHG_REG); 16860 else 16861 tg3_flag_clear(tp, USE_LINKCHG_REG); 16862 16863 /* The led_ctrl is set during tg3_phy_probe, here we might 16864 * have to force the link status polling mechanism based 16865 * upon subsystem IDs. 16866 */ 16867 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL && 16868 tg3_asic_rev(tp) == ASIC_REV_5701 && 16869 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { 16870 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT; 16871 tg3_flag_set(tp, USE_LINKCHG_REG); 16872 } 16873 16874 /* For all SERDES we poll the MAC status register. */ 16875 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 16876 tg3_flag_set(tp, POLL_SERDES); 16877 else 16878 tg3_flag_clear(tp, POLL_SERDES); 16879 16880 if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF)) 16881 tg3_flag_set(tp, POLL_CPMU_LINK); 16882 16883 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN; 16884 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD; 16885 if (tg3_asic_rev(tp) == ASIC_REV_5701 && 16886 tg3_flag(tp, PCIX_MODE)) { 16887 tp->rx_offset = NET_SKB_PAD; 16888 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 16889 tp->rx_copy_thresh = ~(u16)0; 16890 #endif 16891 } 16892 16893 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1; 16894 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1; 16895 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1; 16896 16897 tp->rx_std_max_post = tp->rx_std_ring_mask + 1; 16898 16899 /* Increment the rx prod index on the rx std ring by at most 16900 * 8 for these chips to workaround hw errata. 16901 */ 16902 if (tg3_asic_rev(tp) == ASIC_REV_5750 || 16903 tg3_asic_rev(tp) == ASIC_REV_5752 || 16904 tg3_asic_rev(tp) == ASIC_REV_5755) 16905 tp->rx_std_max_post = 8; 16906 16907 if (tg3_flag(tp, ASPM_WORKAROUND)) 16908 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) & 16909 PCIE_PWR_MGMT_L1_THRESH_MSK; 16910 16911 return err; 16912 } 16913 16914 static int tg3_get_device_address(struct tg3 *tp, u8 *addr) 16915 { 16916 u32 hi, lo, mac_offset; 16917 int addr_ok = 0; 16918 int err; 16919 16920 if (!eth_platform_get_mac_address(&tp->pdev->dev, addr)) 16921 return 0; 16922 16923 if (tg3_flag(tp, IS_SSB_CORE)) { 16924 err = ssb_gige_get_macaddr(tp->pdev, addr); 16925 if (!err && is_valid_ether_addr(addr)) 16926 return 0; 16927 } 16928 16929 mac_offset = 0x7c; 16930 if (tg3_asic_rev(tp) == ASIC_REV_5704 || 16931 tg3_flag(tp, 5780_CLASS)) { 16932 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) 16933 mac_offset = 0xcc; 16934 if (tg3_nvram_lock(tp)) 16935 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET); 16936 else 16937 tg3_nvram_unlock(tp); 16938 } else if (tg3_flag(tp, 5717_PLUS)) { 16939 if (tp->pci_fn & 1) 16940 mac_offset = 0xcc; 16941 if (tp->pci_fn > 1) 16942 mac_offset += 0x18c; 16943 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) 16944 mac_offset = 0x10; 16945 16946 /* First try to get it from MAC address mailbox. */ 16947 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi); 16948 if ((hi >> 16) == 0x484b) { 16949 addr[0] = (hi >> 8) & 0xff; 16950 addr[1] = (hi >> 0) & 0xff; 16951 16952 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo); 16953 addr[2] = (lo >> 24) & 0xff; 16954 addr[3] = (lo >> 16) & 0xff; 16955 addr[4] = (lo >> 8) & 0xff; 16956 addr[5] = (lo >> 0) & 0xff; 16957 16958 /* Some old bootcode may report a 0 MAC address in SRAM */ 16959 addr_ok = is_valid_ether_addr(addr); 16960 } 16961 if (!addr_ok) { 16962 /* Next, try NVRAM. 
*/ 16963 if (!tg3_flag(tp, NO_NVRAM) && 16964 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) && 16965 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) { 16966 memcpy(&addr[0], ((char *)&hi) + 2, 2); 16967 memcpy(&addr[2], (char *)&lo, sizeof(lo)); 16968 } 16969 /* Finally just fetch it out of the MAC control regs. */ 16970 else { 16971 hi = tr32(MAC_ADDR_0_HIGH); 16972 lo = tr32(MAC_ADDR_0_LOW); 16973 16974 addr[5] = lo & 0xff; 16975 addr[4] = (lo >> 8) & 0xff; 16976 addr[3] = (lo >> 16) & 0xff; 16977 addr[2] = (lo >> 24) & 0xff; 16978 addr[1] = hi & 0xff; 16979 addr[0] = (hi >> 8) & 0xff; 16980 } 16981 } 16982 16983 if (!is_valid_ether_addr(addr)) 16984 return -EINVAL; 16985 return 0; 16986 } 16987 16988 #define BOUNDARY_SINGLE_CACHELINE 1 16989 #define BOUNDARY_MULTI_CACHELINE 2 16990 16991 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val) 16992 { 16993 int cacheline_size; 16994 u8 byte; 16995 int goal; 16996 16997 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte); 16998 if (byte == 0) 16999 cacheline_size = 1024; 17000 else 17001 cacheline_size = (int) byte * 4; 17002 17003 /* On 5703 and later chips, the boundary bits have no 17004 * effect. 17005 */ 17006 if (tg3_asic_rev(tp) != ASIC_REV_5700 && 17007 tg3_asic_rev(tp) != ASIC_REV_5701 && 17008 !tg3_flag(tp, PCI_EXPRESS)) 17009 goto out; 17010 17011 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC) 17012 goal = BOUNDARY_MULTI_CACHELINE; 17013 #else 17014 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA) 17015 goal = BOUNDARY_SINGLE_CACHELINE; 17016 #else 17017 goal = 0; 17018 #endif 17019 #endif 17020 17021 if (tg3_flag(tp, 57765_PLUS)) { 17022 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT; 17023 goto out; 17024 } 17025 17026 if (!goal) 17027 goto out; 17028 17029 /* PCI controllers on most RISC systems tend to disconnect 17030 * when a device tries to burst across a cache-line boundary. 17031 * Therefore, letting tg3 do so just wastes PCI bandwidth. 17032 * 17033 * Unfortunately, for PCI-E there are only limited 17034 * write-side controls for this, and thus for reads 17035 * we will still get the disconnects. We'll also waste 17036 * these PCI cycles for both read and write for chips 17037 * other than 5700 and 5701 which do not implement the 17038 * boundary bits. 
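* Example: with a 64-byte cacheline and the BOUNDARY_SINGLE_CACHELINE goal, the conventional-PCI branch below selects DMA_RWCTRL_READ_BNDRY_64 | DMA_RWCTRL_WRITE_BNDRY_64, stopping each burst at a 64-byte boundary.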
17039 */ 17040 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) { 17041 switch (cacheline_size) { 17042 case 16: 17043 case 32: 17044 case 64: 17045 case 128: 17046 if (goal == BOUNDARY_SINGLE_CACHELINE) { 17047 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX | 17048 DMA_RWCTRL_WRITE_BNDRY_128_PCIX); 17049 } else { 17050 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX | 17051 DMA_RWCTRL_WRITE_BNDRY_384_PCIX); 17052 } 17053 break; 17054 17055 case 256: 17056 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX | 17057 DMA_RWCTRL_WRITE_BNDRY_256_PCIX); 17058 break; 17059 17060 default: 17061 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX | 17062 DMA_RWCTRL_WRITE_BNDRY_384_PCIX); 17063 break; 17064 } 17065 } else if (tg3_flag(tp, PCI_EXPRESS)) { 17066 switch (cacheline_size) { 17067 case 16: 17068 case 32: 17069 case 64: 17070 if (goal == BOUNDARY_SINGLE_CACHELINE) { 17071 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE; 17072 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE; 17073 break; 17074 } 17075 fallthrough; 17076 case 128: 17077 default: 17078 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE; 17079 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE; 17080 break; 17081 } 17082 } else { 17083 switch (cacheline_size) { 17084 case 16: 17085 if (goal == BOUNDARY_SINGLE_CACHELINE) { 17086 val |= (DMA_RWCTRL_READ_BNDRY_16 | 17087 DMA_RWCTRL_WRITE_BNDRY_16); 17088 break; 17089 } 17090 fallthrough; 17091 case 32: 17092 if (goal == BOUNDARY_SINGLE_CACHELINE) { 17093 val |= (DMA_RWCTRL_READ_BNDRY_32 | 17094 DMA_RWCTRL_WRITE_BNDRY_32); 17095 break; 17096 } 17097 fallthrough; 17098 case 64: 17099 if (goal == BOUNDARY_SINGLE_CACHELINE) { 17100 val |= (DMA_RWCTRL_READ_BNDRY_64 | 17101 DMA_RWCTRL_WRITE_BNDRY_64); 17102 break; 17103 } 17104 fallthrough; 17105 case 128: 17106 if (goal == BOUNDARY_SINGLE_CACHELINE) { 17107 val |= (DMA_RWCTRL_READ_BNDRY_128 | 17108 DMA_RWCTRL_WRITE_BNDRY_128); 17109 break; 17110 } 17111 fallthrough; 17112 case 256: 17113 val |= (DMA_RWCTRL_READ_BNDRY_256 | 17114 DMA_RWCTRL_WRITE_BNDRY_256); 17115 break; 17116 case 512: 17117 val |= (DMA_RWCTRL_READ_BNDRY_512 | 17118 DMA_RWCTRL_WRITE_BNDRY_512); 17119 break; 17120 case 1024: 17121 default: 17122 val |= (DMA_RWCTRL_READ_BNDRY_1024 | 17123 DMA_RWCTRL_WRITE_BNDRY_1024); 17124 break; 17125 } 17126 } 17127 17128 out: 17129 return val; 17130 } 17131 17132 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, 17133 int size, bool to_device) 17134 { 17135 struct tg3_internal_buffer_desc test_desc; 17136 u32 sram_dma_descs; 17137 int i, ret; 17138 17139 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE; 17140 17141 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0); 17142 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0); 17143 tw32(RDMAC_STATUS, 0); 17144 tw32(WDMAC_STATUS, 0); 17145 17146 tw32(BUFMGR_MODE, 0); 17147 tw32(FTQ_RESET, 0); 17148 17149 test_desc.addr_hi = ((u64) buf_dma) >> 32; 17150 test_desc.addr_lo = buf_dma & 0xffffffff; 17151 test_desc.nic_mbuf = 0x00002100; 17152 test_desc.len = size; 17153 17154 /* 17155 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz 17156 * the *second* time the tg3 driver was getting loaded after an 17157 * initial scan. 17158 * 17159 * Broadcom tells me: 17160 * ...the DMA engine is connected to the GRC block and a DMA 17161 * reset may affect the GRC block in some unpredictable way... 17162 * The behavior of resets to individual blocks has not been tested. 17163 * 17164 * Broadcom noted the GRC reset will also reset all sub-components. 
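* Hence this test only clears the FIFO queues and DMA status registers above rather than resetting any individual block.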
17165 */ 17166 if (to_device) { 17167 test_desc.cqid_sqid = (13 << 8) | 2; 17168 17169 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE); 17170 udelay(40); 17171 } else { 17172 test_desc.cqid_sqid = (16 << 8) | 7; 17173 17174 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE); 17175 udelay(40); 17176 } 17177 test_desc.flags = 0x00000005; 17178 17179 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) { 17180 u32 val; 17181 17182 val = *(((u32 *)&test_desc) + i); 17183 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 17184 sram_dma_descs + (i * sizeof(u32))); 17185 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); 17186 } 17187 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); 17188 17189 if (to_device) 17190 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs); 17191 else 17192 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs); 17193 17194 ret = -ENODEV; 17195 for (i = 0; i < 40; i++) { 17196 u32 val; 17197 17198 if (to_device) 17199 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ); 17200 else 17201 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ); 17202 if ((val & 0xffff) == sram_dma_descs) { 17203 ret = 0; 17204 break; 17205 } 17206 17207 udelay(100); 17208 } 17209 17210 return ret; 17211 } 17212 17213 #define TEST_BUFFER_SIZE 0x2000 17214 17215 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = { 17216 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) }, 17217 { }, 17218 }; 17219 17220 static int tg3_test_dma(struct tg3 *tp) 17221 { 17222 dma_addr_t buf_dma; 17223 u32 *buf, saved_dma_rwctrl; 17224 int ret = 0; 17225 17226 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, 17227 &buf_dma, GFP_KERNEL); 17228 if (!buf) { 17229 ret = -ENOMEM; 17230 goto out_nofree; 17231 } 17232 17233 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) | 17234 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT)); 17235 17236 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl); 17237 17238 if (tg3_flag(tp, 57765_PLUS)) 17239 goto out; 17240 17241 if (tg3_flag(tp, PCI_EXPRESS)) { 17242 /* DMA read watermark not used on PCIE */ 17243 tp->dma_rwctrl |= 0x00180000; 17244 } else if (!tg3_flag(tp, PCIX_MODE)) { 17245 if (tg3_asic_rev(tp) == ASIC_REV_5705 || 17246 tg3_asic_rev(tp) == ASIC_REV_5750) 17247 tp->dma_rwctrl |= 0x003f0000; 17248 else 17249 tp->dma_rwctrl |= 0x003f000f; 17250 } else { 17251 if (tg3_asic_rev(tp) == ASIC_REV_5703 || 17252 tg3_asic_rev(tp) == ASIC_REV_5704) { 17253 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f); 17254 u32 read_water = 0x7; 17255 17256 /* If the 5704 is behind the EPB bridge, we can 17257 * do the less restrictive ONE_DMA workaround for 17258 * better performance. 
17259 */ 17260 if (tg3_flag(tp, 40BIT_DMA_BUG) && 17261 tg3_asic_rev(tp) == ASIC_REV_5704) 17262 tp->dma_rwctrl |= 0x8000; 17263 else if (ccval == 0x6 || ccval == 0x7) 17264 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA; 17265 17266 if (tg3_asic_rev(tp) == ASIC_REV_5703) 17267 read_water = 4; 17268 /* Set bit 23 to enable PCIX hw bug fix */ 17269 tp->dma_rwctrl |= 17270 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) | 17271 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) | 17272 (1 << 23); 17273 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) { 17274 /* 5780 always in PCIX mode */ 17275 tp->dma_rwctrl |= 0x00144000; 17276 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) { 17277 /* 5714 always in PCIX mode */ 17278 tp->dma_rwctrl |= 0x00148000; 17279 } else { 17280 tp->dma_rwctrl |= 0x001b000f; 17281 } 17282 } 17283 if (tg3_flag(tp, ONE_DMA_AT_ONCE)) 17284 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA; 17285 17286 if (tg3_asic_rev(tp) == ASIC_REV_5703 || 17287 tg3_asic_rev(tp) == ASIC_REV_5704) 17288 tp->dma_rwctrl &= 0xfffffff0; 17289 17290 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 17291 tg3_asic_rev(tp) == ASIC_REV_5701) { 17292 /* Remove this if it causes problems for some boards. */ 17293 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT; 17294 17295 /* On 5700/5701 chips, we need to set this bit. 17296 * Otherwise the chip will issue cacheline transactions 17297 * to streamable DMA memory with not all the byte 17298 * enables turned on. This is an error on several 17299 * RISC PCI controllers, in particular sparc64. 17300 * 17301 * On 5703/5704 chips, this bit has been reassigned 17302 * a different meaning. In particular, it is used 17303 * on those chips to enable a PCI-X workaround. 17304 */ 17305 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE; 17306 } 17307 17308 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 17309 17310 17311 if (tg3_asic_rev(tp) != ASIC_REV_5700 && 17312 tg3_asic_rev(tp) != ASIC_REV_5701) 17313 goto out; 17314 17315 /* It is best to perform DMA test with maximum write burst size 17316 * to expose the 5700/5701 write DMA bug. 17317 */ 17318 saved_dma_rwctrl = tp->dma_rwctrl; 17319 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; 17320 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 17321 17322 while (1) { 17323 u32 *p = buf, i; 17324 17325 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) 17326 p[i] = i; 17327 17328 /* Send the buffer to the chip. */ 17329 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true); 17330 if (ret) { 17331 dev_err(&tp->pdev->dev, 17332 "%s: Buffer write failed. err = %d\n", 17333 __func__, ret); 17334 break; 17335 } 17336 17337 /* Now read it back. */ 17338 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false); 17339 if (ret) { 17340 dev_err(&tp->pdev->dev, "%s: Buffer read failed. " 17341 "err = %d\n", __func__, ret); 17342 break; 17343 } 17344 17345 /* Verify it. */ 17346 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) { 17347 if (p[i] == i) 17348 continue; 17349 17350 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != 17351 DMA_RWCTRL_WRITE_BNDRY_16) { 17352 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; 17353 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; 17354 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 17355 break; 17356 } else { 17357 dev_err(&tp->pdev->dev, 17358 "%s: Buffer corrupted on read back! " 17359 "(%d != %d)\n", __func__, p[i], i); 17360 ret = -ENODEV; 17361 goto out; 17362 } 17363 } 17364 17365 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) { 17366 /* Success. 
*/ 17367 ret = 0; 17368 break; 17369 } 17370 } 17371 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != 17372 DMA_RWCTRL_WRITE_BNDRY_16) { 17373 /* DMA test passed without adjusting DMA boundary, 17374 * now look for chipsets that are known to expose the 17375 * DMA bug without failing the test. 17376 */ 17377 if (pci_dev_present(tg3_dma_wait_state_chipsets)) { 17378 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; 17379 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; 17380 } else { 17381 /* Safe to use the calculated DMA boundary. */ 17382 tp->dma_rwctrl = saved_dma_rwctrl; 17383 } 17384 17385 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 17386 } 17387 17388 out: 17389 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma); 17390 out_nofree: 17391 return ret; 17392 } 17393 17394 static void tg3_init_bufmgr_config(struct tg3 *tp) 17395 { 17396 if (tg3_flag(tp, 57765_PLUS)) { 17397 tp->bufmgr_config.mbuf_read_dma_low_water = 17398 DEFAULT_MB_RDMA_LOW_WATER_5705; 17399 tp->bufmgr_config.mbuf_mac_rx_low_water = 17400 DEFAULT_MB_MACRX_LOW_WATER_57765; 17401 tp->bufmgr_config.mbuf_high_water = 17402 DEFAULT_MB_HIGH_WATER_57765; 17403 17404 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = 17405 DEFAULT_MB_RDMA_LOW_WATER_5705; 17406 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = 17407 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765; 17408 tp->bufmgr_config.mbuf_high_water_jumbo = 17409 DEFAULT_MB_HIGH_WATER_JUMBO_57765; 17410 } else if (tg3_flag(tp, 5705_PLUS)) { 17411 tp->bufmgr_config.mbuf_read_dma_low_water = 17412 DEFAULT_MB_RDMA_LOW_WATER_5705; 17413 tp->bufmgr_config.mbuf_mac_rx_low_water = 17414 DEFAULT_MB_MACRX_LOW_WATER_5705; 17415 tp->bufmgr_config.mbuf_high_water = 17416 DEFAULT_MB_HIGH_WATER_5705; 17417 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 17418 tp->bufmgr_config.mbuf_mac_rx_low_water = 17419 DEFAULT_MB_MACRX_LOW_WATER_5906; 17420 tp->bufmgr_config.mbuf_high_water = 17421 DEFAULT_MB_HIGH_WATER_5906; 17422 } 17423 17424 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = 17425 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780; 17426 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = 17427 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780; 17428 tp->bufmgr_config.mbuf_high_water_jumbo = 17429 DEFAULT_MB_HIGH_WATER_JUMBO_5780; 17430 } else { 17431 tp->bufmgr_config.mbuf_read_dma_low_water = 17432 DEFAULT_MB_RDMA_LOW_WATER; 17433 tp->bufmgr_config.mbuf_mac_rx_low_water = 17434 DEFAULT_MB_MACRX_LOW_WATER; 17435 tp->bufmgr_config.mbuf_high_water = 17436 DEFAULT_MB_HIGH_WATER; 17437 17438 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = 17439 DEFAULT_MB_RDMA_LOW_WATER_JUMBO; 17440 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = 17441 DEFAULT_MB_MACRX_LOW_WATER_JUMBO; 17442 tp->bufmgr_config.mbuf_high_water_jumbo = 17443 DEFAULT_MB_HIGH_WATER_JUMBO; 17444 } 17445 17446 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER; 17447 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER; 17448 } 17449 17450 static char *tg3_phy_string(struct tg3 *tp) 17451 { 17452 switch (tp->phy_id & TG3_PHY_ID_MASK) { 17453 case TG3_PHY_ID_BCM5400: return "5400"; 17454 case TG3_PHY_ID_BCM5401: return "5401"; 17455 case TG3_PHY_ID_BCM5411: return "5411"; 17456 case TG3_PHY_ID_BCM5701: return "5701"; 17457 case TG3_PHY_ID_BCM5703: return "5703"; 17458 case TG3_PHY_ID_BCM5704: return "5704"; 17459 case TG3_PHY_ID_BCM5705: return "5705"; 17460 case TG3_PHY_ID_BCM5750: return "5750"; 17461 case TG3_PHY_ID_BCM5752: return "5752"; 17462 case TG3_PHY_ID_BCM5714: return "5714"; 17463 case TG3_PHY_ID_BCM5780: return "5780"; 17464 case 
TG3_PHY_ID_BCM5755: return "5755"; 17465 case TG3_PHY_ID_BCM5787: return "5787"; 17466 case TG3_PHY_ID_BCM5784: return "5784"; 17467 case TG3_PHY_ID_BCM5756: return "5722/5756"; 17468 case TG3_PHY_ID_BCM5906: return "5906"; 17469 case TG3_PHY_ID_BCM5761: return "5761"; 17470 case TG3_PHY_ID_BCM5718C: return "5718C"; 17471 case TG3_PHY_ID_BCM5718S: return "5718S"; 17472 case TG3_PHY_ID_BCM57765: return "57765"; 17473 case TG3_PHY_ID_BCM5719C: return "5719C"; 17474 case TG3_PHY_ID_BCM5720C: return "5720C"; 17475 case TG3_PHY_ID_BCM5762: return "5762C"; 17476 case TG3_PHY_ID_BCM8002: return "8002/serdes"; 17477 case 0: return "serdes"; 17478 default: return "unknown"; 17479 } 17480 } 17481 17482 static char *tg3_bus_string(struct tg3 *tp, char *str) 17483 { 17484 if (tg3_flag(tp, PCI_EXPRESS)) { 17485 strcpy(str, "PCI Express"); 17486 return str; 17487 } else if (tg3_flag(tp, PCIX_MODE)) { 17488 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f; 17489 17490 strcpy(str, "PCIX:"); 17491 17492 if ((clock_ctrl == 7) || 17493 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) == 17494 GRC_MISC_CFG_BOARD_ID_5704CIOBE)) 17495 strcat(str, "133MHz"); 17496 else if (clock_ctrl == 0) 17497 strcat(str, "33MHz"); 17498 else if (clock_ctrl == 2) 17499 strcat(str, "50MHz"); 17500 else if (clock_ctrl == 4) 17501 strcat(str, "66MHz"); 17502 else if (clock_ctrl == 6) 17503 strcat(str, "100MHz"); 17504 } else { 17505 strcpy(str, "PCI:"); 17506 if (tg3_flag(tp, PCI_HIGH_SPEED)) 17507 strcat(str, "66MHz"); 17508 else 17509 strcat(str, "33MHz"); 17510 } 17511 if (tg3_flag(tp, PCI_32BIT)) 17512 strcat(str, ":32-bit"); 17513 else 17514 strcat(str, ":64-bit"); 17515 return str; 17516 } 17517 17518 static void tg3_init_coal(struct tg3 *tp) 17519 { 17520 struct ethtool_coalesce *ec = &tp->coal; 17521 17522 memset(ec, 0, sizeof(*ec)); 17523 ec->cmd = ETHTOOL_GCOALESCE; 17524 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS; 17525 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS; 17526 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES; 17527 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES; 17528 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT; 17529 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT; 17530 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT; 17531 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT; 17532 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS; 17533 17534 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD | 17535 HOSTCC_MODE_CLRTICK_TXBD)) { 17536 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS; 17537 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS; 17538 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS; 17539 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS; 17540 } 17541 17542 if (tg3_flag(tp, 5705_PLUS)) { 17543 ec->rx_coalesce_usecs_irq = 0; 17544 ec->tx_coalesce_usecs_irq = 0; 17545 ec->stats_block_coalesce_usecs = 0; 17546 } 17547 } 17548 17549 static int tg3_init_one(struct pci_dev *pdev, 17550 const struct pci_device_id *ent) 17551 { 17552 struct net_device *dev; 17553 struct tg3 *tp; 17554 int i, err; 17555 u32 sndmbx, rcvmbx, intmbx; 17556 char str[40]; 17557 u64 dma_mask, persist_dma_mask; 17558 netdev_features_t features = 0; 17559 u8 addr[ETH_ALEN] __aligned(2); 17560 17561 err = pci_enable_device(pdev); 17562 if (err) { 17563 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 17564 return err; 17565 } 17566 17567 err = pci_request_regions(pdev, DRV_MODULE_NAME); 17568 if (err) { 17569 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); 
17570 goto err_out_disable_pdev; 17571 } 17572 17573 pci_set_master(pdev); 17574 17575 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS); 17576 if (!dev) { 17577 err = -ENOMEM; 17578 goto err_out_free_res; 17579 } 17580 17581 SET_NETDEV_DEV(dev, &pdev->dev); 17582 17583 tp = netdev_priv(dev); 17584 tp->pdev = pdev; 17585 tp->dev = dev; 17586 tp->rx_mode = TG3_DEF_RX_MODE; 17587 tp->tx_mode = TG3_DEF_TX_MODE; 17588 tp->irq_sync = 1; 17589 tp->pcierr_recovery = false; 17590 17591 if (tg3_debug > 0) 17592 tp->msg_enable = tg3_debug; 17593 else 17594 tp->msg_enable = TG3_DEF_MSG_ENABLE; 17595 17596 if (pdev_is_ssb_gige_core(pdev)) { 17597 tg3_flag_set(tp, IS_SSB_CORE); 17598 if (ssb_gige_must_flush_posted_writes(pdev)) 17599 tg3_flag_set(tp, FLUSH_POSTED_WRITES); 17600 if (ssb_gige_one_dma_at_once(pdev)) 17601 tg3_flag_set(tp, ONE_DMA_AT_ONCE); 17602 if (ssb_gige_have_roboswitch(pdev)) { 17603 tg3_flag_set(tp, USE_PHYLIB); 17604 tg3_flag_set(tp, ROBOSWITCH); 17605 } 17606 if (ssb_gige_is_rgmii(pdev)) 17607 tg3_flag_set(tp, RGMII_MODE); 17608 } 17609 17610 /* The word/byte swap controls here control register access byte 17611 * swapping. DMA data byte swapping is controlled in the GRC_MODE 17612 * setting below. 17613 */ 17614 tp->misc_host_ctrl = 17615 MISC_HOST_CTRL_MASK_PCI_INT | 17616 MISC_HOST_CTRL_WORD_SWAP | 17617 MISC_HOST_CTRL_INDIR_ACCESS | 17618 MISC_HOST_CTRL_PCISTATE_RW; 17619 17620 /* The NONFRM (non-frame) byte/word swap controls take effect 17621 * on descriptor entries, anything which isn't packet data. 17622 * 17623 * The StrongARM chips on the board (one for tx, one for rx) 17624 * are running in big-endian mode. 17625 */ 17626 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA | 17627 GRC_MODE_WSWAP_NONFRM_DATA); 17628 #ifdef __BIG_ENDIAN 17629 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA; 17630 #endif 17631 spin_lock_init(&tp->lock); 17632 spin_lock_init(&tp->indirect_lock); 17633 INIT_WORK(&tp->reset_task, tg3_reset_task); 17634 17635 tp->regs = pci_ioremap_bar(pdev, BAR_0); 17636 if (!tp->regs) { 17637 dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); 17638 err = -ENOMEM; 17639 goto err_out_free_dev; 17640 } 17641 17642 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || 17643 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E || 17644 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S || 17645 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE || 17646 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || 17647 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C || 17648 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || 17649 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || 17650 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 || 17651 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 || 17652 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 || 17653 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 || 17654 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 || 17655 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 || 17656 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) { 17657 tg3_flag_set(tp, ENABLE_APE); 17658 tp->aperegs = pci_ioremap_bar(pdev, BAR_2); 17659 if (!tp->aperegs) { 17660 dev_err(&pdev->dev, 17661 "Cannot map APE registers, aborting\n"); 17662 err = -ENOMEM; 17663 goto err_out_iounmap; 17664 } 17665 } 17666 17667 tp->rx_pending = TG3_DEF_RX_RING_PENDING; 17668 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING; 17669 17670 dev->ethtool_ops = &tg3_ethtool_ops; 17671 dev->watchdog_timeo = TG3_TX_TIMEOUT; 17672 dev->netdev_ops = &tg3_netdev_ops; 17673 dev->irq = 
pdev->irq; 17674 17675 err = tg3_get_invariants(tp, ent); 17676 if (err) { 17677 dev_err(&pdev->dev, 17678 "Problem fetching invariants of chip, aborting\n"); 17679 goto err_out_apeunmap; 17680 } 17681 17682 /* The EPB bridge inside 5714, 5715, and 5780 and any 17683 * device behind the EPB cannot support DMA addresses > 40-bit. 17684 * On 64-bit systems with IOMMU, use 40-bit dma_mask. 17685 * On 64-bit systems without IOMMU, use 64-bit dma_mask and 17686 * do DMA address check in tg3_start_xmit(). 17687 */ 17688 if (tg3_flag(tp, IS_5788)) 17689 persist_dma_mask = dma_mask = DMA_BIT_MASK(32); 17690 else if (tg3_flag(tp, 40BIT_DMA_BUG)) { 17691 persist_dma_mask = dma_mask = DMA_BIT_MASK(40); 17692 #ifdef CONFIG_HIGHMEM 17693 dma_mask = DMA_BIT_MASK(64); 17694 #endif 17695 } else 17696 persist_dma_mask = dma_mask = DMA_BIT_MASK(64); 17697 17698 /* Configure DMA attributes. */ 17699 if (dma_mask > DMA_BIT_MASK(32)) { 17700 err = dma_set_mask(&pdev->dev, dma_mask); 17701 if (!err) { 17702 features |= NETIF_F_HIGHDMA; 17703 err = dma_set_coherent_mask(&pdev->dev, 17704 persist_dma_mask); 17705 if (err < 0) { 17706 dev_err(&pdev->dev, "Unable to obtain 64 bit " 17707 "DMA for consistent allocations\n"); 17708 goto err_out_apeunmap; 17709 } 17710 } 17711 } 17712 if (err || dma_mask == DMA_BIT_MASK(32)) { 17713 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 17714 if (err) { 17715 dev_err(&pdev->dev, 17716 "No usable DMA configuration, aborting\n"); 17717 goto err_out_apeunmap; 17718 } 17719 } 17720 17721 tg3_init_bufmgr_config(tp); 17722 17723 /* 5700 B0 chips do not support checksumming correctly due 17724 * to hardware bugs. 17725 */ 17726 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) { 17727 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM; 17728 17729 if (tg3_flag(tp, 5755_PLUS)) 17730 features |= NETIF_F_IPV6_CSUM; 17731 } 17732 17733 /* TSO is on by default on chips that support hardware TSO. 17734 * Firmware TSO on older chips gives lower performance, so it 17735 * is off by default, but can be enabled using ethtool. 17736 */ 17737 if ((tg3_flag(tp, HW_TSO_1) || 17738 tg3_flag(tp, HW_TSO_2) || 17739 tg3_flag(tp, HW_TSO_3)) && 17740 (features & NETIF_F_IP_CSUM)) 17741 features |= NETIF_F_TSO; 17742 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) { 17743 if (features & NETIF_F_IPV6_CSUM) 17744 features |= NETIF_F_TSO6; 17745 if (tg3_flag(tp, HW_TSO_3) || 17746 tg3_asic_rev(tp) == ASIC_REV_5761 || 17747 (tg3_asic_rev(tp) == ASIC_REV_5784 && 17748 tg3_chip_rev(tp) != CHIPREV_5784_AX) || 17749 tg3_asic_rev(tp) == ASIC_REV_5785 || 17750 tg3_asic_rev(tp) == ASIC_REV_57780) 17751 features |= NETIF_F_TSO_ECN; 17752 } 17753 17754 dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX | 17755 NETIF_F_HW_VLAN_CTAG_RX; 17756 dev->vlan_features |= features; 17757 17758 /* 17759 * Add loopback capability only for a subset of devices that support 17760 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY 17761 * loopback for the remaining devices.
17762 */ 17763 if (tg3_asic_rev(tp) != ASIC_REV_5780 && 17764 !tg3_flag(tp, CPMU_PRESENT)) 17765 /* Add the loopback capability */ 17766 features |= NETIF_F_LOOPBACK; 17767 17768 dev->hw_features |= features; 17769 dev->priv_flags |= IFF_UNICAST_FLT; 17770 17771 /* MTU range: 60 - 9000 or 1500, depending on hardware */ 17772 dev->min_mtu = TG3_MIN_MTU; 17773 dev->max_mtu = TG3_MAX_MTU(tp); 17774 17775 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 && 17776 !tg3_flag(tp, TSO_CAPABLE) && 17777 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) { 17778 tg3_flag_set(tp, MAX_RXPEND_64); 17779 tp->rx_pending = 63; 17780 } 17781 17782 err = tg3_get_device_address(tp, addr); 17783 if (err) { 17784 dev_err(&pdev->dev, 17785 "Could not obtain valid ethernet address, aborting\n"); 17786 goto err_out_apeunmap; 17787 } 17788 eth_hw_addr_set(dev, addr); 17789 17790 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW; 17791 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW; 17792 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW; 17793 for (i = 0; i < tp->irq_max; i++) { 17794 struct tg3_napi *tnapi = &tp->napi[i]; 17795 17796 tnapi->tp = tp; 17797 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING; 17798 17799 tnapi->int_mbox = intmbx; 17800 if (i <= 4) 17801 intmbx += 0x8; 17802 else 17803 intmbx += 0x4; 17804 17805 tnapi->consmbox = rcvmbx; 17806 tnapi->prodmbox = sndmbx; 17807 17808 if (i) 17809 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1); 17810 else 17811 tnapi->coal_now = HOSTCC_MODE_NOW; 17812 17813 if (!tg3_flag(tp, SUPPORT_MSIX)) 17814 break; 17815 17816 /* 17817 * If we support MSIX, we'll be using RSS. If we're using 17818 * RSS, the first vector only handles link interrupts and the 17819 * remaining vectors handle rx and tx interrupts. Reuse the 17820 * mailbox values for the next iteration. The values we setup 17821 * above are still useful for the single vectored mode. 17822 */ 17823 if (!i) 17824 continue; 17825 17826 rcvmbx += 0x8; 17827 17828 if (sndmbx & 0x4) 17829 sndmbx -= 0x4; 17830 else 17831 sndmbx += 0xc; 17832 } 17833 17834 /* 17835 * Reset chip in case UNDI or EFI driver did not shutdown 17836 * DMA self test will enable WDMAC and we'll see (spurious) 17837 * pending DMA on the PCI bus at that point. 

	/*
	 * Reset chip in case UNDI or EFI driver did not shut down.  The
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tg3_full_lock(tp, 0);
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, PTP_CAPABLE);

	tg3_timer_init(tp);

	tg3_carrier_off(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	if (tg3_flag(tp, PTP_CAPABLE)) {
		tg3_ptp_init(tp);
		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
						   &tp->pdev->dev);
		if (IS_ERR(tp->ptp_clock))
			tp->ptp_clock = NULL;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tg3_chip_rev_id(tp),
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
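
	/* Snapshot the fully initialized config space; tg3_io_slot_reset()
	 * below depends on pci_restore_state() picking this copy back up
	 * after a PCI bus reset.
	 */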

	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	return err;
}

static void tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		tg3_ptp_fini(tp);

		release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
	}
}

#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, true);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

unlock:
	rtnl_unlock();
	return err;
}

static int tg3_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp,
			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

unlock:
	rtnl_unlock();
	return err;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
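
/* SIMPLE_DEV_PM_OPS() expands to a struct dev_pm_ops whose system-sleep
 * callbacks (suspend/freeze/poweroff and their resume counterparts) all
 * point at tg3_suspend()/tg3_resume().  Without CONFIG_PM_SLEEP the two
 * functions above are compiled out and the ops table is simply empty, so
 * the &tg3_pm_ops reference in tg3_driver below stays valid either way.
 */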

static void tg3_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (netif_running(dev))
		dev_close(dev);

	if (system_state == SYSTEM_POWER_OFF)
		tg3_power_down(tp);

	rtnl_unlock();
}

/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	/* Could be second call or maybe we don't have netdev yet */
	if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
		goto done;

	/* We needn't recover from permanent error */
	if (state == pci_channel_io_frozen)
		tp->pcierr_recovery = true;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure) {
		if (netdev) {
			tg3_napi_enable(tp);
			dev_close(netdev);
		}
		err = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_disable_device(pdev);
	}

	rtnl_unlock();

	return err;
}

/**
 * tg3_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netdev || !netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
		tg3_napi_enable(tp);
		dev_close(netdev);
	}
	rtnl_unlock();

	return rc;
}
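
/* The PCI AER core walks these handlers in a fixed order on a recoverable
 * error: tg3_io_error_detected() quiesces the device, tg3_io_slot_reset()
 * runs once the slot has been reset, and tg3_io_resume() below restarts
 * traffic only if the earlier stages reported success.
 */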

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netdev || !netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	tg3_phy_start(tp);

done:
	tp->pcierr_recovery = false;
	rtnl_unlock();
}

static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= &tg3_pm_ops,
	.shutdown	= tg3_shutdown,
};

module_pci_driver(tg3_driver);
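
/* module_pci_driver() above expands to the module_init()/module_exit()
 * boilerplate that registers tg3_driver with the PCI core on module load
 * and unregisters it on unload, replacing hand-written init/exit stubs.
 */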