/* tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2016 Broadcom Corporation.
 * Copyright (C) 2016-2017 Broadcom Limited.
 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 * refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2016 Broadcom Corporation.
 *	Copyright (C) 2016-2017 Broadcom Ltd.
 *	Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 *	refers to Broadcom Inc. and/or its subsidiaries.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/crc32poly.h>

#include <net/checksum.h>
#include <net/gso.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
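
/* For reference, the accessors above are thin wrappers over the atomic
 * bitops, so a pair of calls such as
 *
 *	if (tg3_flag(tp, JUMBO_CAPABLE))
 *		tg3_flag_set(tp, TSO_CAPABLE);
 *
 * expands to test_bit(TG3_FLAG_JUMBO_CAPABLE, tp->tg3_flags) followed by
 * set_bit(TG3_FLAG_TSO_CAPABLE, tp->tg3_flags).
 */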
#define DRV_MODULE_NAME		"tg3"
/* DO NOT UPDATE TG3_*_NUM defines */
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			137

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			ETH_ZLEN
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
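
/* Worked example of the shift/mask arithmetic the comment above refers to:
 * with TG3_TX_RING_SIZE == 512 (a power of two),
 *
 *	NEXT_TX(510) == (510 + 1) & 511 == 511
 *	NEXT_TX(511) == (511 + 1) & 511 == 0	(wraps to the ring start)
 *
 * which yields the same result as "(N + 1) % TG3_TX_RING_SIZE" but
 * compiles to a single AND instead of a divide/modulo.
 */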
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002
static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
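
/* The .driver_data bits above travel with the matched table entry and reach
 * the probe routine through its struct pci_device_id argument.  Illustrative
 * sketch only (the real consumer is tg3_init_one(), later in this file):
 *
 *	static int example_probe(struct pci_dev *pdev,
 *				 const struct pci_device_id *ent)
 *	{
 *		if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY)
 *			;	// restrict advertised speeds to 10/100
 *		return 0;
 *	}
 */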
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)

#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)

"tx_excessive_collisions" }, 400 { "tx_late_collisions" }, 401 { "tx_collide_2times" }, 402 { "tx_collide_3times" }, 403 { "tx_collide_4times" }, 404 { "tx_collide_5times" }, 405 { "tx_collide_6times" }, 406 { "tx_collide_7times" }, 407 { "tx_collide_8times" }, 408 { "tx_collide_9times" }, 409 { "tx_collide_10times" }, 410 { "tx_collide_11times" }, 411 { "tx_collide_12times" }, 412 { "tx_collide_13times" }, 413 { "tx_collide_14times" }, 414 { "tx_collide_15times" }, 415 { "tx_ucast_packets" }, 416 { "tx_mcast_packets" }, 417 { "tx_bcast_packets" }, 418 { "tx_carrier_sense_errors" }, 419 { "tx_discards" }, 420 { "tx_errors" }, 421 422 { "dma_writeq_full" }, 423 { "dma_write_prioq_full" }, 424 { "rxbds_empty" }, 425 { "rx_discards" }, 426 { "rx_errors" }, 427 { "rx_threshold_hit" }, 428 429 { "dma_readq_full" }, 430 { "dma_read_prioq_full" }, 431 { "tx_comp_queue_full" }, 432 433 { "ring_set_send_prod_index" }, 434 { "ring_status_update" }, 435 { "nic_irqs" }, 436 { "nic_avoided_irqs" }, 437 { "nic_tx_threshold_hit" }, 438 439 { "mbuf_lwm_thresh_hit" }, 440 }; 441 442 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys) 443 #define TG3_NVRAM_TEST 0 444 #define TG3_LINK_TEST 1 445 #define TG3_REGISTER_TEST 2 446 #define TG3_MEMORY_TEST 3 447 #define TG3_MAC_LOOPB_TEST 4 448 #define TG3_PHY_LOOPB_TEST 5 449 #define TG3_EXT_LOOPB_TEST 6 450 #define TG3_INTERRUPT_TEST 7 451 452 453 static const struct { 454 const char string[ETH_GSTRING_LEN]; 455 } ethtool_test_keys[] = { 456 [TG3_NVRAM_TEST] = { "nvram test (online) " }, 457 [TG3_LINK_TEST] = { "link test (online) " }, 458 [TG3_REGISTER_TEST] = { "register test (offline)" }, 459 [TG3_MEMORY_TEST] = { "memory test (offline)" }, 460 [TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" }, 461 [TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" }, 462 [TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" }, 463 [TG3_INTERRUPT_TEST] = { "interrupt test (offline)" }, 464 }; 465 466 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys) 467 468 469 static void tg3_write32(struct tg3 *tp, u32 off, u32 val) 470 { 471 writel(val, tp->regs + off); 472 } 473 474 static u32 tg3_read32(struct tg3 *tp, u32 off) 475 { 476 return readl(tp->regs + off); 477 } 478 479 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val) 480 { 481 writel(val, tp->aperegs + off); 482 } 483 484 static u32 tg3_ape_read32(struct tg3 *tp, u32 off) 485 { 486 return readl(tp->aperegs + off); 487 } 488 489 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val) 490 { 491 unsigned long flags; 492 493 spin_lock_irqsave(&tp->indirect_lock, flags); 494 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); 495 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val); 496 spin_unlock_irqrestore(&tp->indirect_lock, flags); 497 } 498 499 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val) 500 { 501 writel(val, tp->regs + off); 502 readl(tp->regs + off); 503 } 504 505 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off) 506 { 507 unsigned long flags; 508 u32 val; 509 510 spin_lock_irqsave(&tp->indirect_lock, flags); 511 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); 512 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val); 513 spin_unlock_irqrestore(&tp->indirect_lock, flags); 514 return val; 515 } 516 517 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val) 518 { 519 unsigned long flags; 520 521 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) { 522 pci_write_config_dword(tp->pdev, 
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
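
/* Example: tg3_switch_clocks() below uses the waiting variant to give the
 * clock logic time to settle after a frequency change:
 *
 *	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 *
 * i.e. write the value, flush with a read back where posting is safe, and
 * delay 40 usec around the read so the full wait time is honored.
 */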
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
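
/* Example: the firmware handshake later in this file (tg3_poll_fw) reads
 * the magic value that bootcode leaves in NIC SRAM through this window:
 *
 *	u32 val;
 *
 *	tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
 *	if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
 *		;	// firmware finished initializing
 */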
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver doesn't hold any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}

}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
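
/* The request/grant registers implement a simple mutex shared with the APE
 * firmware, so callers always bracket the protected region, e.g.:
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return -EBUSY;
 *	// ... touch APE shared memory ...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 */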
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
#endif
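
/* The scratchpad read above follows the general APE "driver event" recipe
 * that tg3_ape_send_event() below also uses: acquire TG3_APE_LOCK_MEM, post
 * the request in TG3_APE_EVENT_STATUS with APE_EVENT_STATUS_EVENT_PENDING
 * set, drop the lock, ring the doorbell with
 *
 *	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
 *
 * and then poll until the APE firmware clears the pending bit.
 */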
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 20 milliseconds for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 20000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
				APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_send_ape_heartbeat(struct tg3 *tp,
				   unsigned long interval)
{
	/* Check if the heartbeat interval has elapsed */
	if (!tg3_flag(tp, ENABLE_APE) ||
	    time_before(jiffies, tp->ape_hb_jiffies + interval))
		return;

	tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
	tp->ape_hb_jiffies = jiffies;
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
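
/* Interrupt mailbox convention used above: writing 0x00000001 to a vector's
 * int_mbox masks that vector, while writing (last_tag << 24) unmasks it and
 * acknowledges all status-block updates up to that tag:
 *
 *	tw32_mailbox_f(tnapi->int_mbox, 0x00000001);		// disable
 *	tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);	// enable/ack
 */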
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}
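
/* A MI_COM frame packs the target PHY and register into a single register
 * write.  For example, reading MII_BMSR (register 0x01) from PHY address 1
 * issues:
 *
 *	frame_val = (1 << MI_COM_PHY_ADDR_SHIFT) |
 *		    (MII_BMSR << MI_COM_REG_ADDR_SHIFT) |
 *		    MI_COM_CMD_READ | MI_COM_START;
 *
 * and the result is then taken from the low MI_COM_DATA_MASK bits once
 * MI_COM_BUSY clears.
 */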
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
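
/* Illustrative sketch of the two-step DSP access pattern above (register
 * names are from tg3.h, not this excerpt): a read-modify-write goes through
 * the address/data port pair, e.g.
 *
 *	u32 val;
 *
 *	if (!tg3_phydsp_read(tp, MII_TG3_DSP_EXP8, &val))
 *		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8,
 *				 val | MII_TG3_DSP_EXP8_REJ2MHz);
 */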
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		fallthrough;
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}

/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}
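
/* Layout of the four data words gathered above and pushed to the firmware
 * command mailbox (high halfword / low halfword of each u32):
 *
 *	data[0] = (BMCR      << 16) | BMSR
 *	data[1] = (ADVERTISE << 16) | LPA
 *	data[2] = (CTRL1000  << 16) | STAT1000	(0 for MII serdes PHYs)
 *	data[3] =  PHYADDR   << 16
 */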
/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			if (pci_channel_offline(tp->pdev))
				return -ENODEV;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		if (pci_channel_offline(tp->pdev)) {
			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
				tg3_flag_set(tp, NO_FWARE_REPORTED);
				netdev_info(tp->dev, "No firmware running\n");
			}

			break;
		}

		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}

	tp->link_up = netif_carrier_ok(tp->dev);
}
"enabled" : "disabled"); 1893 1894 tg3_ump_link_report(tp); 1895 } 1896 1897 tp->link_up = netif_carrier_ok(tp->dev); 1898 } 1899 1900 static u32 tg3_decode_flowctrl_1000T(u32 adv) 1901 { 1902 u32 flowctrl = 0; 1903 1904 if (adv & ADVERTISE_PAUSE_CAP) { 1905 flowctrl |= FLOW_CTRL_RX; 1906 if (!(adv & ADVERTISE_PAUSE_ASYM)) 1907 flowctrl |= FLOW_CTRL_TX; 1908 } else if (adv & ADVERTISE_PAUSE_ASYM) 1909 flowctrl |= FLOW_CTRL_TX; 1910 1911 return flowctrl; 1912 } 1913 1914 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl) 1915 { 1916 u16 miireg; 1917 1918 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX)) 1919 miireg = ADVERTISE_1000XPAUSE; 1920 else if (flow_ctrl & FLOW_CTRL_TX) 1921 miireg = ADVERTISE_1000XPSE_ASYM; 1922 else if (flow_ctrl & FLOW_CTRL_RX) 1923 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM; 1924 else 1925 miireg = 0; 1926 1927 return miireg; 1928 } 1929 1930 static u32 tg3_decode_flowctrl_1000X(u32 adv) 1931 { 1932 u32 flowctrl = 0; 1933 1934 if (adv & ADVERTISE_1000XPAUSE) { 1935 flowctrl |= FLOW_CTRL_RX; 1936 if (!(adv & ADVERTISE_1000XPSE_ASYM)) 1937 flowctrl |= FLOW_CTRL_TX; 1938 } else if (adv & ADVERTISE_1000XPSE_ASYM) 1939 flowctrl |= FLOW_CTRL_TX; 1940 1941 return flowctrl; 1942 } 1943 1944 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv) 1945 { 1946 u8 cap = 0; 1947 1948 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) { 1949 cap = FLOW_CTRL_TX | FLOW_CTRL_RX; 1950 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) { 1951 if (lcladv & ADVERTISE_1000XPAUSE) 1952 cap = FLOW_CTRL_RX; 1953 if (rmtadv & ADVERTISE_1000XPAUSE) 1954 cap = FLOW_CTRL_TX; 1955 } 1956 1957 return cap; 1958 } 1959 1960 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv) 1961 { 1962 u8 autoneg; 1963 u8 flowctrl = 0; 1964 u32 old_rx_mode = tp->rx_mode; 1965 u32 old_tx_mode = tp->tx_mode; 1966 1967 if (tg3_flag(tp, USE_PHYLIB)) 1968 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg; 1969 else 1970 autoneg = tp->link_config.autoneg; 1971 1972 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) { 1973 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) 1974 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv); 1975 else 1976 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv); 1977 } else 1978 flowctrl = tp->link_config.flowctrl; 1979 1980 tp->link_config.active_flowctrl = flowctrl; 1981 1982 if (flowctrl & FLOW_CTRL_RX) 1983 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE; 1984 else 1985 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE; 1986 1987 if (old_rx_mode != tp->rx_mode) 1988 tw32_f(MAC_RX_MODE, tp->rx_mode); 1989 1990 if (flowctrl & FLOW_CTRL_TX) 1991 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE; 1992 else 1993 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE; 1994 1995 if (old_tx_mode != tp->tx_mode) 1996 tw32_f(MAC_TX_MODE, tp->tx_mode); 1997 } 1998 1999 static void tg3_adjust_link(struct net_device *dev) 2000 { 2001 u8 oldflowctrl, linkmesg = 0; 2002 u32 mac_mode, lcl_adv, rmt_adv; 2003 struct tg3 *tp = netdev_priv(dev); 2004 struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 2005 2006 spin_lock_bh(&tp->lock); 2007 2008 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK | 2009 MAC_MODE_HALF_DUPLEX); 2010 2011 oldflowctrl = tp->link_config.active_flowctrl; 2012 2013 if (phydev->link) { 2014 lcl_adv = 0; 2015 rmt_adv = 0; 2016 2017 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10) 2018 mac_mode |= MAC_MODE_PORT_MODE_MII; 2019 else if (phydev->speed == SPEED_1000 || 2020 tg3_asic_rev(tp) != ASIC_REV_5785) 2021 
mac_mode |= MAC_MODE_PORT_MODE_GMII; 2022 else 2023 mac_mode |= MAC_MODE_PORT_MODE_MII; 2024 2025 if (phydev->duplex == DUPLEX_HALF) 2026 mac_mode |= MAC_MODE_HALF_DUPLEX; 2027 else { 2028 lcl_adv = mii_advertise_flowctrl( 2029 tp->link_config.flowctrl); 2030 2031 if (phydev->pause) 2032 rmt_adv = LPA_PAUSE_CAP; 2033 if (phydev->asym_pause) 2034 rmt_adv |= LPA_PAUSE_ASYM; 2035 } 2036 2037 tg3_setup_flow_control(tp, lcl_adv, rmt_adv); 2038 } else 2039 mac_mode |= MAC_MODE_PORT_MODE_GMII; 2040 2041 if (mac_mode != tp->mac_mode) { 2042 tp->mac_mode = mac_mode; 2043 tw32_f(MAC_MODE, tp->mac_mode); 2044 udelay(40); 2045 } 2046 2047 if (tg3_asic_rev(tp) == ASIC_REV_5785) { 2048 if (phydev->speed == SPEED_10) 2049 tw32(MAC_MI_STAT, 2050 MAC_MI_STAT_10MBPS_MODE | 2051 MAC_MI_STAT_LNKSTAT_ATTN_ENAB); 2052 else 2053 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); 2054 } 2055 2056 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF) 2057 tw32(MAC_TX_LENGTHS, 2058 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | 2059 (6 << TX_LENGTHS_IPG_SHIFT) | 2060 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT))); 2061 else 2062 tw32(MAC_TX_LENGTHS, 2063 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | 2064 (6 << TX_LENGTHS_IPG_SHIFT) | 2065 (32 << TX_LENGTHS_SLOT_TIME_SHIFT))); 2066 2067 if (phydev->link != tp->old_link || 2068 phydev->speed != tp->link_config.active_speed || 2069 phydev->duplex != tp->link_config.active_duplex || 2070 oldflowctrl != tp->link_config.active_flowctrl) 2071 linkmesg = 1; 2072 2073 tp->old_link = phydev->link; 2074 tp->link_config.active_speed = phydev->speed; 2075 tp->link_config.active_duplex = phydev->duplex; 2076 2077 spin_unlock_bh(&tp->lock); 2078 2079 if (linkmesg) 2080 tg3_link_report(tp); 2081 } 2082 2083 static int tg3_phy_init(struct tg3 *tp) 2084 { 2085 struct phy_device *phydev; 2086 2087 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) 2088 return 0; 2089 2090 /* Bring the PHY back to a known state. */ 2091 tg3_bmcr_reset(tp); 2092 2093 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 2094 2095 /* Attach the MAC to the PHY. */ 2096 phydev = phy_connect(tp->dev, phydev_name(phydev), 2097 tg3_adjust_link, phydev->interface); 2098 if (IS_ERR(phydev)) { 2099 dev_err(&tp->pdev->dev, "Could not attach to PHY\n"); 2100 return PTR_ERR(phydev); 2101 } 2102 2103 /* Mask with MAC supported features. 
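 * GMII/RGMII PHYs may run up to 1000 Mb/s unless the PHY is flagged 10/100-only; plain MII PHYs are capped at 100 Mb/s. Asymmetric pause is advertised in all supported modes.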
*/ 2104 switch (phydev->interface) { 2105 case PHY_INTERFACE_MODE_GMII: 2106 case PHY_INTERFACE_MODE_RGMII: 2107 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 2108 phy_set_max_speed(phydev, SPEED_1000); 2109 phy_support_asym_pause(phydev); 2110 break; 2111 } 2112 fallthrough; 2113 case PHY_INTERFACE_MODE_MII: 2114 phy_set_max_speed(phydev, SPEED_100); 2115 phy_support_asym_pause(phydev); 2116 break; 2117 default: 2118 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); 2119 return -EINVAL; 2120 } 2121 2122 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED; 2123 2124 phy_attached_info(phydev); 2125 2126 return 0; 2127 } 2128 2129 static void tg3_phy_start(struct tg3 *tp) 2130 { 2131 struct phy_device *phydev; 2132 2133 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 2134 return; 2135 2136 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 2137 2138 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { 2139 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; 2140 phydev->speed = tp->link_config.speed; 2141 phydev->duplex = tp->link_config.duplex; 2142 phydev->autoneg = tp->link_config.autoneg; 2143 ethtool_convert_legacy_u32_to_link_mode( 2144 phydev->advertising, tp->link_config.advertising); 2145 } 2146 2147 phy_start(phydev); 2148 2149 phy_start_aneg(phydev); 2150 } 2151 2152 static void tg3_phy_stop(struct tg3 *tp) 2153 { 2154 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 2155 return; 2156 2157 phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); 2158 } 2159 2160 static void tg3_phy_fini(struct tg3 *tp) 2161 { 2162 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { 2163 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); 2164 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED; 2165 } 2166 } 2167 2168 static int tg3_phy_set_extloopbk(struct tg3 *tp) 2169 { 2170 int err; 2171 u32 val; 2172 2173 if (tp->phy_flags & TG3_PHYFLG_IS_FET) 2174 return 0; 2175 2176 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { 2177 /* Cannot do read-modify-write on 5401 */ 2178 err = tg3_phy_auxctl_write(tp, 2179 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 2180 MII_TG3_AUXCTL_ACTL_EXTLOOPBK | 2181 0x4c20); 2182 goto done; 2183 } 2184 2185 err = tg3_phy_auxctl_read(tp, 2186 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val); 2187 if (err) 2188 return err; 2189 2190 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK; 2191 err = tg3_phy_auxctl_write(tp, 2192 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val); 2193 2194 done: 2195 return err; 2196 } 2197 2198 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable) 2199 { 2200 u32 phytest; 2201 2202 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) { 2203 u32 phy; 2204 2205 tg3_writephy(tp, MII_TG3_FET_TEST, 2206 phytest | MII_TG3_FET_SHADOW_EN); 2207 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) { 2208 if (enable) 2209 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD; 2210 else 2211 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD; 2212 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy); 2213 } 2214 tg3_writephy(tp, MII_TG3_FET_TEST, phytest); 2215 } 2216 } 2217 2218 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable) 2219 { 2220 u32 reg; 2221 2222 if (!tg3_flag(tp, 5705_PLUS) || 2223 (tg3_flag(tp, 5717_PLUS) && 2224 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))) 2225 return; 2226 2227 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 2228 tg3_phy_fet_toggle_apd(tp, enable); 2229 return; 2230 } 2231 2232 reg = MII_TG3_MISC_SHDW_SCR5_LPED | 2233 MII_TG3_MISC_SHDW_SCR5_DLPTLM | 2234 MII_TG3_MISC_SHDW_SCR5_SDTL | 2235 MII_TG3_MISC_SHDW_SCR5_C125OE; 2236 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable) 2237 reg |= 
MII_TG3_MISC_SHDW_SCR5_DLLAPD; 2238 2239 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg); 2240 2241 2242 reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS; 2243 if (enable) 2244 reg |= MII_TG3_MISC_SHDW_APD_ENABLE; 2245 2246 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg); 2247 } 2248 2249 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable) 2250 { 2251 u32 phy; 2252 2253 if (!tg3_flag(tp, 5705_PLUS) || 2254 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 2255 return; 2256 2257 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 2258 u32 ephy; 2259 2260 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) { 2261 u32 reg = MII_TG3_FET_SHDW_MISCCTRL; 2262 2263 tg3_writephy(tp, MII_TG3_FET_TEST, 2264 ephy | MII_TG3_FET_SHADOW_EN); 2265 if (!tg3_readphy(tp, reg, &phy)) { 2266 if (enable) 2267 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX; 2268 else 2269 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX; 2270 tg3_writephy(tp, reg, phy); 2271 } 2272 tg3_writephy(tp, MII_TG3_FET_TEST, ephy); 2273 } 2274 } else { 2275 int ret; 2276 2277 ret = tg3_phy_auxctl_read(tp, 2278 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy); 2279 if (!ret) { 2280 if (enable) 2281 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX; 2282 else 2283 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX; 2284 tg3_phy_auxctl_write(tp, 2285 MII_TG3_AUXCTL_SHDWSEL_MISC, phy); 2286 } 2287 } 2288 } 2289 2290 static void tg3_phy_set_wirespeed(struct tg3 *tp) 2291 { 2292 int ret; 2293 u32 val; 2294 2295 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) 2296 return; 2297 2298 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val); 2299 if (!ret) 2300 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, 2301 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN); 2302 } 2303 2304 static void tg3_phy_apply_otp(struct tg3 *tp) 2305 { 2306 u32 otp, phy; 2307 2308 if (!tp->phy_otp) 2309 return; 2310 2311 otp = tp->phy_otp; 2312 2313 if (tg3_phy_toggle_auxctl_smdsp(tp, true)) 2314 return; 2315 2316 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT); 2317 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT; 2318 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy); 2319 2320 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) | 2321 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT); 2322 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy); 2323 2324 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT); 2325 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ; 2326 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy); 2327 2328 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT); 2329 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy); 2330 2331 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT); 2332 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy); 2333 2334 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) | 2335 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT); 2336 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy); 2337 2338 tg3_phy_toggle_auxctl_smdsp(tp, false); 2339 } 2340 2341 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee) 2342 { 2343 u32 val; 2344 struct ethtool_eee *dest = &tp->eee; 2345 2346 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) 2347 return; 2348 2349 if (eee) 2350 dest = eee; 2351 2352 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val)) 2353 return; 2354 2355 /* Pull eee_active */ 2356 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T || 2357 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) { 2358 dest->eee_active = 1; 2359 } else 2360 dest->eee_active = 0; 2361 2362 /* Pull lp advertised settings */ 2363 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val)) 
2364 return; 2365 dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val); 2366 2367 /* Pull advertised and eee_enabled settings */ 2368 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val)) 2369 return; 2370 dest->eee_enabled = !!val; 2371 dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val); 2372 2373 /* Pull tx_lpi_enabled */ 2374 val = tr32(TG3_CPMU_EEE_MODE); 2375 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX); 2376 2377 /* Pull lpi timer value */ 2378 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff; 2379 } 2380 2381 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up) 2382 { 2383 u32 val; 2384 2385 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) 2386 return; 2387 2388 tp->setlpicnt = 0; 2389 2390 if (tp->link_config.autoneg == AUTONEG_ENABLE && 2391 current_link_up && 2392 tp->link_config.active_duplex == DUPLEX_FULL && 2393 (tp->link_config.active_speed == SPEED_100 || 2394 tp->link_config.active_speed == SPEED_1000)) { 2395 u32 eeectl; 2396 2397 if (tp->link_config.active_speed == SPEED_1000) 2398 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US; 2399 else 2400 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US; 2401 2402 tw32(TG3_CPMU_EEE_CTRL, eeectl); 2403 2404 tg3_eee_pull_config(tp, NULL); 2405 if (tp->eee.eee_active) 2406 tp->setlpicnt = 2; 2407 } 2408 2409 if (!tp->setlpicnt) { 2410 if (current_link_up && 2411 !tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2412 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000); 2413 tg3_phy_toggle_auxctl_smdsp(tp, false); 2414 } 2415 2416 val = tr32(TG3_CPMU_EEE_MODE); 2417 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE); 2418 } 2419 } 2420 2421 static void tg3_phy_eee_enable(struct tg3 *tp) 2422 { 2423 u32 val; 2424 2425 if (tp->link_config.active_speed == SPEED_1000 && 2426 (tg3_asic_rev(tp) == ASIC_REV_5717 || 2427 tg3_asic_rev(tp) == ASIC_REV_5719 || 2428 tg3_flag(tp, 57765_CLASS)) && 2429 !tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2430 val = MII_TG3_DSP_TAP26_ALNOKO | 2431 MII_TG3_DSP_TAP26_RMRXSTO; 2432 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); 2433 tg3_phy_toggle_auxctl_smdsp(tp, false); 2434 } 2435 2436 val = tr32(TG3_CPMU_EEE_MODE); 2437 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE); 2438 } 2439 2440 static int tg3_wait_macro_done(struct tg3 *tp) 2441 { 2442 int limit = 100; 2443 2444 while (limit--) { 2445 u32 tmp32; 2446 2447 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) { 2448 if ((tmp32 & 0x1000) == 0) 2449 break; 2450 } 2451 } 2452 if (limit < 0) 2453 return -EBUSY; 2454 2455 return 0; 2456 } 2457 2458 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp) 2459 { 2460 static const u32 test_pat[4][6] = { 2461 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 }, 2462 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 }, 2463 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 }, 2464 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 } 2465 }; 2466 int chan; 2467 2468 for (chan = 0; chan < 4; chan++) { 2469 int i; 2470 2471 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 2472 (chan * 0x2000) | 0x0200); 2473 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002); 2474 2475 for (i = 0; i < 6; i++) 2476 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 2477 test_pat[chan][i]); 2478 2479 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202); 2480 if (tg3_wait_macro_done(tp)) { 2481 *resetp = 1; 2482 return -EBUSY; 2483 } 2484 2485 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 2486 (chan * 0x2000) | 0x0200); 2487 tg3_writephy(tp, 
MII_TG3_DSP_CONTROL, 0x0082); 2488 if (tg3_wait_macro_done(tp)) { 2489 *resetp = 1; 2490 return -EBUSY; 2491 } 2492 2493 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802); 2494 if (tg3_wait_macro_done(tp)) { 2495 *resetp = 1; 2496 return -EBUSY; 2497 } 2498 2499 for (i = 0; i < 6; i += 2) { 2500 u32 low, high; 2501 2502 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) || 2503 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) || 2504 tg3_wait_macro_done(tp)) { 2505 *resetp = 1; 2506 return -EBUSY; 2507 } 2508 low &= 0x7fff; 2509 high &= 0x000f; 2510 if (low != test_pat[chan][i] || 2511 high != test_pat[chan][i+1]) { 2512 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b); 2513 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001); 2514 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005); 2515 2516 return -EBUSY; 2517 } 2518 } 2519 } 2520 2521 return 0; 2522 } 2523 2524 static int tg3_phy_reset_chanpat(struct tg3 *tp) 2525 { 2526 int chan; 2527 2528 for (chan = 0; chan < 4; chan++) { 2529 int i; 2530 2531 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 2532 (chan * 0x2000) | 0x0200); 2533 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002); 2534 for (i = 0; i < 6; i++) 2535 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000); 2536 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202); 2537 if (tg3_wait_macro_done(tp)) 2538 return -EBUSY; 2539 } 2540 2541 return 0; 2542 } 2543 2544 static int tg3_phy_reset_5703_4_5(struct tg3 *tp) 2545 { 2546 u32 reg32, phy9_orig; 2547 int retries, do_phy_reset, err; 2548 2549 retries = 10; 2550 do_phy_reset = 1; 2551 do { 2552 if (do_phy_reset) { 2553 err = tg3_bmcr_reset(tp); 2554 if (err) 2555 return err; 2556 do_phy_reset = 0; 2557 } 2558 2559 /* Disable transmitter and interrupt. */ 2560 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) 2561 continue; 2562 2563 reg32 |= 0x3000; 2564 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32); 2565 2566 /* Set full-duplex, 1000 mbps. */ 2567 tg3_writephy(tp, MII_BMCR, 2568 BMCR_FULLDPLX | BMCR_SPEED1000); 2569 2570 /* Set to master mode. */ 2571 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig)) 2572 continue; 2573 2574 tg3_writephy(tp, MII_CTRL1000, 2575 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER); 2576 2577 err = tg3_phy_toggle_auxctl_smdsp(tp, true); 2578 if (err) 2579 return err; 2580 2581 /* Block the PHY control access. */ 2582 tg3_phydsp_write(tp, 0x8005, 0x0800); 2583 2584 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset); 2585 if (!err) 2586 break; 2587 } while (--retries); 2588 2589 err = tg3_phy_reset_chanpat(tp); 2590 if (err) 2591 return err; 2592 2593 tg3_phydsp_write(tp, 0x8005, 0x0000); 2594 2595 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200); 2596 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000); 2597 2598 tg3_phy_toggle_auxctl_smdsp(tp, false); 2599 2600 tg3_writephy(tp, MII_CTRL1000, phy9_orig); 2601 2602 err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32); 2603 if (err) 2604 return err; 2605 2606 reg32 &= ~0x3000; 2607 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32); 2608 2609 return 0; 2610 } 2611 2612 static void tg3_carrier_off(struct tg3 *tp) 2613 { 2614 netif_carrier_off(tp->dev); 2615 tp->link_up = false; 2616 } 2617 2618 static void tg3_warn_mgmt_link_flap(struct tg3 *tp) 2619 { 2620 if (tg3_flag(tp, ENABLE_ASF)) 2621 netdev_warn(tp->dev, 2622 "Management side-band traffic will be interrupted during phy settings change\n"); 2623 } 2624 2625 /* This will unconditionally reset the tigon3 PHY and then reapply 2626 * the chip-specific PHY workarounds (OTP, APD and DSP fixups).
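 * Returns 0 on success, or a negative errno if the PHY does not respond to the initial MII_BMSR reads or one of the workaround steps fails.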
2627 */ 2628 static int tg3_phy_reset(struct tg3 *tp) 2629 { 2630 u32 val, cpmuctrl; 2631 int err; 2632 2633 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 2634 val = tr32(GRC_MISC_CFG); 2635 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ); 2636 udelay(40); 2637 } 2638 err = tg3_readphy(tp, MII_BMSR, &val); 2639 err |= tg3_readphy(tp, MII_BMSR, &val); 2640 if (err != 0) 2641 return -EBUSY; 2642 2643 if (netif_running(tp->dev) && tp->link_up) { 2644 netif_carrier_off(tp->dev); 2645 tg3_link_report(tp); 2646 } 2647 2648 if (tg3_asic_rev(tp) == ASIC_REV_5703 || 2649 tg3_asic_rev(tp) == ASIC_REV_5704 || 2650 tg3_asic_rev(tp) == ASIC_REV_5705) { 2651 err = tg3_phy_reset_5703_4_5(tp); 2652 if (err) 2653 return err; 2654 goto out; 2655 } 2656 2657 cpmuctrl = 0; 2658 if (tg3_asic_rev(tp) == ASIC_REV_5784 && 2659 tg3_chip_rev(tp) != CHIPREV_5784_AX) { 2660 cpmuctrl = tr32(TG3_CPMU_CTRL); 2661 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) 2662 tw32(TG3_CPMU_CTRL, 2663 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY); 2664 } 2665 2666 err = tg3_bmcr_reset(tp); 2667 if (err) 2668 return err; 2669 2670 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) { 2671 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz; 2672 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val); 2673 2674 tw32(TG3_CPMU_CTRL, cpmuctrl); 2675 } 2676 2677 if (tg3_chip_rev(tp) == CHIPREV_5784_AX || 2678 tg3_chip_rev(tp) == CHIPREV_5761_AX) { 2679 val = tr32(TG3_CPMU_LSPD_1000MB_CLK); 2680 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) == 2681 CPMU_LSPD_1000MB_MACCLK_12_5) { 2682 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK; 2683 udelay(40); 2684 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val); 2685 } 2686 } 2687 2688 if (tg3_flag(tp, 5717_PLUS) && 2689 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) 2690 return 0; 2691 2692 tg3_phy_apply_otp(tp); 2693 2694 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) 2695 tg3_phy_toggle_apd(tp, true); 2696 else 2697 tg3_phy_toggle_apd(tp, false); 2698 2699 out: 2700 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) && 2701 !tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2702 tg3_phydsp_write(tp, 0x201f, 0x2aaa); 2703 tg3_phydsp_write(tp, 0x000a, 0x0323); 2704 tg3_phy_toggle_auxctl_smdsp(tp, false); 2705 } 2706 2707 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) { 2708 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); 2709 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); 2710 } 2711 2712 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) { 2713 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2714 tg3_phydsp_write(tp, 0x000a, 0x310b); 2715 tg3_phydsp_write(tp, 0x201f, 0x9506); 2716 tg3_phydsp_write(tp, 0x401f, 0x14e2); 2717 tg3_phy_toggle_auxctl_smdsp(tp, false); 2718 } 2719 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) { 2720 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2721 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); 2722 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) { 2723 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b); 2724 tg3_writephy(tp, MII_TG3_TEST1, 2725 MII_TG3_TEST1_TRIM_EN | 0x4); 2726 } else 2727 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b); 2728 2729 tg3_phy_toggle_auxctl_smdsp(tp, false); 2730 } 2731 } 2732 2733 /* Set Extended packet length bit (bit 14) on all chips that */ 2734 /* support jumbo frames */ 2735 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { 2736 /* Cannot do read-modify-write on 5401 */ 2737 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20); 2738 } else if (tg3_flag(tp, JUMBO_CAPABLE)) { 2739 /* Set bit 14 with read-modify-write to preserve other bits */ 2740 err = tg3_phy_auxctl_read(tp, 2741 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val); 2742 
if (!err) 2743 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 2744 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN); 2745 } 2746 2747 /* Set phy register 0x10 bit 0 to high fifo elasticity to support 2748 * jumbo frames transmission. 2749 */ 2750 if (tg3_flag(tp, JUMBO_CAPABLE)) { 2751 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val)) 2752 tg3_writephy(tp, MII_TG3_EXT_CTRL, 2753 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC); 2754 } 2755 2756 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 2757 /* adjust output voltage */ 2758 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12); 2759 } 2760 2761 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0) 2762 tg3_phydsp_write(tp, 0xffb, 0x4000); 2763 2764 tg3_phy_toggle_automdix(tp, true); 2765 tg3_phy_set_wirespeed(tp); 2766 return 0; 2767 } 2768 2769 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001 2770 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002 2771 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \ 2772 TG3_GPIO_MSG_NEED_VAUX) 2773 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \ 2774 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \ 2775 (TG3_GPIO_MSG_DRVR_PRES << 4) | \ 2776 (TG3_GPIO_MSG_DRVR_PRES << 8) | \ 2777 (TG3_GPIO_MSG_DRVR_PRES << 12)) 2778 2779 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \ 2780 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \ 2781 (TG3_GPIO_MSG_NEED_VAUX << 4) | \ 2782 (TG3_GPIO_MSG_NEED_VAUX << 8) | \ 2783 (TG3_GPIO_MSG_NEED_VAUX << 12)) 2784 2785 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat) 2786 { 2787 u32 status, shift; 2788 2789 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 2790 tg3_asic_rev(tp) == ASIC_REV_5719) 2791 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG); 2792 else 2793 status = tr32(TG3_CPMU_DRV_STATUS); 2794 2795 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn; 2796 status &= ~(TG3_GPIO_MSG_MASK << shift); 2797 status |= (newstat << shift); 2798 2799 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 2800 tg3_asic_rev(tp) == ASIC_REV_5719) 2801 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status); 2802 else 2803 tw32(TG3_CPMU_DRV_STATUS, status); 2804 2805 return status >> TG3_APE_GPIO_MSG_SHIFT; 2806 } 2807 2808 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp) 2809 { 2810 if (!tg3_flag(tp, IS_NIC)) 2811 return 0; 2812 2813 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 2814 tg3_asic_rev(tp) == ASIC_REV_5719 || 2815 tg3_asic_rev(tp) == ASIC_REV_5720) { 2816 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO)) 2817 return -EIO; 2818 2819 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES); 2820 2821 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 2822 TG3_GRC_LCLCTL_PWRSW_DELAY); 2823 2824 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO); 2825 } else { 2826 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 2827 TG3_GRC_LCLCTL_PWRSW_DELAY); 2828 } 2829 2830 return 0; 2831 } 2832 2833 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp) 2834 { 2835 u32 grc_local_ctrl; 2836 2837 if (!tg3_flag(tp, IS_NIC) || 2838 tg3_asic_rev(tp) == ASIC_REV_5700 || 2839 tg3_asic_rev(tp) == ASIC_REV_5701) 2840 return; 2841 2842 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1; 2843 2844 tw32_wait_f(GRC_LOCAL_CTRL, 2845 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1, 2846 TG3_GRC_LCLCTL_PWRSW_DELAY); 2847 2848 tw32_wait_f(GRC_LOCAL_CTRL, 2849 grc_local_ctrl, 2850 TG3_GRC_LCLCTL_PWRSW_DELAY); 2851 2852 tw32_wait_f(GRC_LOCAL_CTRL, 2853 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1, 2854 TG3_GRC_LCLCTL_PWRSW_DELAY); 2855 } 2856 2857 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp) 2858 { 2859 if (!tg3_flag(tp, IS_NIC)) 2860 return; 2861 2862 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 2863 tg3_asic_rev(tp) == 
ASIC_REV_5701) { 2864 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | 2865 (GRC_LCLCTRL_GPIO_OE0 | 2866 GRC_LCLCTRL_GPIO_OE1 | 2867 GRC_LCLCTRL_GPIO_OE2 | 2868 GRC_LCLCTRL_GPIO_OUTPUT0 | 2869 GRC_LCLCTRL_GPIO_OUTPUT1), 2870 TG3_GRC_LCLCTL_PWRSW_DELAY); 2871 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || 2872 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { 2873 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */ 2874 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 | 2875 GRC_LCLCTRL_GPIO_OE1 | 2876 GRC_LCLCTRL_GPIO_OE2 | 2877 GRC_LCLCTRL_GPIO_OUTPUT0 | 2878 GRC_LCLCTRL_GPIO_OUTPUT1 | 2879 tp->grc_local_ctrl; 2880 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 2881 TG3_GRC_LCLCTL_PWRSW_DELAY); 2882 2883 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2; 2884 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 2885 TG3_GRC_LCLCTL_PWRSW_DELAY); 2886 2887 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0; 2888 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 2889 TG3_GRC_LCLCTL_PWRSW_DELAY); 2890 } else { 2891 u32 no_gpio2; 2892 u32 grc_local_ctrl = 0; 2893 2894 /* Workaround to prevent overdrawing Amps. */ 2895 if (tg3_asic_rev(tp) == ASIC_REV_5714) { 2896 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; 2897 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | 2898 grc_local_ctrl, 2899 TG3_GRC_LCLCTL_PWRSW_DELAY); 2900 } 2901 2902 /* On 5753 and variants, GPIO2 cannot be used. */ 2903 no_gpio2 = tp->nic_sram_data_cfg & 2904 NIC_SRAM_DATA_CFG_NO_GPIO2; 2905 2906 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | 2907 GRC_LCLCTRL_GPIO_OE1 | 2908 GRC_LCLCTRL_GPIO_OE2 | 2909 GRC_LCLCTRL_GPIO_OUTPUT1 | 2910 GRC_LCLCTRL_GPIO_OUTPUT2; 2911 if (no_gpio2) { 2912 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 | 2913 GRC_LCLCTRL_GPIO_OUTPUT2); 2914 } 2915 tw32_wait_f(GRC_LOCAL_CTRL, 2916 tp->grc_local_ctrl | grc_local_ctrl, 2917 TG3_GRC_LCLCTL_PWRSW_DELAY); 2918 2919 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0; 2920 2921 tw32_wait_f(GRC_LOCAL_CTRL, 2922 tp->grc_local_ctrl | grc_local_ctrl, 2923 TG3_GRC_LCLCTL_PWRSW_DELAY); 2924 2925 if (!no_gpio2) { 2926 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2; 2927 tw32_wait_f(GRC_LOCAL_CTRL, 2928 tp->grc_local_ctrl | grc_local_ctrl, 2929 TG3_GRC_LCLCTL_PWRSW_DELAY); 2930 } 2931 } 2932 } 2933 2934 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable) 2935 { 2936 u32 msg = 0; 2937 2938 /* Serialize power state transitions */ 2939 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO)) 2940 return; 2941 2942 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable) 2943 msg = TG3_GPIO_MSG_NEED_VAUX; 2944 2945 msg = tg3_set_function_status(tp, msg); 2946 2947 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK) 2948 goto done; 2949 2950 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK) 2951 tg3_pwrsrc_switch_to_vaux(tp); 2952 else 2953 tg3_pwrsrc_die_with_vmain(tp); 2954 2955 done: 2956 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO); 2957 } 2958 2959 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol) 2960 { 2961 bool need_vaux = false; 2962 2963 /* The GPIOs do something completely different on 57765. */ 2964 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS)) 2965 return; 2966 2967 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 2968 tg3_asic_rev(tp) == ASIC_REV_5719 || 2969 tg3_asic_rev(tp) == ASIC_REV_5720) { 2970 tg3_frob_aux_power_5717(tp, include_wol ? 
2971 tg3_flag(tp, WOL_ENABLE) != 0 : 0); 2972 return; 2973 } 2974 2975 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) { 2976 struct net_device *dev_peer; 2977 2978 dev_peer = pci_get_drvdata(tp->pdev_peer); 2979 2980 /* remove_one() may have been run on the peer. */ 2981 if (dev_peer) { 2982 struct tg3 *tp_peer = netdev_priv(dev_peer); 2983 2984 if (tg3_flag(tp_peer, INIT_COMPLETE)) 2985 return; 2986 2987 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) || 2988 tg3_flag(tp_peer, ENABLE_ASF)) 2989 need_vaux = true; 2990 } 2991 } 2992 2993 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) || 2994 tg3_flag(tp, ENABLE_ASF)) 2995 need_vaux = true; 2996 2997 if (need_vaux) 2998 tg3_pwrsrc_switch_to_vaux(tp); 2999 else 3000 tg3_pwrsrc_die_with_vmain(tp); 3001 } 3002 3003 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed) 3004 { 3005 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2) 3006 return 1; 3007 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) { 3008 if (speed != SPEED_10) 3009 return 1; 3010 } else if (speed == SPEED_10) 3011 return 1; 3012 3013 return 0; 3014 } 3015 3016 static bool tg3_phy_power_bug(struct tg3 *tp) 3017 { 3018 switch (tg3_asic_rev(tp)) { 3019 case ASIC_REV_5700: 3020 case ASIC_REV_5704: 3021 return true; 3022 case ASIC_REV_5780: 3023 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 3024 return true; 3025 return false; 3026 case ASIC_REV_5717: 3027 if (!tp->pci_fn) 3028 return true; 3029 return false; 3030 case ASIC_REV_5719: 3031 case ASIC_REV_5720: 3032 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 3033 !tp->pci_fn) 3034 return true; 3035 return false; 3036 } 3037 3038 return false; 3039 } 3040 3041 static bool tg3_phy_led_bug(struct tg3 *tp) 3042 { 3043 switch (tg3_asic_rev(tp)) { 3044 case ASIC_REV_5719: 3045 case ASIC_REV_5720: 3046 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && 3047 !tp->pci_fn) 3048 return true; 3049 return false; 3050 } 3051 3052 return false; 3053 } 3054 3055 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) 3056 { 3057 u32 val; 3058 3059 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) 3060 return; 3061 3062 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 3063 if (tg3_asic_rev(tp) == ASIC_REV_5704) { 3064 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL); 3065 u32 serdes_cfg = tr32(MAC_SERDES_CFG); 3066 3067 sg_dig_ctrl |= 3068 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET; 3069 tw32(SG_DIG_CTRL, sg_dig_ctrl); 3070 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15)); 3071 } 3072 return; 3073 } 3074 3075 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 3076 tg3_bmcr_reset(tp); 3077 val = tr32(GRC_MISC_CFG); 3078 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ); 3079 udelay(40); 3080 return; 3081 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 3082 u32 phytest; 3083 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) { 3084 u32 phy; 3085 3086 tg3_writephy(tp, MII_ADVERTISE, 0); 3087 tg3_writephy(tp, MII_BMCR, 3088 BMCR_ANENABLE | BMCR_ANRESTART); 3089 3090 tg3_writephy(tp, MII_TG3_FET_TEST, 3091 phytest | MII_TG3_FET_SHADOW_EN); 3092 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) { 3093 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD; 3094 tg3_writephy(tp, 3095 MII_TG3_FET_SHDW_AUXMODE4, 3096 phy); 3097 } 3098 tg3_writephy(tp, MII_TG3_FET_TEST, phytest); 3099 } 3100 return; 3101 } else if (do_low_power) { 3102 if (!tg3_phy_led_bug(tp)) 3103 tg3_writephy(tp, MII_TG3_EXT_CTRL, 3104 MII_TG3_EXT_CTRL_FORCE_LED_OFF); 3105 3106 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR | 3107 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE | 3108 MII_TG3_AUXCTL_PCTL_VREG_11V; 3109 
tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val); 3110 } 3111 3112 /* The PHY should not be powered down on some chips because 3113 * of bugs. 3114 */ 3115 if (tg3_phy_power_bug(tp)) 3116 return; 3117 3118 if (tg3_chip_rev(tp) == CHIPREV_5784_AX || 3119 tg3_chip_rev(tp) == CHIPREV_5761_AX) { 3120 val = tr32(TG3_CPMU_LSPD_1000MB_CLK); 3121 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK; 3122 val |= CPMU_LSPD_1000MB_MACCLK_12_5; 3123 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val); 3124 } 3125 3126 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN); 3127 } 3128 3129 /* tp->lock is held. */ 3130 static int tg3_nvram_lock(struct tg3 *tp) 3131 { 3132 if (tg3_flag(tp, NVRAM)) { 3133 int i; 3134 3135 if (tp->nvram_lock_cnt == 0) { 3136 tw32(NVRAM_SWARB, SWARB_REQ_SET1); 3137 for (i = 0; i < 8000; i++) { 3138 if (tr32(NVRAM_SWARB) & SWARB_GNT1) 3139 break; 3140 udelay(20); 3141 } 3142 if (i == 8000) { 3143 tw32(NVRAM_SWARB, SWARB_REQ_CLR1); 3144 return -ENODEV; 3145 } 3146 } 3147 tp->nvram_lock_cnt++; 3148 } 3149 return 0; 3150 } 3151 3152 /* tp->lock is held. */ 3153 static void tg3_nvram_unlock(struct tg3 *tp) 3154 { 3155 if (tg3_flag(tp, NVRAM)) { 3156 if (tp->nvram_lock_cnt > 0) 3157 tp->nvram_lock_cnt--; 3158 if (tp->nvram_lock_cnt == 0) 3159 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1); 3160 } 3161 } 3162 3163 /* tp->lock is held. */ 3164 static void tg3_enable_nvram_access(struct tg3 *tp) 3165 { 3166 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) { 3167 u32 nvaccess = tr32(NVRAM_ACCESS); 3168 3169 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE); 3170 } 3171 } 3172 3173 /* tp->lock is held. */ 3174 static void tg3_disable_nvram_access(struct tg3 *tp) 3175 { 3176 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) { 3177 u32 nvaccess = tr32(NVRAM_ACCESS); 3178 3179 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE); 3180 } 3181 } 3182 3183 static int tg3_nvram_read_using_eeprom(struct tg3 *tp, 3184 u32 offset, u32 *val) 3185 { 3186 u32 tmp; 3187 int i; 3188 3189 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0) 3190 return -EINVAL; 3191 3192 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK | 3193 EEPROM_ADDR_DEVID_MASK | 3194 EEPROM_ADDR_READ); 3195 tw32(GRC_EEPROM_ADDR, 3196 tmp | 3197 (0 << EEPROM_ADDR_DEVID_SHIFT) | 3198 ((offset << EEPROM_ADDR_ADDR_SHIFT) & 3199 EEPROM_ADDR_ADDR_MASK) | 3200 EEPROM_ADDR_READ | EEPROM_ADDR_START); 3201 3202 for (i = 0; i < 1000; i++) { 3203 tmp = tr32(GRC_EEPROM_ADDR); 3204 3205 if (tmp & EEPROM_ADDR_COMPLETE) 3206 break; 3207 msleep(1); 3208 } 3209 if (!(tmp & EEPROM_ADDR_COMPLETE)) 3210 return -EBUSY; 3211 3212 tmp = tr32(GRC_EEPROM_DATA); 3213 3214 /* 3215 * The data will always be opposite the native endian 3216 * format. Perform a blind byteswap to compensate. 
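 * Worked example: if GRC_EEPROM_DATA reads back 0x12345678, the value stored through *val is swab32(0x12345678) == 0x78563412, on both little- and big-endian hosts.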
3217 */ 3218 *val = swab32(tmp); 3219 3220 return 0; 3221 } 3222 3223 #define NVRAM_CMD_TIMEOUT 10000 3224 3225 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd) 3226 { 3227 int i; 3228 3229 tw32(NVRAM_CMD, nvram_cmd); 3230 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) { 3231 usleep_range(10, 40); 3232 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) { 3233 udelay(10); 3234 break; 3235 } 3236 } 3237 3238 if (i == NVRAM_CMD_TIMEOUT) 3239 return -EBUSY; 3240 3241 return 0; 3242 } 3243 3244 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr) 3245 { 3246 if (tg3_flag(tp, NVRAM) && 3247 tg3_flag(tp, NVRAM_BUFFERED) && 3248 tg3_flag(tp, FLASH) && 3249 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) && 3250 (tp->nvram_jedecnum == JEDEC_ATMEL)) 3251 3252 addr = ((addr / tp->nvram_pagesize) << 3253 ATMEL_AT45DB0X1B_PAGE_POS) + 3254 (addr % tp->nvram_pagesize); 3255 3256 return addr; 3257 } 3258 3259 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr) 3260 { 3261 if (tg3_flag(tp, NVRAM) && 3262 tg3_flag(tp, NVRAM_BUFFERED) && 3263 tg3_flag(tp, FLASH) && 3264 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) && 3265 (tp->nvram_jedecnum == JEDEC_ATMEL)) 3266 3267 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) * 3268 tp->nvram_pagesize) + 3269 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1)); 3270 3271 return addr; 3272 } 3273 3274 /* NOTE: Data read in from NVRAM is byteswapped according to 3275 * the byteswapping settings for all other register accesses. 3276 * tg3 devices are BE devices, so on a BE machine, the data 3277 * returned will be exactly as it is seen in NVRAM. On a LE 3278 * machine, the 32-bit value will be byteswapped. 3279 */ 3280 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val) 3281 { 3282 int ret; 3283 3284 if (!tg3_flag(tp, NVRAM)) 3285 return tg3_nvram_read_using_eeprom(tp, offset, val); 3286 3287 offset = tg3_nvram_phys_addr(tp, offset); 3288 3289 if (offset > NVRAM_ADDR_MSK) 3290 return -EINVAL; 3291 3292 ret = tg3_nvram_lock(tp); 3293 if (ret) 3294 return ret; 3295 3296 tg3_enable_nvram_access(tp); 3297 3298 tw32(NVRAM_ADDR, offset); 3299 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO | 3300 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE); 3301 3302 if (ret == 0) 3303 *val = tr32(NVRAM_RDDATA); 3304 3305 tg3_disable_nvram_access(tp); 3306 3307 tg3_nvram_unlock(tp); 3308 3309 return ret; 3310 } 3311 3312 /* Ensures NVRAM data is in bytestream format. */ 3313 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val) 3314 { 3315 u32 v; 3316 int res = tg3_nvram_read(tp, offset, &v); 3317 if (!res) 3318 *val = cpu_to_be32(v); 3319 return res; 3320 } 3321 3322 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp, 3323 u32 offset, u32 len, u8 *buf) 3324 { 3325 int i, j, rc = 0; 3326 u32 val; 3327 3328 for (i = 0; i < len; i += 4) { 3329 u32 addr; 3330 __be32 data; 3331 3332 addr = offset + i; 3333 3334 memcpy(&data, buf + i, 4); 3335 3336 /* 3337 * The SEEPROM interface expects the data to always be opposite 3338 * the native endian format. We accomplish this by reversing 3339 * all the operations that would have been performed on the 3340 * data from a call to tg3_nvram_read_be32(). 
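 * Concretely: bytestream bytes aa bb cc dd in buf yield be32_to_cpu(data) == 0xaabbccdd, and swab32() then writes 0xddccbbaa to GRC_EEPROM_DATA.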
3341 */ 3342 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data))); 3343 3344 val = tr32(GRC_EEPROM_ADDR); 3345 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE); 3346 3347 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK | 3348 EEPROM_ADDR_READ); 3349 tw32(GRC_EEPROM_ADDR, val | 3350 (0 << EEPROM_ADDR_DEVID_SHIFT) | 3351 (addr & EEPROM_ADDR_ADDR_MASK) | 3352 EEPROM_ADDR_START | 3353 EEPROM_ADDR_WRITE); 3354 3355 for (j = 0; j < 1000; j++) { 3356 val = tr32(GRC_EEPROM_ADDR); 3357 3358 if (val & EEPROM_ADDR_COMPLETE) 3359 break; 3360 msleep(1); 3361 } 3362 if (!(val & EEPROM_ADDR_COMPLETE)) { 3363 rc = -EBUSY; 3364 break; 3365 } 3366 } 3367 3368 return rc; 3369 } 3370 3371 /* offset and length are dword aligned */ 3372 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len, 3373 u8 *buf) 3374 { 3375 int ret = 0; 3376 u32 pagesize = tp->nvram_pagesize; 3377 u32 pagemask = pagesize - 1; 3378 u32 nvram_cmd; 3379 u8 *tmp; 3380 3381 tmp = kmalloc(pagesize, GFP_KERNEL); 3382 if (tmp == NULL) 3383 return -ENOMEM; 3384 3385 while (len) { 3386 int j; 3387 u32 phy_addr, page_off, size; 3388 3389 phy_addr = offset & ~pagemask; 3390 3391 for (j = 0; j < pagesize; j += 4) { 3392 ret = tg3_nvram_read_be32(tp, phy_addr + j, 3393 (__be32 *) (tmp + j)); 3394 if (ret) 3395 break; 3396 } 3397 if (ret) 3398 break; 3399 3400 page_off = offset & pagemask; 3401 size = pagesize; 3402 if (len < size) 3403 size = len; 3404 3405 len -= size; 3406 3407 memcpy(tmp + page_off, buf, size); 3408 3409 offset = offset + (pagesize - page_off); 3410 3411 tg3_enable_nvram_access(tp); 3412 3413 /* 3414 * Before we can erase the flash page, we need 3415 * to issue a special "write enable" command. 3416 */ 3417 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; 3418 3419 if (tg3_nvram_exec_cmd(tp, nvram_cmd)) 3420 break; 3421 3422 /* Erase the target page */ 3423 tw32(NVRAM_ADDR, phy_addr); 3424 3425 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR | 3426 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE; 3427 3428 if (tg3_nvram_exec_cmd(tp, nvram_cmd)) 3429 break; 3430 3431 /* Issue another write enable to start the write. 
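 * (Flash parts typically clear their write-enable latch once an erase or program operation completes, so WREN has to be issued again before programming the page.)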
*/ 3432 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; 3433 3434 if (tg3_nvram_exec_cmd(tp, nvram_cmd)) 3435 break; 3436 3437 for (j = 0; j < pagesize; j += 4) { 3438 __be32 data; 3439 3440 data = *((__be32 *) (tmp + j)); 3441 3442 tw32(NVRAM_WRDATA, be32_to_cpu(data)); 3443 3444 tw32(NVRAM_ADDR, phy_addr + j); 3445 3446 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | 3447 NVRAM_CMD_WR; 3448 3449 if (j == 0) 3450 nvram_cmd |= NVRAM_CMD_FIRST; 3451 else if (j == (pagesize - 4)) 3452 nvram_cmd |= NVRAM_CMD_LAST; 3453 3454 ret = tg3_nvram_exec_cmd(tp, nvram_cmd); 3455 if (ret) 3456 break; 3457 } 3458 if (ret) 3459 break; 3460 } 3461 3462 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE; 3463 tg3_nvram_exec_cmd(tp, nvram_cmd); 3464 3465 kfree(tmp); 3466 3467 return ret; 3468 } 3469 3470 /* offset and length are dword aligned */ 3471 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len, 3472 u8 *buf) 3473 { 3474 int i, ret = 0; 3475 3476 for (i = 0; i < len; i += 4, offset += 4) { 3477 u32 page_off, phy_addr, nvram_cmd; 3478 __be32 data; 3479 3480 memcpy(&data, buf + i, 4); 3481 tw32(NVRAM_WRDATA, be32_to_cpu(data)); 3482 3483 page_off = offset % tp->nvram_pagesize; 3484 3485 phy_addr = tg3_nvram_phys_addr(tp, offset); 3486 3487 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR; 3488 3489 if (page_off == 0 || i == 0) 3490 nvram_cmd |= NVRAM_CMD_FIRST; 3491 if (page_off == (tp->nvram_pagesize - 4)) 3492 nvram_cmd |= NVRAM_CMD_LAST; 3493 3494 if (i == (len - 4)) 3495 nvram_cmd |= NVRAM_CMD_LAST; 3496 3497 if ((nvram_cmd & NVRAM_CMD_FIRST) || 3498 !tg3_flag(tp, FLASH) || 3499 !tg3_flag(tp, 57765_PLUS)) 3500 tw32(NVRAM_ADDR, phy_addr); 3501 3502 if (tg3_asic_rev(tp) != ASIC_REV_5752 && 3503 !tg3_flag(tp, 5755_PLUS) && 3504 (tp->nvram_jedecnum == JEDEC_ST) && 3505 (nvram_cmd & NVRAM_CMD_FIRST)) { 3506 u32 cmd; 3507 3508 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; 3509 ret = tg3_nvram_exec_cmd(tp, cmd); 3510 if (ret) 3511 break; 3512 } 3513 if (!tg3_flag(tp, FLASH)) { 3514 /* We always do complete word writes to eeprom. 
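 * i.e. force both NVRAM_CMD_FIRST and NVRAM_CMD_LAST so that every 4-byte write is a self-contained transaction.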
*/ 3515 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST); 3516 } 3517 3518 ret = tg3_nvram_exec_cmd(tp, nvram_cmd); 3519 if (ret) 3520 break; 3521 } 3522 return ret; 3523 } 3524 3525 /* offset and length are dword aligned */ 3526 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf) 3527 { 3528 int ret; 3529 3530 if (tg3_flag(tp, EEPROM_WRITE_PROT)) { 3531 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl & 3532 ~GRC_LCLCTRL_GPIO_OUTPUT1); 3533 udelay(40); 3534 } 3535 3536 if (!tg3_flag(tp, NVRAM)) { 3537 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf); 3538 } else { 3539 u32 grc_mode; 3540 3541 ret = tg3_nvram_lock(tp); 3542 if (ret) 3543 return ret; 3544 3545 tg3_enable_nvram_access(tp); 3546 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) 3547 tw32(NVRAM_WRITE1, 0x406); 3548 3549 grc_mode = tr32(GRC_MODE); 3550 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE); 3551 3552 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) { 3553 ret = tg3_nvram_write_block_buffered(tp, offset, len, 3554 buf); 3555 } else { 3556 ret = tg3_nvram_write_block_unbuffered(tp, offset, len, 3557 buf); 3558 } 3559 3560 grc_mode = tr32(GRC_MODE); 3561 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE); 3562 3563 tg3_disable_nvram_access(tp); 3564 tg3_nvram_unlock(tp); 3565 } 3566 3567 if (tg3_flag(tp, EEPROM_WRITE_PROT)) { 3568 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 3569 udelay(40); 3570 } 3571 3572 return ret; 3573 } 3574 3575 #define RX_CPU_SCRATCH_BASE 0x30000 3576 #define RX_CPU_SCRATCH_SIZE 0x04000 3577 #define TX_CPU_SCRATCH_BASE 0x34000 3578 #define TX_CPU_SCRATCH_SIZE 0x04000 3579 3580 /* tp->lock is held. */ 3581 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base) 3582 { 3583 int i; 3584 const int iters = 10000; 3585 3586 for (i = 0; i < iters; i++) { 3587 tw32(cpu_base + CPU_STATE, 0xffffffff); 3588 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); 3589 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT) 3590 break; 3591 if (pci_channel_offline(tp->pdev)) 3592 return -EBUSY; 3593 } 3594 3595 return (i == iters) ? -EBUSY : 0; 3596 } 3597 3598 /* tp->lock is held. */ 3599 static int tg3_rxcpu_pause(struct tg3 *tp) 3600 { 3601 int rc = tg3_pause_cpu(tp, RX_CPU_BASE); 3602 3603 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); 3604 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT); 3605 udelay(10); 3606 3607 return rc; 3608 } 3609 3610 /* tp->lock is held. */ 3611 static int tg3_txcpu_pause(struct tg3 *tp) 3612 { 3613 return tg3_pause_cpu(tp, TX_CPU_BASE); 3614 } 3615 3616 /* tp->lock is held. */ 3617 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base) 3618 { 3619 tw32(cpu_base + CPU_STATE, 0xffffffff); 3620 tw32_f(cpu_base + CPU_MODE, 0x00000000); 3621 } 3622 3623 /* tp->lock is held. */ 3624 static void tg3_rxcpu_resume(struct tg3 *tp) 3625 { 3626 tg3_resume_cpu(tp, RX_CPU_BASE); 3627 } 3628 3629 /* tp->lock is held. */ 3630 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base) 3631 { 3632 int rc; 3633 3634 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)); 3635 3636 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 3637 u32 val = tr32(GRC_VCPU_EXT_CTRL); 3638 3639 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU); 3640 return 0; 3641 } 3642 if (cpu_base == RX_CPU_BASE) { 3643 rc = tg3_rxcpu_pause(tp); 3644 } else { 3645 /* 3646 * There is only an Rx CPU for the 5750 derivative in the 3647 * BCM4785. 
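 * Treat a TX CPU pause request as an immediate success on those parts.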
3648 */ 3649 if (tg3_flag(tp, IS_SSB_CORE)) 3650 return 0; 3651 3652 rc = tg3_txcpu_pause(tp); 3653 } 3654 3655 if (rc) { 3656 netdev_err(tp->dev, "%s timed out, %s CPU\n", 3657 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX"); 3658 return -ENODEV; 3659 } 3660 3661 /* Clear firmware's nvram arbitration. */ 3662 if (tg3_flag(tp, NVRAM)) 3663 tw32(NVRAM_SWARB, SWARB_REQ_CLR0); 3664 return 0; 3665 } 3666 3667 static int tg3_fw_data_len(struct tg3 *tp, 3668 const struct tg3_firmware_hdr *fw_hdr) 3669 { 3670 int fw_len; 3671 3672 /* Non-fragmented firmware has one firmware header followed by a 3673 * contiguous chunk of data to be written. The length field in that 3674 * header is not the length of the data to be written but the complete 3675 * length of the bss. The data length is determined from 3676 * tp->fw->size minus headers. 3677 * 3678 * Fragmented firmware has a main header followed by multiple 3679 * fragments. Each fragment is identical to non-fragmented firmware, 3680 * with a firmware header followed by a contiguous chunk of data. In 3681 * the main header, the length field is unused and set to 0xffffffff. 3682 * In each fragment header the length is the entire size of that 3683 * fragment, i.e. fragment data + header length. The data length is 3684 * therefore the length field in the header minus TG3_FW_HDR_LEN. 3685 */ 3686 if (tp->fw_len == 0xffffffff) 3687 fw_len = be32_to_cpu(fw_hdr->len); 3688 else 3689 fw_len = tp->fw->size; 3690 3691 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32); 3692 } 3693 3694 /* tp->lock is held. */ 3695 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, 3696 u32 cpu_scratch_base, int cpu_scratch_size, 3697 const struct tg3_firmware_hdr *fw_hdr) 3698 { 3699 int err, i; 3700 void (*write_op)(struct tg3 *, u32, u32); 3701 int total_len = tp->fw->size; 3702 3703 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) { 3704 netdev_err(tp->dev, 3705 "%s: Trying to load TX cpu firmware which is 5705\n", 3706 __func__); 3707 return -EINVAL; 3708 } 3709 3710 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766) 3711 write_op = tg3_write_mem; 3712 else 3713 write_op = tg3_write_indirect_reg32; 3714 3715 if (tg3_asic_rev(tp) != ASIC_REV_57766) { 3716 /* It is possible that bootcode is still loading at this point. 3717 * Get the nvram lock first before halting the cpu. 3718 */ 3719 int lock_err = tg3_nvram_lock(tp); 3720 err = tg3_halt_cpu(tp, cpu_base); 3721 if (!lock_err) 3722 tg3_nvram_unlock(tp); 3723 if (err) 3724 goto out; 3725 3726 for (i = 0; i < cpu_scratch_size; i += sizeof(u32)) 3727 write_op(tp, cpu_scratch_base + i, 0); 3728 tw32(cpu_base + CPU_STATE, 0xffffffff); 3729 tw32(cpu_base + CPU_MODE, 3730 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT); 3731 } else { 3732 /* Subtract additional main header for fragmented firmware and 3733 * advance to the first fragment 3734 */ 3735 total_len -= TG3_FW_HDR_LEN; 3736 fw_hdr++; 3737 } 3738 3739 do { 3740 u32 *fw_data = (u32 *)(fw_hdr + 1); 3741 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++) 3742 write_op(tp, cpu_scratch_base + 3743 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) + 3744 (i * sizeof(u32)), 3745 be32_to_cpu(fw_data[i])); 3746 3747 total_len -= be32_to_cpu(fw_hdr->len); 3748 3749 /* Advance to next fragment */ 3750 fw_hdr = (struct tg3_firmware_hdr *) 3751 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len)); 3752 } while (total_len > 0); 3753 3754 err = 0; 3755 3756 out: 3757 return err; 3758 } 3759 3760 /* tp->lock is held.
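 * Halts the CPU and points its program counter at the firmware entry point, retrying the halt + PC write up to five times until the PC reads back as expected.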
*/ 3761 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc) 3762 { 3763 int i; 3764 const int iters = 5; 3765 3766 tw32(cpu_base + CPU_STATE, 0xffffffff); 3767 tw32_f(cpu_base + CPU_PC, pc); 3768 3769 for (i = 0; i < iters; i++) { 3770 if (tr32(cpu_base + CPU_PC) == pc) 3771 break; 3772 tw32(cpu_base + CPU_STATE, 0xffffffff); 3773 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); 3774 tw32_f(cpu_base + CPU_PC, pc); 3775 udelay(1000); 3776 } 3777 3778 return (i == iters) ? -EBUSY : 0; 3779 } 3780 3781 /* tp->lock is held. */ 3782 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp) 3783 { 3784 const struct tg3_firmware_hdr *fw_hdr; 3785 int err; 3786 3787 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; 3788 3789 /* Firmware blob starts with version numbers, followed by 3790 start address and length. We are setting complete length. 3791 length = end_address_of_bss - start_address_of_text. 3792 Remainder is the blob to be loaded contiguously 3793 from start address. */ 3794 3795 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE, 3796 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE, 3797 fw_hdr); 3798 if (err) 3799 return err; 3800 3801 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE, 3802 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE, 3803 fw_hdr); 3804 if (err) 3805 return err; 3806 3807 /* Now startup only the RX cpu. */ 3808 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE, 3809 be32_to_cpu(fw_hdr->base_addr)); 3810 if (err) { 3811 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x " 3812 "should be %08x\n", __func__, 3813 tr32(RX_CPU_BASE + CPU_PC), 3814 be32_to_cpu(fw_hdr->base_addr)); 3815 return -ENODEV; 3816 } 3817 3818 tg3_rxcpu_resume(tp); 3819 3820 return 0; 3821 } 3822 3823 static int tg3_validate_rxcpu_state(struct tg3 *tp) 3824 { 3825 const int iters = 1000; 3826 int i; 3827 u32 val; 3828 3829 /* Wait for boot code to complete initialization and enter service 3830 * loop. It is then safe to download service patches 3831 */ 3832 for (i = 0; i < iters; i++) { 3833 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP) 3834 break; 3835 3836 udelay(10); 3837 } 3838 3839 if (i == iters) { 3840 netdev_err(tp->dev, "Boot code not ready for service patches\n"); 3841 return -EBUSY; 3842 } 3843 3844 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE); 3845 if (val & 0xff) { 3846 netdev_warn(tp->dev, 3847 "Other patches exist. Not downloading EEE patch\n"); 3848 return -EEXIST; 3849 } 3850 3851 return 0; 3852 } 3853 3854 /* tp->lock is held. */ 3855 static void tg3_load_57766_firmware(struct tg3 *tp) 3856 { 3857 struct tg3_firmware_hdr *fw_hdr; 3858 3859 if (!tg3_flag(tp, NO_NVRAM)) 3860 return; 3861 3862 if (tg3_validate_rxcpu_state(tp)) 3863 return; 3864 3865 if (!tp->fw) 3866 return; 3867 3868 /* This firmware blob has a different format than older firmware 3869 * releases as given below. The main difference is we have fragmented 3870 * data to be written to non-contiguous locations. 3871 * 3872 * In the beginning we have a firmware header identical to other 3873 * firmware which consists of version, base addr and length. The length 3874 * here is unused and set to 0xffffffff. 3875 * 3876 * This is followed by a series of firmware fragments which are 3877 * individually identical to previous firmware. i.e. they have the 3878 * firmware header and followed by data for that fragment. The version 3879 * field of the individual fragment header is unused. 
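 * Sketch of the walk done by tg3_load_firmware_cpu() (hypothetical fragment lengths L1, L2, ...): main header with len == 0xffffffff, then a fragment header with len == L1 (TG3_FW_HDR_LEN + data bytes) followed by its data, then the next fragment header with len == L2, and so on until tp->fw->size minus the main header has been consumed.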
3880 */ 3881 3882 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; 3883 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR) 3884 return; 3885 3886 if (tg3_rxcpu_pause(tp)) 3887 return; 3888 3889 /* tg3_load_firmware_cpu() will always succeed for the 57766 */ 3890 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr); 3891 3892 tg3_rxcpu_resume(tp); 3893 } 3894 3895 /* tp->lock is held. */ 3896 static int tg3_load_tso_firmware(struct tg3 *tp) 3897 { 3898 const struct tg3_firmware_hdr *fw_hdr; 3899 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size; 3900 int err; 3901 3902 if (!tg3_flag(tp, FW_TSO)) 3903 return 0; 3904 3905 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; 3906 3907 /* Firmware blob starts with version numbers, followed by 3908 start address and length. We are setting complete length. 3909 length = end_address_of_bss - start_address_of_text. 3910 Remainder is the blob to be loaded contiguously 3911 from start address. */ 3912 3913 cpu_scratch_size = tp->fw_len; 3914 3915 if (tg3_asic_rev(tp) == ASIC_REV_5705) { 3916 cpu_base = RX_CPU_BASE; 3917 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705; 3918 } else { 3919 cpu_base = TX_CPU_BASE; 3920 cpu_scratch_base = TX_CPU_SCRATCH_BASE; 3921 cpu_scratch_size = TX_CPU_SCRATCH_SIZE; 3922 } 3923 3924 err = tg3_load_firmware_cpu(tp, cpu_base, 3925 cpu_scratch_base, cpu_scratch_size, 3926 fw_hdr); 3927 if (err) 3928 return err; 3929 3930 /* Now startup the cpu. */ 3931 err = tg3_pause_cpu_and_set_pc(tp, cpu_base, 3932 be32_to_cpu(fw_hdr->base_addr)); 3933 if (err) { 3934 netdev_err(tp->dev, 3935 "%s fails to set CPU PC, is %08x should be %08x\n", 3936 __func__, tr32(cpu_base + CPU_PC), 3937 be32_to_cpu(fw_hdr->base_addr)); 3938 return -ENODEV; 3939 } 3940 3941 tg3_resume_cpu(tp, cpu_base); 3942 return 0; 3943 } 3944 3945 /* tp->lock is held. */ 3946 static void __tg3_set_one_mac_addr(struct tg3 *tp, const u8 *mac_addr, 3947 int index) 3948 { 3949 u32 addr_high, addr_low; 3950 3951 addr_high = ((mac_addr[0] << 8) | mac_addr[1]); 3952 addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) | 3953 (mac_addr[4] << 8) | mac_addr[5]); 3954 3955 if (index < 4) { 3956 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high); 3957 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low); 3958 } else { 3959 index -= 4; 3960 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high); 3961 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low); 3962 } 3963 } 3964 3965 /* tp->lock is held. */ 3966 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1) 3967 { 3968 u32 addr_high; 3969 int i; 3970 3971 for (i = 0; i < 4; i++) { 3972 if (i == 1 && skip_mac_1) 3973 continue; 3974 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i); 3975 } 3976 3977 if (tg3_asic_rev(tp) == ASIC_REV_5703 || 3978 tg3_asic_rev(tp) == ASIC_REV_5704) { 3979 for (i = 4; i < 16; i++) 3980 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i); 3981 } 3982 3983 addr_high = (tp->dev->dev_addr[0] + 3984 tp->dev->dev_addr[1] + 3985 tp->dev->dev_addr[2] + 3986 tp->dev->dev_addr[3] + 3987 tp->dev->dev_addr[4] + 3988 tp->dev->dev_addr[5]) & 3989 TX_BACKOFF_SEED_MASK; 3990 tw32(MAC_TX_BACKOFF_SEED, addr_high); 3991 } 3992 3993 static void tg3_enable_register_access(struct tg3 *tp) 3994 { 3995 /* 3996 * Make sure register accesses (indirect or otherwise) will function 3997 * correctly. 
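 * Rewriting tp->misc_host_ctrl into TG3PCI_MISC_HOST_CTRL restores the register-access configuration, which may have been lost while the device sat in a low-power state.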
3998 */ 3999 pci_write_config_dword(tp->pdev, 4000 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl); 4001 } 4002 4003 static int tg3_power_up(struct tg3 *tp) 4004 { 4005 int err; 4006 4007 tg3_enable_register_access(tp); 4008 4009 err = pci_set_power_state(tp->pdev, PCI_D0); 4010 if (!err) { 4011 /* Switch out of Vaux if it is a NIC */ 4012 tg3_pwrsrc_switch_to_vmain(tp); 4013 } else { 4014 netdev_err(tp->dev, "Transition to D0 failed\n"); 4015 } 4016 4017 return err; 4018 } 4019 4020 static int tg3_setup_phy(struct tg3 *, bool); 4021 4022 static int tg3_power_down_prepare(struct tg3 *tp) 4023 { 4024 u32 misc_host_ctrl; 4025 bool device_should_wake, do_low_power; 4026 4027 tg3_enable_register_access(tp); 4028 4029 /* Restore the CLKREQ setting. */ 4030 if (tg3_flag(tp, CLKREQ_BUG)) 4031 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL, 4032 PCI_EXP_LNKCTL_CLKREQ_EN); 4033 4034 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); 4035 tw32(TG3PCI_MISC_HOST_CTRL, 4036 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT); 4037 4038 device_should_wake = device_may_wakeup(&tp->pdev->dev) && 4039 tg3_flag(tp, WOL_ENABLE); 4040 4041 if (tg3_flag(tp, USE_PHYLIB)) { 4042 do_low_power = false; 4043 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) && 4044 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { 4045 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, }; 4046 struct phy_device *phydev; 4047 u32 phyid; 4048 4049 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 4050 4051 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; 4052 4053 tp->link_config.speed = phydev->speed; 4054 tp->link_config.duplex = phydev->duplex; 4055 tp->link_config.autoneg = phydev->autoneg; 4056 ethtool_convert_link_mode_to_legacy_u32( 4057 &tp->link_config.advertising, 4058 phydev->advertising); 4059 4060 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising); 4061 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, 4062 advertising); 4063 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, 4064 advertising); 4065 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, 4066 advertising); 4067 4068 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) { 4069 if (tg3_flag(tp, WOL_SPEED_100MB)) { 4070 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, 4071 advertising); 4072 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, 4073 advertising); 4074 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, 4075 advertising); 4076 } else { 4077 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, 4078 advertising); 4079 } 4080 } 4081 4082 linkmode_copy(phydev->advertising, advertising); 4083 phy_start_aneg(phydev); 4084 4085 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask; 4086 if (phyid != PHY_ID_BCMAC131) { 4087 phyid &= PHY_BCM_OUI_MASK; 4088 if (phyid == PHY_BCM_OUI_1 || 4089 phyid == PHY_BCM_OUI_2 || 4090 phyid == PHY_BCM_OUI_3) 4091 do_low_power = true; 4092 } 4093 } 4094 } else { 4095 do_low_power = true; 4096 4097 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) 4098 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; 4099 4100 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 4101 tg3_setup_phy(tp, false); 4102 } 4103 4104 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 4105 u32 val; 4106 4107 val = tr32(GRC_VCPU_EXT_CTRL); 4108 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL); 4109 } else if (!tg3_flag(tp, ENABLE_ASF)) { 4110 int i; 4111 u32 val; 4112 4113 for (i = 0; i < 200; i++) { 4114 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val); 4115 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) 4116 break; 4117 msleep(1); 4118 } 4119 } 4120 if (tg3_flag(tp, WOL_CAP)) 
4121 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE | 4122 WOL_DRV_STATE_SHUTDOWN | 4123 WOL_DRV_WOL | 4124 WOL_SET_MAGIC_PKT); 4125 4126 if (device_should_wake) { 4127 u32 mac_mode; 4128 4129 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { 4130 if (do_low_power && 4131 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) { 4132 tg3_phy_auxctl_write(tp, 4133 MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 4134 MII_TG3_AUXCTL_PCTL_WOL_EN | 4135 MII_TG3_AUXCTL_PCTL_100TX_LPWR | 4136 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC); 4137 udelay(40); 4138 } 4139 4140 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 4141 mac_mode = MAC_MODE_PORT_MODE_GMII; 4142 else if (tp->phy_flags & 4143 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) { 4144 if (tp->link_config.active_speed == SPEED_1000) 4145 mac_mode = MAC_MODE_PORT_MODE_GMII; 4146 else 4147 mac_mode = MAC_MODE_PORT_MODE_MII; 4148 } else 4149 mac_mode = MAC_MODE_PORT_MODE_MII; 4150 4151 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY; 4152 if (tg3_asic_rev(tp) == ASIC_REV_5700) { 4153 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ? 4154 SPEED_100 : SPEED_10; 4155 if (tg3_5700_link_polarity(tp, speed)) 4156 mac_mode |= MAC_MODE_LINK_POLARITY; 4157 else 4158 mac_mode &= ~MAC_MODE_LINK_POLARITY; 4159 } 4160 } else { 4161 mac_mode = MAC_MODE_PORT_MODE_TBI; 4162 } 4163 4164 if (!tg3_flag(tp, 5750_PLUS)) 4165 tw32(MAC_LED_CTRL, tp->led_ctrl); 4166 4167 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE; 4168 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) && 4169 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE))) 4170 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL; 4171 4172 if (tg3_flag(tp, ENABLE_APE)) 4173 mac_mode |= MAC_MODE_APE_TX_EN | 4174 MAC_MODE_APE_RX_EN | 4175 MAC_MODE_TDE_ENABLE; 4176 4177 tw32_f(MAC_MODE, mac_mode); 4178 udelay(100); 4179 4180 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE); 4181 udelay(10); 4182 } 4183 4184 if (!tg3_flag(tp, WOL_SPEED_100MB) && 4185 (tg3_asic_rev(tp) == ASIC_REV_5700 || 4186 tg3_asic_rev(tp) == ASIC_REV_5701)) { 4187 u32 base_val; 4188 4189 base_val = tp->pci_clock_ctrl; 4190 base_val |= (CLOCK_CTRL_RXCLK_DISABLE | 4191 CLOCK_CTRL_TXCLK_DISABLE); 4192 4193 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK | 4194 CLOCK_CTRL_PWRDOWN_PLL133, 40); 4195 } else if (tg3_flag(tp, 5780_CLASS) || 4196 tg3_flag(tp, CPMU_PRESENT) || 4197 tg3_asic_rev(tp) == ASIC_REV_5906) { 4198 /* do nothing */ 4199 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) { 4200 u32 newbits1, newbits2; 4201 4202 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 4203 tg3_asic_rev(tp) == ASIC_REV_5701) { 4204 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE | 4205 CLOCK_CTRL_TXCLK_DISABLE | 4206 CLOCK_CTRL_ALTCLK); 4207 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; 4208 } else if (tg3_flag(tp, 5705_PLUS)) { 4209 newbits1 = CLOCK_CTRL_625_CORE; 4210 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK; 4211 } else { 4212 newbits1 = CLOCK_CTRL_ALTCLK; 4213 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; 4214 } 4215 4216 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1, 4217 40); 4218 4219 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2, 4220 40); 4221 4222 if (!tg3_flag(tp, 5705_PLUS)) { 4223 u32 newbits3; 4224 4225 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 4226 tg3_asic_rev(tp) == ASIC_REV_5701) { 4227 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE | 4228 CLOCK_CTRL_TXCLK_DISABLE | 4229 CLOCK_CTRL_44MHZ_CORE); 4230 } else { 4231 newbits3 = CLOCK_CTRL_44MHZ_CORE; 4232 } 4233 4234 tw32_wait_f(TG3PCI_CLOCK_CTRL, 4235 tp->pci_clock_ctrl | newbits3, 40); 4236 } 4237 } 4238 4239 if (!(device_should_wake) && !tg3_flag(tp, 
ENABLE_ASF)) 4240 tg3_power_down_phy(tp, do_low_power); 4241 4242 tg3_frob_aux_power(tp, true); 4243 4244 /* Workaround for unstable PLL clock */ 4245 if ((!tg3_flag(tp, IS_SSB_CORE)) && 4246 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) || 4247 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) { 4248 u32 val = tr32(0x7d00); 4249 4250 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1); 4251 tw32(0x7d00, val); 4252 if (!tg3_flag(tp, ENABLE_ASF)) { 4253 int err; 4254 4255 err = tg3_nvram_lock(tp); 4256 tg3_halt_cpu(tp, RX_CPU_BASE); 4257 if (!err) 4258 tg3_nvram_unlock(tp); 4259 } 4260 } 4261 4262 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN); 4263 4264 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN); 4265 4266 return 0; 4267 } 4268 4269 static void tg3_power_down(struct tg3 *tp) 4270 { 4271 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE)); 4272 pci_set_power_state(tp->pdev, PCI_D3hot); 4273 } 4274 4275 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex) 4276 { 4277 switch (val & MII_TG3_AUX_STAT_SPDMASK) { 4278 case MII_TG3_AUX_STAT_10HALF: 4279 *speed = SPEED_10; 4280 *duplex = DUPLEX_HALF; 4281 break; 4282 4283 case MII_TG3_AUX_STAT_10FULL: 4284 *speed = SPEED_10; 4285 *duplex = DUPLEX_FULL; 4286 break; 4287 4288 case MII_TG3_AUX_STAT_100HALF: 4289 *speed = SPEED_100; 4290 *duplex = DUPLEX_HALF; 4291 break; 4292 4293 case MII_TG3_AUX_STAT_100FULL: 4294 *speed = SPEED_100; 4295 *duplex = DUPLEX_FULL; 4296 break; 4297 4298 case MII_TG3_AUX_STAT_1000HALF: 4299 *speed = SPEED_1000; 4300 *duplex = DUPLEX_HALF; 4301 break; 4302 4303 case MII_TG3_AUX_STAT_1000FULL: 4304 *speed = SPEED_1000; 4305 *duplex = DUPLEX_FULL; 4306 break; 4307 4308 default: 4309 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 4310 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 : 4311 SPEED_10; 4312 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? 
DUPLEX_FULL : 4313 DUPLEX_HALF; 4314 break; 4315 } 4316 *speed = SPEED_UNKNOWN; 4317 *duplex = DUPLEX_UNKNOWN; 4318 break; 4319 } 4320 } 4321 4322 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl) 4323 { 4324 int err = 0; 4325 u32 val, new_adv; 4326 4327 new_adv = ADVERTISE_CSMA; 4328 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL; 4329 new_adv |= mii_advertise_flowctrl(flowctrl); 4330 4331 err = tg3_writephy(tp, MII_ADVERTISE, new_adv); 4332 if (err) 4333 goto done; 4334 4335 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4336 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise); 4337 4338 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || 4339 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) 4340 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER; 4341 4342 err = tg3_writephy(tp, MII_CTRL1000, new_adv); 4343 if (err) 4344 goto done; 4345 } 4346 4347 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) 4348 goto done; 4349 4350 tw32(TG3_CPMU_EEE_MODE, 4351 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE); 4352 4353 err = tg3_phy_toggle_auxctl_smdsp(tp, true); 4354 if (!err) { 4355 u32 err2; 4356 4357 val = 0; 4358 /* Advertise 100-BaseTX EEE ability */ 4359 if (advertise & ADVERTISED_100baseT_Full) 4360 val |= MDIO_AN_EEE_ADV_100TX; 4361 /* Advertise 1000-BaseT EEE ability */ 4362 if (advertise & ADVERTISED_1000baseT_Full) 4363 val |= MDIO_AN_EEE_ADV_1000T; 4364 4365 if (!tp->eee.eee_enabled) { 4366 val = 0; 4367 tp->eee.advertised = 0; 4368 } else { 4369 tp->eee.advertised = advertise & 4370 (ADVERTISED_100baseT_Full | 4371 ADVERTISED_1000baseT_Full); 4372 } 4373 4374 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val); 4375 if (err) 4376 val = 0; 4377 4378 switch (tg3_asic_rev(tp)) { 4379 case ASIC_REV_5717: 4380 case ASIC_REV_57765: 4381 case ASIC_REV_57766: 4382 case ASIC_REV_5719: 4383 /* If we advertised any eee advertisements above... 
*/ 4384 if (val) 4385 val = MII_TG3_DSP_TAP26_ALNOKO | 4386 MII_TG3_DSP_TAP26_RMRXSTO | 4387 MII_TG3_DSP_TAP26_OPCSINPT; 4388 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); 4389 fallthrough; 4390 case ASIC_REV_5720: 4391 case ASIC_REV_5762: 4392 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val)) 4393 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val | 4394 MII_TG3_DSP_CH34TP2_HIBW01); 4395 } 4396 4397 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false); 4398 if (!err) 4399 err = err2; 4400 } 4401 4402 done: 4403 return err; 4404 } 4405 4406 static void tg3_phy_copper_begin(struct tg3 *tp) 4407 { 4408 if (tp->link_config.autoneg == AUTONEG_ENABLE || 4409 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { 4410 u32 adv, fc; 4411 4412 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) && 4413 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) { 4414 adv = ADVERTISED_10baseT_Half | 4415 ADVERTISED_10baseT_Full; 4416 if (tg3_flag(tp, WOL_SPEED_100MB)) 4417 adv |= ADVERTISED_100baseT_Half | 4418 ADVERTISED_100baseT_Full; 4419 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) { 4420 if (!(tp->phy_flags & 4421 TG3_PHYFLG_DISABLE_1G_HD_ADV)) 4422 adv |= ADVERTISED_1000baseT_Half; 4423 adv |= ADVERTISED_1000baseT_Full; 4424 } 4425 4426 fc = FLOW_CTRL_TX | FLOW_CTRL_RX; 4427 } else { 4428 adv = tp->link_config.advertising; 4429 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) 4430 adv &= ~(ADVERTISED_1000baseT_Half | 4431 ADVERTISED_1000baseT_Full); 4432 4433 fc = tp->link_config.flowctrl; 4434 } 4435 4436 tg3_phy_autoneg_cfg(tp, adv, fc); 4437 4438 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) && 4439 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) { 4440 /* Normally during power down we want to autonegotiate 4441 * the lowest possible speed for WOL. However, to avoid 4442 * link flap, we leave it untouched. 4443 */ 4444 return; 4445 } 4446 4447 tg3_writephy(tp, MII_BMCR, 4448 BMCR_ANENABLE | BMCR_ANRESTART); 4449 } else { 4450 int i; 4451 u32 bmcr, orig_bmcr; 4452 4453 tp->link_config.active_speed = tp->link_config.speed; 4454 tp->link_config.active_duplex = tp->link_config.duplex; 4455 4456 if (tg3_asic_rev(tp) == ASIC_REV_5714) { 4457 /* With autoneg disabled, 5715 only links up when the 4458 * advertisement register has the configured speed 4459 * enabled. 
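 * (The 5715 shares ASIC_REV_5714, which is presumably why the
 * check above tests for the 5714 revision.)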
4460 */ 4461 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL); 4462 } 4463 4464 bmcr = 0; 4465 switch (tp->link_config.speed) { 4466 default: 4467 case SPEED_10: 4468 break; 4469 4470 case SPEED_100: 4471 bmcr |= BMCR_SPEED100; 4472 break; 4473 4474 case SPEED_1000: 4475 bmcr |= BMCR_SPEED1000; 4476 break; 4477 } 4478 4479 if (tp->link_config.duplex == DUPLEX_FULL) 4480 bmcr |= BMCR_FULLDPLX; 4481 4482 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) && 4483 (bmcr != orig_bmcr)) { 4484 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK); 4485 for (i = 0; i < 1500; i++) { 4486 u32 tmp; 4487 4488 udelay(10); 4489 if (tg3_readphy(tp, MII_BMSR, &tmp) || 4490 tg3_readphy(tp, MII_BMSR, &tmp)) 4491 continue; 4492 if (!(tmp & BMSR_LSTATUS)) { 4493 udelay(40); 4494 break; 4495 } 4496 } 4497 tg3_writephy(tp, MII_BMCR, bmcr); 4498 udelay(40); 4499 } 4500 } 4501 } 4502 4503 static int tg3_phy_pull_config(struct tg3 *tp) 4504 { 4505 int err; 4506 u32 val; 4507 4508 err = tg3_readphy(tp, MII_BMCR, &val); 4509 if (err) 4510 goto done; 4511 4512 if (!(val & BMCR_ANENABLE)) { 4513 tp->link_config.autoneg = AUTONEG_DISABLE; 4514 tp->link_config.advertising = 0; 4515 tg3_flag_clear(tp, PAUSE_AUTONEG); 4516 4517 err = -EIO; 4518 4519 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) { 4520 case 0: 4521 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) 4522 goto done; 4523 4524 tp->link_config.speed = SPEED_10; 4525 break; 4526 case BMCR_SPEED100: 4527 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) 4528 goto done; 4529 4530 tp->link_config.speed = SPEED_100; 4531 break; 4532 case BMCR_SPEED1000: 4533 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4534 tp->link_config.speed = SPEED_1000; 4535 break; 4536 } 4537 fallthrough; 4538 default: 4539 goto done; 4540 } 4541 4542 if (val & BMCR_FULLDPLX) 4543 tp->link_config.duplex = DUPLEX_FULL; 4544 else 4545 tp->link_config.duplex = DUPLEX_HALF; 4546 4547 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX; 4548 4549 err = 0; 4550 goto done; 4551 } 4552 4553 tp->link_config.autoneg = AUTONEG_ENABLE; 4554 tp->link_config.advertising = ADVERTISED_Autoneg; 4555 tg3_flag_set(tp, PAUSE_AUTONEG); 4556 4557 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { 4558 u32 adv; 4559 4560 err = tg3_readphy(tp, MII_ADVERTISE, &val); 4561 if (err) 4562 goto done; 4563 4564 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL); 4565 tp->link_config.advertising |= adv | ADVERTISED_TP; 4566 4567 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val); 4568 } else { 4569 tp->link_config.advertising |= ADVERTISED_FIBRE; 4570 } 4571 4572 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4573 u32 adv; 4574 4575 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { 4576 err = tg3_readphy(tp, MII_CTRL1000, &val); 4577 if (err) 4578 goto done; 4579 4580 adv = mii_ctrl1000_to_ethtool_adv_t(val); 4581 } else { 4582 err = tg3_readphy(tp, MII_ADVERTISE, &val); 4583 if (err) 4584 goto done; 4585 4586 adv = tg3_decode_flowctrl_1000X(val); 4587 tp->link_config.flowctrl = adv; 4588 4589 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL); 4590 adv = mii_adv_to_ethtool_adv_x(val); 4591 } 4592 4593 tp->link_config.advertising |= adv; 4594 } 4595 4596 done: 4597 return err; 4598 } 4599 4600 static int tg3_init_5401phy_dsp(struct tg3 *tp) 4601 { 4602 int err; 4603 4604 /* Turn off tap power management. 
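 * (The DSP register/value pairs written below are undocumented
 * 5401 magic inherited from the vendor driver and are assumed
 * correct as-is.)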
*/ 4605 /* Set Extended packet length bit */ 4606 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20); 4607 4608 err |= tg3_phydsp_write(tp, 0x0012, 0x1804); 4609 err |= tg3_phydsp_write(tp, 0x0013, 0x1204); 4610 err |= tg3_phydsp_write(tp, 0x8006, 0x0132); 4611 err |= tg3_phydsp_write(tp, 0x8006, 0x0232); 4612 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20); 4613 4614 udelay(40); 4615 4616 return err; 4617 } 4618 4619 static bool tg3_phy_eee_config_ok(struct tg3 *tp) 4620 { 4621 struct ethtool_eee eee; 4622 4623 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) 4624 return true; 4625 4626 tg3_eee_pull_config(tp, &eee); 4627 4628 if (tp->eee.eee_enabled) { 4629 if (tp->eee.advertised != eee.advertised || 4630 tp->eee.tx_lpi_timer != eee.tx_lpi_timer || 4631 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled) 4632 return false; 4633 } else { 4634 /* EEE is disabled but we're advertising */ 4635 if (eee.advertised) 4636 return false; 4637 } 4638 4639 return true; 4640 } 4641 4642 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv) 4643 { 4644 u32 advmsk, tgtadv, advertising; 4645 4646 advertising = tp->link_config.advertising; 4647 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL; 4648 4649 advmsk = ADVERTISE_ALL; 4650 if (tp->link_config.active_duplex == DUPLEX_FULL) { 4651 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl); 4652 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 4653 } 4654 4655 if (tg3_readphy(tp, MII_ADVERTISE, lcladv)) 4656 return false; 4657 4658 if ((*lcladv & advmsk) != tgtadv) 4659 return false; 4660 4661 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4662 u32 tg3_ctrl; 4663 4664 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising); 4665 4666 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl)) 4667 return false; 4668 4669 if (tgtadv && 4670 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || 4671 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) { 4672 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER; 4673 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL | 4674 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER); 4675 } else { 4676 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL); 4677 } 4678 4679 if (tg3_ctrl != tgtadv) 4680 return false; 4681 } 4682 4683 return true; 4684 } 4685 4686 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv) 4687 { 4688 u32 lpeth = 0; 4689 4690 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4691 u32 val; 4692 4693 if (tg3_readphy(tp, MII_STAT1000, &val)) 4694 return false; 4695 4696 lpeth = mii_stat1000_to_ethtool_lpa_t(val); 4697 } 4698 4699 if (tg3_readphy(tp, MII_LPA, rmtadv)) 4700 return false; 4701 4702 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv); 4703 tp->link_config.rmt_adv = lpeth; 4704 4705 return true; 4706 } 4707 4708 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up) 4709 { 4710 if (curr_link_up != tp->link_up) { 4711 if (curr_link_up) { 4712 netif_carrier_on(tp->dev); 4713 } else { 4714 netif_carrier_off(tp->dev); 4715 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 4716 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 4717 } 4718 4719 tg3_link_report(tp); 4720 return true; 4721 } 4722 4723 return false; 4724 } 4725 4726 static void tg3_clear_mac_status(struct tg3 *tp) 4727 { 4728 tw32(MAC_EVENT, 0); 4729 4730 tw32_f(MAC_STATUS, 4731 MAC_STATUS_SYNC_CHANGED | 4732 MAC_STATUS_CFG_CHANGED | 4733 MAC_STATUS_MI_COMPLETION | 4734 MAC_STATUS_LNKSTATE_CHANGED); 4735 udelay(40); 4736 } 4737 4738 static void tg3_setup_eee(struct tg3 *tp) 4739 { 4740 u32 val; 4741 4742 
val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 | 4743 TG3_CPMU_EEE_LNKIDL_UART_IDL; 4744 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) 4745 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT; 4746 4747 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val); 4748 4749 tw32_f(TG3_CPMU_EEE_CTRL, 4750 TG3_CPMU_EEE_CTRL_EXIT_20_1_US); 4751 4752 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET | 4753 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) | 4754 TG3_CPMU_EEEMD_LPI_IN_RX | 4755 TG3_CPMU_EEEMD_EEE_ENABLE; 4756 4757 if (tg3_asic_rev(tp) != ASIC_REV_5717) 4758 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN; 4759 4760 if (tg3_flag(tp, ENABLE_APE)) 4761 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN; 4762 4763 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0); 4764 4765 tw32_f(TG3_CPMU_EEE_DBTMR1, 4766 TG3_CPMU_DBTMR1_PCIEXIT_2047US | 4767 (tp->eee.tx_lpi_timer & 0xffff)); 4768 4769 tw32_f(TG3_CPMU_EEE_DBTMR2, 4770 TG3_CPMU_DBTMR2_APE_TX_2047US | 4771 TG3_CPMU_DBTMR2_TXIDXEQ_2047US); 4772 } 4773 4774 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset) 4775 { 4776 bool current_link_up; 4777 u32 bmsr, val; 4778 u32 lcl_adv, rmt_adv; 4779 u32 current_speed; 4780 u8 current_duplex; 4781 int i, err; 4782 4783 tg3_clear_mac_status(tp); 4784 4785 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { 4786 tw32_f(MAC_MI_MODE, 4787 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); 4788 udelay(80); 4789 } 4790 4791 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0); 4792 4793 /* Some third-party PHYs need to be reset on link going 4794 * down. 4795 */ 4796 if ((tg3_asic_rev(tp) == ASIC_REV_5703 || 4797 tg3_asic_rev(tp) == ASIC_REV_5704 || 4798 tg3_asic_rev(tp) == ASIC_REV_5705) && 4799 tp->link_up) { 4800 tg3_readphy(tp, MII_BMSR, &bmsr); 4801 if (!tg3_readphy(tp, MII_BMSR, &bmsr) && 4802 !(bmsr & BMSR_LSTATUS)) 4803 force_reset = true; 4804 } 4805 if (force_reset) 4806 tg3_phy_reset(tp); 4807 4808 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { 4809 tg3_readphy(tp, MII_BMSR, &bmsr); 4810 if (tg3_readphy(tp, MII_BMSR, &bmsr) || 4811 !tg3_flag(tp, INIT_COMPLETE)) 4812 bmsr = 0; 4813 4814 if (!(bmsr & BMSR_LSTATUS)) { 4815 err = tg3_init_5401phy_dsp(tp); 4816 if (err) 4817 return err; 4818 4819 tg3_readphy(tp, MII_BMSR, &bmsr); 4820 for (i = 0; i < 1000; i++) { 4821 udelay(10); 4822 if (!tg3_readphy(tp, MII_BMSR, &bmsr) && 4823 (bmsr & BMSR_LSTATUS)) { 4824 udelay(40); 4825 break; 4826 } 4827 } 4828 4829 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) == 4830 TG3_PHY_REV_BCM5401_B0 && 4831 !(bmsr & BMSR_LSTATUS) && 4832 tp->link_config.active_speed == SPEED_1000) { 4833 err = tg3_phy_reset(tp); 4834 if (!err) 4835 err = tg3_init_5401phy_dsp(tp); 4836 if (err) 4837 return err; 4838 } 4839 } 4840 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || 4841 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) { 4842 /* 5701 {A0,B0} CRC bug workaround */ 4843 tg3_writephy(tp, 0x15, 0x0a75); 4844 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68); 4845 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); 4846 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68); 4847 } 4848 4849 /* Clear pending interrupts... 
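 * (MII_TG3_ISTAT is read twice below on the assumption that it is
 * a latched, clear-on-read register: the first read flushes any
 * stale event, the second leaves it clean.)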
*/ 4850 tg3_readphy(tp, MII_TG3_ISTAT, &val); 4851 tg3_readphy(tp, MII_TG3_ISTAT, &val); 4852 4853 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) 4854 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG); 4855 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) 4856 tg3_writephy(tp, MII_TG3_IMASK, ~0); 4857 4858 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 4859 tg3_asic_rev(tp) == ASIC_REV_5701) { 4860 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1) 4861 tg3_writephy(tp, MII_TG3_EXT_CTRL, 4862 MII_TG3_EXT_CTRL_LNK3_LED_MODE); 4863 else 4864 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0); 4865 } 4866 4867 current_link_up = false; 4868 current_speed = SPEED_UNKNOWN; 4869 current_duplex = DUPLEX_UNKNOWN; 4870 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE; 4871 tp->link_config.rmt_adv = 0; 4872 4873 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) { 4874 err = tg3_phy_auxctl_read(tp, 4875 MII_TG3_AUXCTL_SHDWSEL_MISCTEST, 4876 &val); 4877 if (!err && !(val & (1 << 10))) { 4878 tg3_phy_auxctl_write(tp, 4879 MII_TG3_AUXCTL_SHDWSEL_MISCTEST, 4880 val | (1 << 10)); 4881 goto relink; 4882 } 4883 } 4884 4885 bmsr = 0; 4886 for (i = 0; i < 100; i++) { 4887 tg3_readphy(tp, MII_BMSR, &bmsr); 4888 if (!tg3_readphy(tp, MII_BMSR, &bmsr) && 4889 (bmsr & BMSR_LSTATUS)) 4890 break; 4891 udelay(40); 4892 } 4893 4894 if (bmsr & BMSR_LSTATUS) { 4895 u32 aux_stat, bmcr; 4896 4897 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat); 4898 for (i = 0; i < 2000; i++) { 4899 udelay(10); 4900 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) && 4901 aux_stat) 4902 break; 4903 } 4904 4905 tg3_aux_stat_to_speed_duplex(tp, aux_stat, 4906 &current_speed, 4907 &current_duplex); 4908 4909 bmcr = 0; 4910 for (i = 0; i < 200; i++) { 4911 tg3_readphy(tp, MII_BMCR, &bmcr); 4912 if (tg3_readphy(tp, MII_BMCR, &bmcr)) 4913 continue; 4914 if (bmcr && bmcr != 0x7fff) 4915 break; 4916 udelay(10); 4917 } 4918 4919 lcl_adv = 0; 4920 rmt_adv = 0; 4921 4922 tp->link_config.active_speed = current_speed; 4923 tp->link_config.active_duplex = current_duplex; 4924 4925 if (tp->link_config.autoneg == AUTONEG_ENABLE) { 4926 bool eee_config_ok = tg3_phy_eee_config_ok(tp); 4927 4928 if ((bmcr & BMCR_ANENABLE) && 4929 eee_config_ok && 4930 tg3_phy_copper_an_config_ok(tp, &lcl_adv) && 4931 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv)) 4932 current_link_up = true; 4933 4934 /* Changes to EEE settings take effect only after a phy 4935 * reset. If we have skipped a reset due to Link Flap 4936 * Avoidance being enabled, do it now.
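 * (Concretely: tg3_setup_eee() below reprograms the CPMU EEE
 * registers from tp->eee, and the tg3_phy_reset() that follows is
 * what makes the new advertisement take effect.)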
4937 */ 4938 if (!eee_config_ok && 4939 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && 4940 !force_reset) { 4941 tg3_setup_eee(tp); 4942 tg3_phy_reset(tp); 4943 } 4944 } else { 4945 if (!(bmcr & BMCR_ANENABLE) && 4946 tp->link_config.speed == current_speed && 4947 tp->link_config.duplex == current_duplex) { 4948 current_link_up = true; 4949 } 4950 } 4951 4952 if (current_link_up && 4953 tp->link_config.active_duplex == DUPLEX_FULL) { 4954 u32 reg, bit; 4955 4956 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 4957 reg = MII_TG3_FET_GEN_STAT; 4958 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT; 4959 } else { 4960 reg = MII_TG3_EXT_STAT; 4961 bit = MII_TG3_EXT_STAT_MDIX; 4962 } 4963 4964 if (!tg3_readphy(tp, reg, &val) && (val & bit)) 4965 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE; 4966 4967 tg3_setup_flow_control(tp, lcl_adv, rmt_adv); 4968 } 4969 } 4970 4971 relink: 4972 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { 4973 tg3_phy_copper_begin(tp); 4974 4975 if (tg3_flag(tp, ROBOSWITCH)) { 4976 current_link_up = true; 4977 /* FIXME: when BCM5325 switch is used use 100 MBit/s */ 4978 current_speed = SPEED_1000; 4979 current_duplex = DUPLEX_FULL; 4980 tp->link_config.active_speed = current_speed; 4981 tp->link_config.active_duplex = current_duplex; 4982 } 4983 4984 tg3_readphy(tp, MII_BMSR, &bmsr); 4985 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) || 4986 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)) 4987 current_link_up = true; 4988 } 4989 4990 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK; 4991 if (current_link_up) { 4992 if (tp->link_config.active_speed == SPEED_100 || 4993 tp->link_config.active_speed == SPEED_10) 4994 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 4995 else 4996 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 4997 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) 4998 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 4999 else 5000 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 5001 5002 /* In order for the 5750 core in BCM4785 chip to work properly 5003 * in RGMII mode, the Led Control Register must be set up. 5004 */ 5005 if (tg3_flag(tp, RGMII_MODE)) { 5006 u32 led_ctrl = tr32(MAC_LED_CTRL); 5007 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON); 5008 5009 if (tp->link_config.active_speed == SPEED_10) 5010 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE; 5011 else if (tp->link_config.active_speed == SPEED_100) 5012 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE | 5013 LED_CTRL_100MBPS_ON); 5014 else if (tp->link_config.active_speed == SPEED_1000) 5015 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE | 5016 LED_CTRL_1000MBPS_ON); 5017 5018 tw32(MAC_LED_CTRL, led_ctrl); 5019 udelay(40); 5020 } 5021 5022 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; 5023 if (tp->link_config.active_duplex == DUPLEX_HALF) 5024 tp->mac_mode |= MAC_MODE_HALF_DUPLEX; 5025 5026 if (tg3_asic_rev(tp) == ASIC_REV_5700) { 5027 if (current_link_up && 5028 tg3_5700_link_polarity(tp, tp->link_config.active_speed)) 5029 tp->mac_mode |= MAC_MODE_LINK_POLARITY; 5030 else 5031 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; 5032 } 5033 5034 /* ??? Without this setting Netgear GA302T PHY does not 5035 * ??? send/receive packets... 5036 */ 5037 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 && 5038 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) { 5039 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL; 5040 tw32_f(MAC_MI_MODE, tp->mi_mode); 5041 udelay(80); 5042 } 5043 5044 tw32_f(MAC_MODE, tp->mac_mode); 5045 udelay(40); 5046 5047 tg3_phy_eee_adjust(tp, current_link_up); 5048 5049 if (tg3_flag(tp, USE_LINKCHG_REG)) { 5050 /* Polled via timer. 
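 * (MAC_EVENT is cleared below so no link-change interrupt fires;
 * the timer path is assumed to poll the link-change register
 * instead when USE_LINKCHG_REG is set.)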
*/ 5051 tw32_f(MAC_EVENT, 0); 5052 } else { 5053 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); 5054 } 5055 udelay(40); 5056 5057 if (tg3_asic_rev(tp) == ASIC_REV_5700 && 5058 current_link_up && 5059 tp->link_config.active_speed == SPEED_1000 && 5060 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) { 5061 udelay(120); 5062 tw32_f(MAC_STATUS, 5063 (MAC_STATUS_SYNC_CHANGED | 5064 MAC_STATUS_CFG_CHANGED)); 5065 udelay(40); 5066 tg3_write_mem(tp, 5067 NIC_SRAM_FIRMWARE_MBOX, 5068 NIC_SRAM_FIRMWARE_MBOX_MAGIC2); 5069 } 5070 5071 /* Prevent send BD corruption. */ 5072 if (tg3_flag(tp, CLKREQ_BUG)) { 5073 if (tp->link_config.active_speed == SPEED_100 || 5074 tp->link_config.active_speed == SPEED_10) 5075 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL, 5076 PCI_EXP_LNKCTL_CLKREQ_EN); 5077 else 5078 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL, 5079 PCI_EXP_LNKCTL_CLKREQ_EN); 5080 } 5081 5082 tg3_test_and_report_link_chg(tp, current_link_up); 5083 5084 return 0; 5085 } 5086 5087 struct tg3_fiber_aneginfo { 5088 int state; 5089 #define ANEG_STATE_UNKNOWN 0 5090 #define ANEG_STATE_AN_ENABLE 1 5091 #define ANEG_STATE_RESTART_INIT 2 5092 #define ANEG_STATE_RESTART 3 5093 #define ANEG_STATE_DISABLE_LINK_OK 4 5094 #define ANEG_STATE_ABILITY_DETECT_INIT 5 5095 #define ANEG_STATE_ABILITY_DETECT 6 5096 #define ANEG_STATE_ACK_DETECT_INIT 7 5097 #define ANEG_STATE_ACK_DETECT 8 5098 #define ANEG_STATE_COMPLETE_ACK_INIT 9 5099 #define ANEG_STATE_COMPLETE_ACK 10 5100 #define ANEG_STATE_IDLE_DETECT_INIT 11 5101 #define ANEG_STATE_IDLE_DETECT 12 5102 #define ANEG_STATE_LINK_OK 13 5103 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14 5104 #define ANEG_STATE_NEXT_PAGE_WAIT 15 5105 5106 u32 flags; 5107 #define MR_AN_ENABLE 0x00000001 5108 #define MR_RESTART_AN 0x00000002 5109 #define MR_AN_COMPLETE 0x00000004 5110 #define MR_PAGE_RX 0x00000008 5111 #define MR_NP_LOADED 0x00000010 5112 #define MR_TOGGLE_TX 0x00000020 5113 #define MR_LP_ADV_FULL_DUPLEX 0x00000040 5114 #define MR_LP_ADV_HALF_DUPLEX 0x00000080 5115 #define MR_LP_ADV_SYM_PAUSE 0x00000100 5116 #define MR_LP_ADV_ASYM_PAUSE 0x00000200 5117 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400 5118 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800 5119 #define MR_LP_ADV_NEXT_PAGE 0x00001000 5120 #define MR_TOGGLE_RX 0x00002000 5121 #define MR_NP_RX 0x00004000 5122 5123 #define MR_LINK_OK 0x80000000 5124 5125 unsigned long link_time, cur_time; 5126 5127 u32 ability_match_cfg; 5128 int ability_match_count; 5129 5130 char ability_match, idle_match, ack_match; 5131 5132 u32 txconfig, rxconfig; 5133 #define ANEG_CFG_NP 0x00000080 5134 #define ANEG_CFG_ACK 0x00000040 5135 #define ANEG_CFG_RF2 0x00000020 5136 #define ANEG_CFG_RF1 0x00000010 5137 #define ANEG_CFG_PS2 0x00000001 5138 #define ANEG_CFG_PS1 0x00008000 5139 #define ANEG_CFG_HD 0x00004000 5140 #define ANEG_CFG_FD 0x00002000 5141 #define ANEG_CFG_INVAL 0x00001f06 5142 5143 }; 5144 #define ANEG_OK 0 5145 #define ANEG_DONE 1 5146 #define ANEG_TIMER_ENAB 2 5147 #define ANEG_FAILED -1 5148 5149 #define ANEG_STATE_SETTLE_TIME 10000 5150 5151 static int tg3_fiber_aneg_smachine(struct tg3 *tp, 5152 struct tg3_fiber_aneginfo *ap) 5153 { 5154 u16 flowctrl; 5155 unsigned long delta; 5156 u32 rx_cfg_reg; 5157 int ret; 5158 5159 if (ap->state == ANEG_STATE_UNKNOWN) { 5160 ap->rxconfig = 0; 5161 ap->link_time = 0; 5162 ap->cur_time = 0; 5163 ap->ability_match_cfg = 0; 5164 ap->ability_match_count = 0; 5165 ap->ability_match = 0; 5166 ap->idle_match = 0; 5167 ap->ack_match = 0; 5168 } 5169 ap->cur_time++; 5170 5171 if 
(tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) { 5172 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG); 5173 5174 if (rx_cfg_reg != ap->ability_match_cfg) { 5175 ap->ability_match_cfg = rx_cfg_reg; 5176 ap->ability_match = 0; 5177 ap->ability_match_count = 0; 5178 } else { 5179 if (++ap->ability_match_count > 1) { 5180 ap->ability_match = 1; 5181 ap->ability_match_cfg = rx_cfg_reg; 5182 } 5183 } 5184 if (rx_cfg_reg & ANEG_CFG_ACK) 5185 ap->ack_match = 1; 5186 else 5187 ap->ack_match = 0; 5188 5189 ap->idle_match = 0; 5190 } else { 5191 ap->idle_match = 1; 5192 ap->ability_match_cfg = 0; 5193 ap->ability_match_count = 0; 5194 ap->ability_match = 0; 5195 ap->ack_match = 0; 5196 5197 rx_cfg_reg = 0; 5198 } 5199 5200 ap->rxconfig = rx_cfg_reg; 5201 ret = ANEG_OK; 5202 5203 switch (ap->state) { 5204 case ANEG_STATE_UNKNOWN: 5205 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN)) 5206 ap->state = ANEG_STATE_AN_ENABLE; 5207 5208 fallthrough; 5209 case ANEG_STATE_AN_ENABLE: 5210 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX); 5211 if (ap->flags & MR_AN_ENABLE) { 5212 ap->link_time = 0; 5213 ap->cur_time = 0; 5214 ap->ability_match_cfg = 0; 5215 ap->ability_match_count = 0; 5216 ap->ability_match = 0; 5217 ap->idle_match = 0; 5218 ap->ack_match = 0; 5219 5220 ap->state = ANEG_STATE_RESTART_INIT; 5221 } else { 5222 ap->state = ANEG_STATE_DISABLE_LINK_OK; 5223 } 5224 break; 5225 5226 case ANEG_STATE_RESTART_INIT: 5227 ap->link_time = ap->cur_time; 5228 ap->flags &= ~(MR_NP_LOADED); 5229 ap->txconfig = 0; 5230 tw32(MAC_TX_AUTO_NEG, 0); 5231 tp->mac_mode |= MAC_MODE_SEND_CONFIGS; 5232 tw32_f(MAC_MODE, tp->mac_mode); 5233 udelay(40); 5234 5235 ret = ANEG_TIMER_ENAB; 5236 ap->state = ANEG_STATE_RESTART; 5237 5238 fallthrough; 5239 case ANEG_STATE_RESTART: 5240 delta = ap->cur_time - ap->link_time; 5241 if (delta > ANEG_STATE_SETTLE_TIME) 5242 ap->state = ANEG_STATE_ABILITY_DETECT_INIT; 5243 else 5244 ret = ANEG_TIMER_ENAB; 5245 break; 5246 5247 case ANEG_STATE_DISABLE_LINK_OK: 5248 ret = ANEG_DONE; 5249 break; 5250 5251 case ANEG_STATE_ABILITY_DETECT_INIT: 5252 ap->flags &= ~(MR_TOGGLE_TX); 5253 ap->txconfig = ANEG_CFG_FD; 5254 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); 5255 if (flowctrl & ADVERTISE_1000XPAUSE) 5256 ap->txconfig |= ANEG_CFG_PS1; 5257 if (flowctrl & ADVERTISE_1000XPSE_ASYM) 5258 ap->txconfig |= ANEG_CFG_PS2; 5259 tw32(MAC_TX_AUTO_NEG, ap->txconfig); 5260 tp->mac_mode |= MAC_MODE_SEND_CONFIGS; 5261 tw32_f(MAC_MODE, tp->mac_mode); 5262 udelay(40); 5263 5264 ap->state = ANEG_STATE_ABILITY_DETECT; 5265 break; 5266 5267 case ANEG_STATE_ABILITY_DETECT: 5268 if (ap->ability_match != 0 && ap->rxconfig != 0) 5269 ap->state = ANEG_STATE_ACK_DETECT_INIT; 5270 break; 5271 5272 case ANEG_STATE_ACK_DETECT_INIT: 5273 ap->txconfig |= ANEG_CFG_ACK; 5274 tw32(MAC_TX_AUTO_NEG, ap->txconfig); 5275 tp->mac_mode |= MAC_MODE_SEND_CONFIGS; 5276 tw32_f(MAC_MODE, tp->mac_mode); 5277 udelay(40); 5278 5279 ap->state = ANEG_STATE_ACK_DETECT; 5280 5281 fallthrough; 5282 case ANEG_STATE_ACK_DETECT: 5283 if (ap->ack_match != 0) { 5284 if ((ap->rxconfig & ~ANEG_CFG_ACK) == 5285 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) { 5286 ap->state = ANEG_STATE_COMPLETE_ACK_INIT; 5287 } else { 5288 ap->state = ANEG_STATE_AN_ENABLE; 5289 } 5290 } else if (ap->ability_match != 0 && 5291 ap->rxconfig == 0) { 5292 ap->state = ANEG_STATE_AN_ENABLE; 5293 } 5294 break; 5295 5296 case ANEG_STATE_COMPLETE_ACK_INIT: 5297 if (ap->rxconfig & ANEG_CFG_INVAL) { 5298 ret = ANEG_FAILED; 5299 break; 5300 } 5301 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX | 5302 
MR_LP_ADV_HALF_DUPLEX | 5303 MR_LP_ADV_SYM_PAUSE | 5304 MR_LP_ADV_ASYM_PAUSE | 5305 MR_LP_ADV_REMOTE_FAULT1 | 5306 MR_LP_ADV_REMOTE_FAULT2 | 5307 MR_LP_ADV_NEXT_PAGE | 5308 MR_TOGGLE_RX | 5309 MR_NP_RX); 5310 if (ap->rxconfig & ANEG_CFG_FD) 5311 ap->flags |= MR_LP_ADV_FULL_DUPLEX; 5312 if (ap->rxconfig & ANEG_CFG_HD) 5313 ap->flags |= MR_LP_ADV_HALF_DUPLEX; 5314 if (ap->rxconfig & ANEG_CFG_PS1) 5315 ap->flags |= MR_LP_ADV_SYM_PAUSE; 5316 if (ap->rxconfig & ANEG_CFG_PS2) 5317 ap->flags |= MR_LP_ADV_ASYM_PAUSE; 5318 if (ap->rxconfig & ANEG_CFG_RF1) 5319 ap->flags |= MR_LP_ADV_REMOTE_FAULT1; 5320 if (ap->rxconfig & ANEG_CFG_RF2) 5321 ap->flags |= MR_LP_ADV_REMOTE_FAULT2; 5322 if (ap->rxconfig & ANEG_CFG_NP) 5323 ap->flags |= MR_LP_ADV_NEXT_PAGE; 5324 5325 ap->link_time = ap->cur_time; 5326 5327 ap->flags ^= (MR_TOGGLE_TX); 5328 if (ap->rxconfig & 0x0008) 5329 ap->flags |= MR_TOGGLE_RX; 5330 if (ap->rxconfig & ANEG_CFG_NP) 5331 ap->flags |= MR_NP_RX; 5332 ap->flags |= MR_PAGE_RX; 5333 5334 ap->state = ANEG_STATE_COMPLETE_ACK; 5335 ret = ANEG_TIMER_ENAB; 5336 break; 5337 5338 case ANEG_STATE_COMPLETE_ACK: 5339 if (ap->ability_match != 0 && 5340 ap->rxconfig == 0) { 5341 ap->state = ANEG_STATE_AN_ENABLE; 5342 break; 5343 } 5344 delta = ap->cur_time - ap->link_time; 5345 if (delta > ANEG_STATE_SETTLE_TIME) { 5346 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) { 5347 ap->state = ANEG_STATE_IDLE_DETECT_INIT; 5348 } else { 5349 if ((ap->txconfig & ANEG_CFG_NP) == 0 && 5350 !(ap->flags & MR_NP_RX)) { 5351 ap->state = ANEG_STATE_IDLE_DETECT_INIT; 5352 } else { 5353 ret = ANEG_FAILED; 5354 } 5355 } 5356 } 5357 break; 5358 5359 case ANEG_STATE_IDLE_DETECT_INIT: 5360 ap->link_time = ap->cur_time; 5361 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; 5362 tw32_f(MAC_MODE, tp->mac_mode); 5363 udelay(40); 5364 5365 ap->state = ANEG_STATE_IDLE_DETECT; 5366 ret = ANEG_TIMER_ENAB; 5367 break; 5368 5369 case ANEG_STATE_IDLE_DETECT: 5370 if (ap->ability_match != 0 && 5371 ap->rxconfig == 0) { 5372 ap->state = ANEG_STATE_AN_ENABLE; 5373 break; 5374 } 5375 delta = ap->cur_time - ap->link_time; 5376 if (delta > ANEG_STATE_SETTLE_TIME) { 5377 /* XXX another gem from the Broadcom driver :( */ 5378 ap->state = ANEG_STATE_LINK_OK; 5379 } 5380 break; 5381 5382 case ANEG_STATE_LINK_OK: 5383 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK); 5384 ret = ANEG_DONE; 5385 break; 5386 5387 case ANEG_STATE_NEXT_PAGE_WAIT_INIT: 5388 /* ??? unimplemented */ 5389 break; 5390 5391 case ANEG_STATE_NEXT_PAGE_WAIT: 5392 /* ??? 
unimplemented */ 5393 break; 5394 5395 default: 5396 ret = ANEG_FAILED; 5397 break; 5398 } 5399 5400 return ret; 5401 } 5402 5403 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags) 5404 { 5405 int res = 0; 5406 struct tg3_fiber_aneginfo aninfo; 5407 int status = ANEG_FAILED; 5408 unsigned int tick; 5409 u32 tmp; 5410 5411 tw32_f(MAC_TX_AUTO_NEG, 0); 5412 5413 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; 5414 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII); 5415 udelay(40); 5416 5417 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS); 5418 udelay(40); 5419 5420 memset(&aninfo, 0, sizeof(aninfo)); 5421 aninfo.flags |= MR_AN_ENABLE; 5422 aninfo.state = ANEG_STATE_UNKNOWN; 5423 aninfo.cur_time = 0; 5424 tick = 0; 5425 while (++tick < 195000) { 5426 status = tg3_fiber_aneg_smachine(tp, &aninfo); 5427 if (status == ANEG_DONE || status == ANEG_FAILED) 5428 break; 5429 5430 udelay(1); 5431 } 5432 5433 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; 5434 tw32_f(MAC_MODE, tp->mac_mode); 5435 udelay(40); 5436 5437 *txflags = aninfo.txconfig; 5438 *rxflags = aninfo.flags; 5439 5440 if (status == ANEG_DONE && 5441 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK | 5442 MR_LP_ADV_FULL_DUPLEX))) 5443 res = 1; 5444 5445 return res; 5446 } 5447 5448 static void tg3_init_bcm8002(struct tg3 *tp) 5449 { 5450 u32 mac_status = tr32(MAC_STATUS); 5451 int i; 5452 5453 /* Reset when initting first time or we have a link. */ 5454 if (tg3_flag(tp, INIT_COMPLETE) && 5455 !(mac_status & MAC_STATUS_PCS_SYNCED)) 5456 return; 5457 5458 /* Set PLL lock range. */ 5459 tg3_writephy(tp, 0x16, 0x8007); 5460 5461 /* SW reset */ 5462 tg3_writephy(tp, MII_BMCR, BMCR_RESET); 5463 5464 /* Wait for reset to complete. */ 5465 /* XXX schedule_timeout() ... */ 5466 for (i = 0; i < 500; i++) 5467 udelay(10); 5468 5469 /* Config mode; select PMA/Ch 1 regs. */ 5470 tg3_writephy(tp, 0x10, 0x8411); 5471 5472 /* Enable auto-lock and comdet, select txclk for tx. */ 5473 tg3_writephy(tp, 0x11, 0x0a10); 5474 5475 tg3_writephy(tp, 0x18, 0x00a0); 5476 tg3_writephy(tp, 0x16, 0x41ff); 5477 5478 /* Assert and deassert POR. */ 5479 tg3_writephy(tp, 0x13, 0x0400); 5480 udelay(40); 5481 tg3_writephy(tp, 0x13, 0x0000); 5482 5483 tg3_writephy(tp, 0x11, 0x0a50); 5484 udelay(40); 5485 tg3_writephy(tp, 0x11, 0x0a10); 5486 5487 /* Wait for signal to stabilize */ 5488 /* XXX schedule_timeout() ... */ 5489 for (i = 0; i < 15000; i++) 5490 udelay(10); 5491 5492 /* Deselect the channel register so we can read the PHYID 5493 * later. 
5494 */ 5495 tg3_writephy(tp, 0x10, 0x8011); 5496 } 5497 5498 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status) 5499 { 5500 u16 flowctrl; 5501 bool current_link_up; 5502 u32 sg_dig_ctrl, sg_dig_status; 5503 u32 serdes_cfg, expected_sg_dig_ctrl; 5504 int workaround, port_a; 5505 5506 serdes_cfg = 0; 5507 workaround = 0; 5508 port_a = 1; 5509 current_link_up = false; 5510 5511 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 && 5512 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) { 5513 workaround = 1; 5514 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) 5515 port_a = 0; 5516 5517 /* preserve bits 0-11,13,14 for signal pre-emphasis */ 5518 /* preserve bits 20-23 for voltage regulator */ 5519 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff; 5520 } 5521 5522 sg_dig_ctrl = tr32(SG_DIG_CTRL); 5523 5524 if (tp->link_config.autoneg != AUTONEG_ENABLE) { 5525 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) { 5526 if (workaround) { 5527 u32 val = serdes_cfg; 5528 5529 if (port_a) 5530 val |= 0xc010000; 5531 else 5532 val |= 0x4010000; 5533 tw32_f(MAC_SERDES_CFG, val); 5534 } 5535 5536 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP); 5537 } 5538 if (mac_status & MAC_STATUS_PCS_SYNCED) { 5539 tg3_setup_flow_control(tp, 0, 0); 5540 current_link_up = true; 5541 } 5542 goto out; 5543 } 5544 5545 /* Want auto-negotiation. */ 5546 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP; 5547 5548 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); 5549 if (flowctrl & ADVERTISE_1000XPAUSE) 5550 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP; 5551 if (flowctrl & ADVERTISE_1000XPSE_ASYM) 5552 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE; 5553 5554 if (sg_dig_ctrl != expected_sg_dig_ctrl) { 5555 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) && 5556 tp->serdes_counter && 5557 ((mac_status & (MAC_STATUS_PCS_SYNCED | 5558 MAC_STATUS_RCVD_CFG)) == 5559 MAC_STATUS_PCS_SYNCED)) { 5560 tp->serdes_counter--; 5561 current_link_up = true; 5562 goto out; 5563 } 5564 restart_autoneg: 5565 if (workaround) 5566 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000); 5567 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET); 5568 udelay(5); 5569 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl); 5570 5571 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; 5572 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5573 } else if (mac_status & (MAC_STATUS_PCS_SYNCED | 5574 MAC_STATUS_SIGNAL_DET)) { 5575 sg_dig_status = tr32(SG_DIG_STATUS); 5576 mac_status = tr32(MAC_STATUS); 5577 5578 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) && 5579 (mac_status & MAC_STATUS_PCS_SYNCED)) { 5580 u32 local_adv = 0, remote_adv = 0; 5581 5582 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP) 5583 local_adv |= ADVERTISE_1000XPAUSE; 5584 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE) 5585 local_adv |= ADVERTISE_1000XPSE_ASYM; 5586 5587 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE) 5588 remote_adv |= LPA_1000XPAUSE; 5589 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE) 5590 remote_adv |= LPA_1000XPAUSE_ASYM; 5591 5592 tp->link_config.rmt_adv = 5593 mii_adv_to_ethtool_adv_x(remote_adv); 5594 5595 tg3_setup_flow_control(tp, local_adv, remote_adv); 5596 current_link_up = true; 5597 tp->serdes_counter = 0; 5598 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5599 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) { 5600 if (tp->serdes_counter) 5601 tp->serdes_counter--; 5602 else { 5603 if (workaround) { 5604 u32 val = serdes_cfg; 5605 5606 if (port_a) 5607 val |= 0xc010000; 5608 else 5609 val |= 0x4010000; 5610 5611 tw32_f(MAC_SERDES_CFG, val); 5612 } 5613 5614 
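/* Assumed intent: drop out of hardware autoneg so the parallel
 * detection check just below can decide whether the link is
 * really up.
 */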
tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP); 5615 udelay(40); 5616 5617 /* Link parallel detection - link is up */ 5618 /* only if we have PCS_SYNC and not */ 5619 /* receiving config code words */ 5620 mac_status = tr32(MAC_STATUS); 5621 if ((mac_status & MAC_STATUS_PCS_SYNCED) && 5622 !(mac_status & MAC_STATUS_RCVD_CFG)) { 5623 tg3_setup_flow_control(tp, 0, 0); 5624 current_link_up = true; 5625 tp->phy_flags |= 5626 TG3_PHYFLG_PARALLEL_DETECT; 5627 tp->serdes_counter = 5628 SERDES_PARALLEL_DET_TIMEOUT; 5629 } else 5630 goto restart_autoneg; 5631 } 5632 } 5633 } else { 5634 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; 5635 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5636 } 5637 5638 out: 5639 return current_link_up; 5640 } 5641 5642 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status) 5643 { 5644 bool current_link_up = false; 5645 5646 if (!(mac_status & MAC_STATUS_PCS_SYNCED)) 5647 goto out; 5648 5649 if (tp->link_config.autoneg == AUTONEG_ENABLE) { 5650 u32 txflags, rxflags; 5651 int i; 5652 5653 if (fiber_autoneg(tp, &txflags, &rxflags)) { 5654 u32 local_adv = 0, remote_adv = 0; 5655 5656 if (txflags & ANEG_CFG_PS1) 5657 local_adv |= ADVERTISE_1000XPAUSE; 5658 if (txflags & ANEG_CFG_PS2) 5659 local_adv |= ADVERTISE_1000XPSE_ASYM; 5660 5661 if (rxflags & MR_LP_ADV_SYM_PAUSE) 5662 remote_adv |= LPA_1000XPAUSE; 5663 if (rxflags & MR_LP_ADV_ASYM_PAUSE) 5664 remote_adv |= LPA_1000XPAUSE_ASYM; 5665 5666 tp->link_config.rmt_adv = 5667 mii_adv_to_ethtool_adv_x(remote_adv); 5668 5669 tg3_setup_flow_control(tp, local_adv, remote_adv); 5670 5671 current_link_up = true; 5672 } 5673 for (i = 0; i < 30; i++) { 5674 udelay(20); 5675 tw32_f(MAC_STATUS, 5676 (MAC_STATUS_SYNC_CHANGED | 5677 MAC_STATUS_CFG_CHANGED)); 5678 udelay(40); 5679 if ((tr32(MAC_STATUS) & 5680 (MAC_STATUS_SYNC_CHANGED | 5681 MAC_STATUS_CFG_CHANGED)) == 0) 5682 break; 5683 } 5684 5685 mac_status = tr32(MAC_STATUS); 5686 if (!current_link_up && 5687 (mac_status & MAC_STATUS_PCS_SYNCED) && 5688 !(mac_status & MAC_STATUS_RCVD_CFG)) 5689 current_link_up = true; 5690 } else { 5691 tg3_setup_flow_control(tp, 0, 0); 5692 5693 /* Forcing 1000FD link up. 
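 * (With autoneg off there is nothing to negotiate: the MAC sends
 * config words once below and the fiber link is assumed to be the
 * fixed 1000 Mb/s full duplex that the caller reports.)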
*/ 5694 current_link_up = true; 5695 5696 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS)); 5697 udelay(40); 5698 5699 tw32_f(MAC_MODE, tp->mac_mode); 5700 udelay(40); 5701 } 5702 5703 out: 5704 return current_link_up; 5705 } 5706 5707 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset) 5708 { 5709 u32 orig_pause_cfg; 5710 u32 orig_active_speed; 5711 u8 orig_active_duplex; 5712 u32 mac_status; 5713 bool current_link_up; 5714 int i; 5715 5716 orig_pause_cfg = tp->link_config.active_flowctrl; 5717 orig_active_speed = tp->link_config.active_speed; 5718 orig_active_duplex = tp->link_config.active_duplex; 5719 5720 if (!tg3_flag(tp, HW_AUTONEG) && 5721 tp->link_up && 5722 tg3_flag(tp, INIT_COMPLETE)) { 5723 mac_status = tr32(MAC_STATUS); 5724 mac_status &= (MAC_STATUS_PCS_SYNCED | 5725 MAC_STATUS_SIGNAL_DET | 5726 MAC_STATUS_CFG_CHANGED | 5727 MAC_STATUS_RCVD_CFG); 5728 if (mac_status == (MAC_STATUS_PCS_SYNCED | 5729 MAC_STATUS_SIGNAL_DET)) { 5730 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | 5731 MAC_STATUS_CFG_CHANGED)); 5732 return 0; 5733 } 5734 } 5735 5736 tw32_f(MAC_TX_AUTO_NEG, 0); 5737 5738 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); 5739 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI; 5740 tw32_f(MAC_MODE, tp->mac_mode); 5741 udelay(40); 5742 5743 if (tp->phy_id == TG3_PHY_ID_BCM8002) 5744 tg3_init_bcm8002(tp); 5745 5746 /* Enable link change event even when serdes polling. */ 5747 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); 5748 udelay(40); 5749 5750 tp->link_config.rmt_adv = 0; 5751 mac_status = tr32(MAC_STATUS); 5752 5753 if (tg3_flag(tp, HW_AUTONEG)) 5754 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status); 5755 else 5756 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status); 5757 5758 tp->napi[0].hw_status->status = 5759 (SD_STATUS_UPDATED | 5760 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG)); 5761 5762 for (i = 0; i < 100; i++) { 5763 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | 5764 MAC_STATUS_CFG_CHANGED)); 5765 udelay(5); 5766 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED | 5767 MAC_STATUS_CFG_CHANGED | 5768 MAC_STATUS_LNKSTATE_CHANGED)) == 0) 5769 break; 5770 } 5771 5772 mac_status = tr32(MAC_STATUS); 5773 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) { 5774 current_link_up = false; 5775 if (tp->link_config.autoneg == AUTONEG_ENABLE && 5776 tp->serdes_counter == 0) { 5777 tw32_f(MAC_MODE, (tp->mac_mode | 5778 MAC_MODE_SEND_CONFIGS)); 5779 udelay(1); 5780 tw32_f(MAC_MODE, tp->mac_mode); 5781 } 5782 } 5783 5784 if (current_link_up) { 5785 tp->link_config.active_speed = SPEED_1000; 5786 tp->link_config.active_duplex = DUPLEX_FULL; 5787 tw32(MAC_LED_CTRL, (tp->led_ctrl | 5788 LED_CTRL_LNKLED_OVERRIDE | 5789 LED_CTRL_1000MBPS_ON)); 5790 } else { 5791 tp->link_config.active_speed = SPEED_UNKNOWN; 5792 tp->link_config.active_duplex = DUPLEX_UNKNOWN; 5793 tw32(MAC_LED_CTRL, (tp->led_ctrl | 5794 LED_CTRL_LNKLED_OVERRIDE | 5795 LED_CTRL_TRAFFIC_OVERRIDE)); 5796 } 5797 5798 if (!tg3_test_and_report_link_chg(tp, current_link_up)) { 5799 u32 now_pause_cfg = tp->link_config.active_flowctrl; 5800 if (orig_pause_cfg != now_pause_cfg || 5801 orig_active_speed != tp->link_config.active_speed || 5802 orig_active_duplex != tp->link_config.active_duplex) 5803 tg3_link_report(tp); 5804 } 5805 5806 return 0; 5807 } 5808 5809 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset) 5810 { 5811 int err = 0; 5812 u32 bmsr, bmcr; 5813 u32 current_speed = SPEED_UNKNOWN; 5814 u8 current_duplex = DUPLEX_UNKNOWN; 5815 bool 
current_link_up = false; 5816 u32 local_adv, remote_adv, sgsr; 5817 5818 if ((tg3_asic_rev(tp) == ASIC_REV_5719 || 5819 tg3_asic_rev(tp) == ASIC_REV_5720) && 5820 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) && 5821 (sgsr & SERDES_TG3_SGMII_MODE)) { 5822 5823 if (force_reset) 5824 tg3_phy_reset(tp); 5825 5826 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK; 5827 5828 if (!(sgsr & SERDES_TG3_LINK_UP)) { 5829 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 5830 } else { 5831 current_link_up = true; 5832 if (sgsr & SERDES_TG3_SPEED_1000) { 5833 current_speed = SPEED_1000; 5834 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 5835 } else if (sgsr & SERDES_TG3_SPEED_100) { 5836 current_speed = SPEED_100; 5837 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 5838 } else { 5839 current_speed = SPEED_10; 5840 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 5841 } 5842 5843 if (sgsr & SERDES_TG3_FULL_DUPLEX) 5844 current_duplex = DUPLEX_FULL; 5845 else 5846 current_duplex = DUPLEX_HALF; 5847 } 5848 5849 tw32_f(MAC_MODE, tp->mac_mode); 5850 udelay(40); 5851 5852 tg3_clear_mac_status(tp); 5853 5854 goto fiber_setup_done; 5855 } 5856 5857 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 5858 tw32_f(MAC_MODE, tp->mac_mode); 5859 udelay(40); 5860 5861 tg3_clear_mac_status(tp); 5862 5863 if (force_reset) 5864 tg3_phy_reset(tp); 5865 5866 tp->link_config.rmt_adv = 0; 5867 5868 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 5869 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 5870 if (tg3_asic_rev(tp) == ASIC_REV_5714) { 5871 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) 5872 bmsr |= BMSR_LSTATUS; 5873 else 5874 bmsr &= ~BMSR_LSTATUS; 5875 } 5876 5877 err |= tg3_readphy(tp, MII_BMCR, &bmcr); 5878 5879 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset && 5880 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { 5881 /* do nothing, just check for link up at the end */ 5882 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) { 5883 u32 adv, newadv; 5884 5885 err |= tg3_readphy(tp, MII_ADVERTISE, &adv); 5886 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF | 5887 ADVERTISE_1000XPAUSE | 5888 ADVERTISE_1000XPSE_ASYM | 5889 ADVERTISE_SLCT); 5890 5891 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); 5892 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising); 5893 5894 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) { 5895 tg3_writephy(tp, MII_ADVERTISE, newadv); 5896 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART; 5897 tg3_writephy(tp, MII_BMCR, bmcr); 5898 5899 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); 5900 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S; 5901 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5902 5903 return err; 5904 } 5905 } else { 5906 u32 new_bmcr; 5907 5908 bmcr &= ~BMCR_SPEED1000; 5909 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX); 5910 5911 if (tp->link_config.duplex == DUPLEX_FULL) 5912 new_bmcr |= BMCR_FULLDPLX; 5913 5914 if (new_bmcr != bmcr) { 5915 /* BMCR_SPEED1000 is a reserved bit that needs 5916 * to be set on write. 
5917 */ 5918 new_bmcr |= BMCR_SPEED1000; 5919 5920 /* Force a linkdown */ 5921 if (tp->link_up) { 5922 u32 adv; 5923 5924 err |= tg3_readphy(tp, MII_ADVERTISE, &adv); 5925 adv &= ~(ADVERTISE_1000XFULL | 5926 ADVERTISE_1000XHALF | 5927 ADVERTISE_SLCT); 5928 tg3_writephy(tp, MII_ADVERTISE, adv); 5929 tg3_writephy(tp, MII_BMCR, bmcr | 5930 BMCR_ANRESTART | 5931 BMCR_ANENABLE); 5932 udelay(10); 5933 tg3_carrier_off(tp); 5934 } 5935 tg3_writephy(tp, MII_BMCR, new_bmcr); 5936 bmcr = new_bmcr; 5937 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 5938 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 5939 if (tg3_asic_rev(tp) == ASIC_REV_5714) { 5940 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) 5941 bmsr |= BMSR_LSTATUS; 5942 else 5943 bmsr &= ~BMSR_LSTATUS; 5944 } 5945 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5946 } 5947 } 5948 5949 if (bmsr & BMSR_LSTATUS) { 5950 current_speed = SPEED_1000; 5951 current_link_up = true; 5952 if (bmcr & BMCR_FULLDPLX) 5953 current_duplex = DUPLEX_FULL; 5954 else 5955 current_duplex = DUPLEX_HALF; 5956 5957 local_adv = 0; 5958 remote_adv = 0; 5959 5960 if (bmcr & BMCR_ANENABLE) { 5961 u32 common; 5962 5963 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv); 5964 err |= tg3_readphy(tp, MII_LPA, &remote_adv); 5965 common = local_adv & remote_adv; 5966 if (common & (ADVERTISE_1000XHALF | 5967 ADVERTISE_1000XFULL)) { 5968 if (common & ADVERTISE_1000XFULL) 5969 current_duplex = DUPLEX_FULL; 5970 else 5971 current_duplex = DUPLEX_HALF; 5972 5973 tp->link_config.rmt_adv = 5974 mii_adv_to_ethtool_adv_x(remote_adv); 5975 } else if (!tg3_flag(tp, 5780_CLASS)) { 5976 /* Link is up via parallel detect */ 5977 } else { 5978 current_link_up = false; 5979 } 5980 } 5981 } 5982 5983 fiber_setup_done: 5984 if (current_link_up && current_duplex == DUPLEX_FULL) 5985 tg3_setup_flow_control(tp, local_adv, remote_adv); 5986 5987 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; 5988 if (tp->link_config.active_duplex == DUPLEX_HALF) 5989 tp->mac_mode |= MAC_MODE_HALF_DUPLEX; 5990 5991 tw32_f(MAC_MODE, tp->mac_mode); 5992 udelay(40); 5993 5994 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); 5995 5996 tp->link_config.active_speed = current_speed; 5997 tp->link_config.active_duplex = current_duplex; 5998 5999 tg3_test_and_report_link_chg(tp, current_link_up); 6000 return err; 6001 } 6002 6003 static void tg3_serdes_parallel_detect(struct tg3 *tp) 6004 { 6005 if (tp->serdes_counter) { 6006 /* Give autoneg time to complete. */ 6007 tp->serdes_counter--; 6008 return; 6009 } 6010 6011 if (!tp->link_up && 6012 (tp->link_config.autoneg == AUTONEG_ENABLE)) { 6013 u32 bmcr; 6014 6015 tg3_readphy(tp, MII_BMCR, &bmcr); 6016 if (bmcr & BMCR_ANENABLE) { 6017 u32 phy1, phy2; 6018 6019 /* Select shadow register 0x1f */ 6020 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00); 6021 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1); 6022 6023 /* Select expansion interrupt status register */ 6024 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 6025 MII_TG3_DSP_EXP1_INT_STAT); 6026 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); 6027 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); 6028 6029 if ((phy1 & 0x10) && !(phy2 & 0x20)) { 6030 /* We have signal detect and not receiving 6031 * config code words, link is up by parallel 6032 * detection. 
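 * (Bit meanings inferred from usage: phy1 bit 4 is taken as
 * signal detect in shadow register 0x1f, and phy2 bit 5 as
 * "config code words seen" in the expansion interrupt status
 * register.)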
6033 */ 6034 6035 bmcr &= ~BMCR_ANENABLE; 6036 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX; 6037 tg3_writephy(tp, MII_BMCR, bmcr); 6038 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT; 6039 } 6040 } 6041 } else if (tp->link_up && 6042 (tp->link_config.autoneg == AUTONEG_ENABLE) && 6043 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { 6044 u32 phy2; 6045 6046 /* Select expansion interrupt status register */ 6047 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 6048 MII_TG3_DSP_EXP1_INT_STAT); 6049 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); 6050 if (phy2 & 0x20) { 6051 u32 bmcr; 6052 6053 /* Config code words received, turn on autoneg. */ 6054 tg3_readphy(tp, MII_BMCR, &bmcr); 6055 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE); 6056 6057 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 6058 6059 } 6060 } 6061 } 6062 6063 static int tg3_setup_phy(struct tg3 *tp, bool force_reset) 6064 { 6065 u32 val; 6066 int err; 6067 6068 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 6069 err = tg3_setup_fiber_phy(tp, force_reset); 6070 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 6071 err = tg3_setup_fiber_mii_phy(tp, force_reset); 6072 else 6073 err = tg3_setup_copper_phy(tp, force_reset); 6074 6075 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) { 6076 u32 scale; 6077 6078 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK; 6079 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5) 6080 scale = 65; 6081 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25) 6082 scale = 6; 6083 else 6084 scale = 12; 6085 6086 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK; 6087 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT); 6088 tw32(GRC_MISC_CFG, val); 6089 } 6090 6091 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) | 6092 (6 << TX_LENGTHS_IPG_SHIFT); 6093 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 6094 tg3_asic_rev(tp) == ASIC_REV_5762) 6095 val |= tr32(MAC_TX_LENGTHS) & 6096 (TX_LENGTHS_JMB_FRM_LEN_MSK | 6097 TX_LENGTHS_CNT_DWN_VAL_MSK); 6098 6099 if (tp->link_config.active_speed == SPEED_1000 && 6100 tp->link_config.active_duplex == DUPLEX_HALF) 6101 tw32(MAC_TX_LENGTHS, val | 6102 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)); 6103 else 6104 tw32(MAC_TX_LENGTHS, val | 6105 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)); 6106 6107 if (!tg3_flag(tp, 5705_PLUS)) { 6108 if (tp->link_up) { 6109 tw32(HOSTCC_STAT_COAL_TICKS, 6110 tp->coal.stats_block_coalesce_usecs); 6111 } else { 6112 tw32(HOSTCC_STAT_COAL_TICKS, 0); 6113 } 6114 } 6115 6116 if (tg3_flag(tp, ASPM_WORKAROUND)) { 6117 val = tr32(PCIE_PWR_MGMT_THRESH); 6118 if (!tp->link_up) 6119 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) | 6120 tp->pwrmgmt_thresh; 6121 else 6122 val |= PCIE_PWR_MGMT_L1_THRESH_MSK; 6123 tw32(PCIE_PWR_MGMT_THRESH, val); 6124 } 6125 6126 return err; 6127 } 6128 6129 /* tp->lock must be held */ 6130 static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts) 6131 { 6132 u64 stamp; 6133 6134 ptp_read_system_prets(sts); 6135 stamp = tr32(TG3_EAV_REF_CLCK_LSB); 6136 ptp_read_system_postts(sts); 6137 stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32; 6138 6139 return stamp; 6140 } 6141 6142 /* tp->lock must be held */ 6143 static void tg3_refclk_write(struct tg3 *tp, u64 newval) 6144 { 6145 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL); 6146 6147 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP); 6148 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff); 6149 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32); 6150 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME); 6151 } 6152 6153 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync); 6154 static inline void 
tg3_full_unlock(struct tg3 *tp); 6155 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) 6156 { 6157 struct tg3 *tp = netdev_priv(dev); 6158 6159 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | 6160 SOF_TIMESTAMPING_RX_SOFTWARE | 6161 SOF_TIMESTAMPING_SOFTWARE; 6162 6163 if (tg3_flag(tp, PTP_CAPABLE)) { 6164 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE | 6165 SOF_TIMESTAMPING_RX_HARDWARE | 6166 SOF_TIMESTAMPING_RAW_HARDWARE; 6167 } 6168 6169 if (tp->ptp_clock) 6170 info->phc_index = ptp_clock_index(tp->ptp_clock); 6171 else 6172 info->phc_index = -1; 6173 6174 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); 6175 6176 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | 6177 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | 6178 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | 6179 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT); 6180 return 0; 6181 } 6182 6183 static int tg3_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) 6184 { 6185 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); 6186 u64 correction; 6187 bool neg_adj; 6188 6189 /* Frequency adjustment is performed using hardware with a 24 bit 6190 * accumulator and a programmable correction value. On each clk, the 6191 * correction value gets added to the accumulator and when it 6192 * overflows, the time counter is incremented/decremented. 6193 */ 6194 neg_adj = diff_by_scaled_ppm(1 << 24, scaled_ppm, &correction); 6195 6196 tg3_full_lock(tp, 0); 6197 6198 if (correction) 6199 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 6200 TG3_EAV_REF_CLK_CORRECT_EN | 6201 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | 6202 ((u32)correction & TG3_EAV_REF_CLK_CORRECT_MASK)); 6203 else 6204 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0); 6205 6206 tg3_full_unlock(tp); 6207 6208 return 0; 6209 } 6210 6211 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) 6212 { 6213 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); 6214 6215 tg3_full_lock(tp, 0); 6216 tp->ptp_adjust += delta; 6217 tg3_full_unlock(tp); 6218 6219 return 0; 6220 } 6221 6222 static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts, 6223 struct ptp_system_timestamp *sts) 6224 { 6225 u64 ns; 6226 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); 6227 6228 tg3_full_lock(tp, 0); 6229 ns = tg3_refclk_read(tp, sts); 6230 ns += tp->ptp_adjust; 6231 tg3_full_unlock(tp); 6232 6233 *ts = ns_to_timespec64(ns); 6234 6235 return 0; 6236 } 6237 6238 static int tg3_ptp_settime(struct ptp_clock_info *ptp, 6239 const struct timespec64 *ts) 6240 { 6241 u64 ns; 6242 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); 6243 6244 ns = timespec64_to_ns(ts); 6245 6246 tg3_full_lock(tp, 0); 6247 tg3_refclk_write(tp, ns); 6248 tp->ptp_adjust = 0; 6249 tg3_full_unlock(tp); 6250 6251 return 0; 6252 } 6253 6254 static int tg3_ptp_enable(struct ptp_clock_info *ptp, 6255 struct ptp_clock_request *rq, int on) 6256 { 6257 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); 6258 u32 clock_ctl; 6259 int rval = 0; 6260 6261 switch (rq->type) { 6262 case PTP_CLK_REQ_PEROUT: 6263 /* Reject requests with unsupported flags */ 6264 if (rq->perout.flags) 6265 return -EOPNOTSUPP; 6266 6267 if (rq->perout.index != 0) 6268 return -EINVAL; 6269 6270 tg3_full_lock(tp, 0); 6271 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL); 6272 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK; 6273 6274 if (on) { 6275 u64 nsec; 6276 6277 nsec = rq->perout.start.sec * 1000000000ULL + 6278 rq->perout.start.nsec; 6279 6280 if (rq->perout.period.sec || rq->perout.period.nsec) { 
6281 netdev_warn(tp->dev, 6282 "Device supports only a one-shot timesync output, period must be 0\n"); 6283 rval = -EINVAL; 6284 goto err_out; 6285 } 6286 6287 if (nsec & (1ULL << 63)) { 6288 netdev_warn(tp->dev, 6289 "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n"); 6290 rval = -EINVAL; 6291 goto err_out; 6292 } 6293 6294 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff)); 6295 tw32(TG3_EAV_WATCHDOG0_MSB, 6296 TG3_EAV_WATCHDOG0_EN | 6297 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK)); 6298 6299 tw32(TG3_EAV_REF_CLCK_CTL, 6300 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0); 6301 } else { 6302 tw32(TG3_EAV_WATCHDOG0_MSB, 0); 6303 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl); 6304 } 6305 6306 err_out: 6307 tg3_full_unlock(tp); 6308 return rval; 6309 6310 default: 6311 break; 6312 } 6313 6314 return -EOPNOTSUPP; 6315 } 6316 6317 static const struct ptp_clock_info tg3_ptp_caps = { 6318 .owner = THIS_MODULE, 6319 .name = "tg3 clock", 6320 .max_adj = 250000000, 6321 .n_alarm = 0, 6322 .n_ext_ts = 0, 6323 .n_per_out = 1, 6324 .n_pins = 0, 6325 .pps = 0, 6326 .adjfine = tg3_ptp_adjfine, 6327 .adjtime = tg3_ptp_adjtime, 6328 .gettimex64 = tg3_ptp_gettimex, 6329 .settime64 = tg3_ptp_settime, 6330 .enable = tg3_ptp_enable, 6331 }; 6332 6333 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock, 6334 struct skb_shared_hwtstamps *timestamp) 6335 { 6336 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps)); 6337 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) + 6338 tp->ptp_adjust); 6339 } 6340 6341 /* tp->lock must be held */ 6342 static void tg3_ptp_init(struct tg3 *tp) 6343 { 6344 if (!tg3_flag(tp, PTP_CAPABLE)) 6345 return; 6346 6347 /* Initialize the hardware clock to the system time. */ 6348 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real())); 6349 tp->ptp_adjust = 0; 6350 tp->ptp_info = tg3_ptp_caps; 6351 } 6352 6353 /* tp->lock must be held */ 6354 static void tg3_ptp_resume(struct tg3 *tp) 6355 { 6356 if (!tg3_flag(tp, PTP_CAPABLE)) 6357 return; 6358 6359 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust); 6360 tp->ptp_adjust = 0; 6361 } 6362 6363 static void tg3_ptp_fini(struct tg3 *tp) 6364 { 6365 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock) 6366 return; 6367 6368 ptp_clock_unregister(tp->ptp_clock); 6369 tp->ptp_clock = NULL; 6370 tp->ptp_adjust = 0; 6371 } 6372 6373 static inline int tg3_irq_sync(struct tg3 *tp) 6374 { 6375 return tp->irq_sync; 6376 } 6377 6378 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len) 6379 { 6380 int i; 6381 6382 dst = (u32 *)((u8 *)dst + off); 6383 for (i = 0; i < len; i += sizeof(u32)) 6384 *dst++ = tr32(off + i); 6385 } 6386 6387 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs) 6388 { 6389 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0); 6390 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200); 6391 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0); 6392 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0); 6393 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04); 6394 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80); 6395 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48); 6396 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04); 6397 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20); 6398 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c); 6399 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c); 6400 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c); 6401 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44); 6402 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04); 6403 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20); 6404 tg3_rd32_loop(tp, 
regs, RCVCC_MODE, 0x14); 6405 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08); 6406 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08); 6407 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100); 6408 6409 if (tg3_flag(tp, SUPPORT_MSIX)) 6410 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180); 6411 6412 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10); 6413 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58); 6414 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08); 6415 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08); 6416 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04); 6417 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04); 6418 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04); 6419 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04); 6420 6421 if (!tg3_flag(tp, 5705_PLUS)) { 6422 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04); 6423 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04); 6424 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04); 6425 } 6426 6427 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110); 6428 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120); 6429 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c); 6430 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04); 6431 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c); 6432 6433 if (tg3_flag(tp, NVRAM)) 6434 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24); 6435 } 6436 6437 static void tg3_dump_state(struct tg3 *tp) 6438 { 6439 int i; 6440 u32 *regs; 6441 6442 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC); 6443 if (!regs) 6444 return; 6445 6446 if (tg3_flag(tp, PCI_EXPRESS)) { 6447 /* Read up to but not including private PCI registers */ 6448 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32)) 6449 regs[i / sizeof(u32)] = tr32(i); 6450 } else 6451 tg3_dump_legacy_regs(tp, regs); 6452 6453 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) { 6454 if (!regs[i + 0] && !regs[i + 1] && 6455 !regs[i + 2] && !regs[i + 3]) 6456 continue; 6457 6458 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n", 6459 i * 4, 6460 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]); 6461 } 6462 6463 kfree(regs); 6464 6465 for (i = 0; i < tp->irq_cnt; i++) { 6466 struct tg3_napi *tnapi = &tp->napi[i]; 6467 6468 /* SW status block */ 6469 netdev_err(tp->dev, 6470 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n", 6471 i, 6472 tnapi->hw_status->status, 6473 tnapi->hw_status->status_tag, 6474 tnapi->hw_status->rx_jumbo_consumer, 6475 tnapi->hw_status->rx_consumer, 6476 tnapi->hw_status->rx_mini_consumer, 6477 tnapi->hw_status->idx[0].rx_producer, 6478 tnapi->hw_status->idx[0].tx_consumer); 6479 6480 netdev_err(tp->dev, 6481 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n", 6482 i, 6483 tnapi->last_tag, tnapi->last_irq_tag, 6484 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending, 6485 tnapi->rx_rcb_ptr, 6486 tnapi->prodring.rx_std_prod_idx, 6487 tnapi->prodring.rx_std_cons_idx, 6488 tnapi->prodring.rx_jmb_prod_idx, 6489 tnapi->prodring.rx_jmb_cons_idx); 6490 } 6491 } 6492 6493 /* This is called whenever we suspect that the system chipset is re- 6494 * ordering the sequence of MMIO to the tx send mailbox. The symptom 6495 * is bogus tx completions. We try to recover by setting the 6496 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later 6497 * in the workqueue. 6498 */ 6499 static void tg3_tx_recover(struct tg3 *tp) 6500 { 6501 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) || 6502 tp->write32_tx_mbox == tg3_write_indirect_mbox); 6503 6504 netdev_warn(tp->dev, 6505 "The system may be re-ordering memory-mapped I/O " 6506 "cycles to the network device, attempting to recover. 
" 6507 "Please report the problem to the driver maintainer " 6508 "and include system chipset information.\n"); 6509 6510 tg3_flag_set(tp, TX_RECOVERY_PENDING); 6511 } 6512 6513 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi) 6514 { 6515 /* Tell compiler to fetch tx indices from memory. */ 6516 barrier(); 6517 return tnapi->tx_pending - 6518 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1)); 6519 } 6520 6521 /* Tigon3 never reports partial packet sends. So we do not 6522 * need special logic to handle SKBs that have not had all 6523 * of their frags sent yet, like SunGEM does. 6524 */ 6525 static void tg3_tx(struct tg3_napi *tnapi) 6526 { 6527 struct tg3 *tp = tnapi->tp; 6528 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer; 6529 u32 sw_idx = tnapi->tx_cons; 6530 struct netdev_queue *txq; 6531 int index = tnapi - tp->napi; 6532 unsigned int pkts_compl = 0, bytes_compl = 0; 6533 6534 if (tg3_flag(tp, ENABLE_TSS)) 6535 index--; 6536 6537 txq = netdev_get_tx_queue(tp->dev, index); 6538 6539 while (sw_idx != hw_idx) { 6540 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx]; 6541 struct sk_buff *skb = ri->skb; 6542 int i, tx_bug = 0; 6543 6544 if (unlikely(skb == NULL)) { 6545 tg3_tx_recover(tp); 6546 return; 6547 } 6548 6549 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) { 6550 struct skb_shared_hwtstamps timestamp; 6551 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB); 6552 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32; 6553 6554 tg3_hwclock_to_timestamp(tp, hwclock, ×tamp); 6555 6556 skb_tstamp_tx(skb, ×tamp); 6557 } 6558 6559 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), 6560 skb_headlen(skb), DMA_TO_DEVICE); 6561 6562 ri->skb = NULL; 6563 6564 while (ri->fragmented) { 6565 ri->fragmented = false; 6566 sw_idx = NEXT_TX(sw_idx); 6567 ri = &tnapi->tx_buffers[sw_idx]; 6568 } 6569 6570 sw_idx = NEXT_TX(sw_idx); 6571 6572 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 6573 ri = &tnapi->tx_buffers[sw_idx]; 6574 if (unlikely(ri->skb != NULL || sw_idx == hw_idx)) 6575 tx_bug = 1; 6576 6577 dma_unmap_page(&tp->pdev->dev, 6578 dma_unmap_addr(ri, mapping), 6579 skb_frag_size(&skb_shinfo(skb)->frags[i]), 6580 DMA_TO_DEVICE); 6581 6582 while (ri->fragmented) { 6583 ri->fragmented = false; 6584 sw_idx = NEXT_TX(sw_idx); 6585 ri = &tnapi->tx_buffers[sw_idx]; 6586 } 6587 6588 sw_idx = NEXT_TX(sw_idx); 6589 } 6590 6591 pkts_compl++; 6592 bytes_compl += skb->len; 6593 6594 dev_consume_skb_any(skb); 6595 6596 if (unlikely(tx_bug)) { 6597 tg3_tx_recover(tp); 6598 return; 6599 } 6600 } 6601 6602 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl); 6603 6604 tnapi->tx_cons = sw_idx; 6605 6606 /* Need to make the tx_cons update visible to tg3_start_xmit() 6607 * before checking for netif_queue_stopped(). Without the 6608 * memory barrier, there is a small possibility that tg3_start_xmit() 6609 * will miss it and cause the queue to be stopped forever. 
6610 */ 6611 smp_mb(); 6612 6613 if (unlikely(netif_tx_queue_stopped(txq) && 6614 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) { 6615 __netif_tx_lock(txq, smp_processor_id()); 6616 if (netif_tx_queue_stopped(txq) && 6617 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))) 6618 netif_tx_wake_queue(txq); 6619 __netif_tx_unlock(txq); 6620 } 6621 } 6622 6623 static void tg3_frag_free(bool is_frag, void *data) 6624 { 6625 if (is_frag) 6626 skb_free_frag(data); 6627 else 6628 kfree(data); 6629 } 6630 6631 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz) 6632 { 6633 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) + 6634 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 6635 6636 if (!ri->data) 6637 return; 6638 6639 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), map_sz, 6640 DMA_FROM_DEVICE); 6641 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data); 6642 ri->data = NULL; 6643 } 6644 6645 6646 /* Returns size of skb allocated or < 0 on error. 6647 * 6648 * We only need to fill in the address because the other members 6649 * of the RX descriptor are invariant, see tg3_init_rings. 6650 * 6651 * Note the purposeful asymmetry of cpu vs. chip accesses. For 6652 * posting buffers we only dirty the first cache line of the RX 6653 * descriptor (containing the address). Whereas for the RX status 6654 * buffers the cpu only reads the last cacheline of the RX descriptor 6655 * (to fetch the error flags, vlan tag, checksum, and opaque cookie). 6656 */ 6657 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr, 6658 u32 opaque_key, u32 dest_idx_unmasked, 6659 unsigned int *frag_size) 6660 { 6661 struct tg3_rx_buffer_desc *desc; 6662 struct ring_info *map; 6663 u8 *data; 6664 dma_addr_t mapping; 6665 int skb_size, data_size, dest_idx; 6666 6667 switch (opaque_key) { 6668 case RXD_OPAQUE_RING_STD: 6669 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask; 6670 desc = &tpr->rx_std[dest_idx]; 6671 map = &tpr->rx_std_buffers[dest_idx]; 6672 data_size = tp->rx_pkt_map_sz; 6673 break; 6674 6675 case RXD_OPAQUE_RING_JUMBO: 6676 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask; 6677 desc = &tpr->rx_jmb[dest_idx].std; 6678 map = &tpr->rx_jmb_buffers[dest_idx]; 6679 data_size = TG3_RX_JMB_MAP_SZ; 6680 break; 6681 6682 default: 6683 return -EINVAL; 6684 } 6685 6686 /* Do not overwrite any of the map or rp information 6687 * until we are sure we can commit to a new buffer. 6688 * 6689 * Callers depend upon this behavior and assume that 6690 * we leave everything unchanged if we fail. 6691 */ 6692 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) + 6693 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 6694 if (skb_size <= PAGE_SIZE) { 6695 data = napi_alloc_frag(skb_size); 6696 *frag_size = skb_size; 6697 } else { 6698 data = kmalloc(skb_size, GFP_ATOMIC); 6699 *frag_size = 0; 6700 } 6701 if (!data) 6702 return -ENOMEM; 6703 6704 mapping = dma_map_single(&tp->pdev->dev, data + TG3_RX_OFFSET(tp), 6705 data_size, DMA_FROM_DEVICE); 6706 if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) { 6707 tg3_frag_free(skb_size <= PAGE_SIZE, data); 6708 return -EIO; 6709 } 6710 6711 map->data = data; 6712 dma_unmap_addr_set(map, mapping, mapping); 6713 6714 desc->addr_hi = ((u64)mapping >> 32); 6715 desc->addr_lo = ((u64)mapping & 0xffffffff); 6716 6717 return data_size; 6718 } 6719 6720 /* We only need to move over in the address because the other 6721 * members of the RX descriptor are invariant.
See notes above 6722 * tg3_alloc_rx_data for full details. 6723 */ 6724 static void tg3_recycle_rx(struct tg3_napi *tnapi, 6725 struct tg3_rx_prodring_set *dpr, 6726 u32 opaque_key, int src_idx, 6727 u32 dest_idx_unmasked) 6728 { 6729 struct tg3 *tp = tnapi->tp; 6730 struct tg3_rx_buffer_desc *src_desc, *dest_desc; 6731 struct ring_info *src_map, *dest_map; 6732 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring; 6733 int dest_idx; 6734 6735 switch (opaque_key) { 6736 case RXD_OPAQUE_RING_STD: 6737 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask; 6738 dest_desc = &dpr->rx_std[dest_idx]; 6739 dest_map = &dpr->rx_std_buffers[dest_idx]; 6740 src_desc = &spr->rx_std[src_idx]; 6741 src_map = &spr->rx_std_buffers[src_idx]; 6742 break; 6743 6744 case RXD_OPAQUE_RING_JUMBO: 6745 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask; 6746 dest_desc = &dpr->rx_jmb[dest_idx].std; 6747 dest_map = &dpr->rx_jmb_buffers[dest_idx]; 6748 src_desc = &spr->rx_jmb[src_idx].std; 6749 src_map = &spr->rx_jmb_buffers[src_idx]; 6750 break; 6751 6752 default: 6753 return; 6754 } 6755 6756 dest_map->data = src_map->data; 6757 dma_unmap_addr_set(dest_map, mapping, 6758 dma_unmap_addr(src_map, mapping)); 6759 dest_desc->addr_hi = src_desc->addr_hi; 6760 dest_desc->addr_lo = src_desc->addr_lo; 6761 6762 /* Ensure that the update to the skb happens after the physical 6763 * addresses have been transferred to the new BD location. 6764 */ 6765 smp_wmb(); 6766 6767 src_map->data = NULL; 6768 } 6769 6770 /* The RX ring scheme is composed of multiple rings which post fresh 6771 * buffers to the chip, and one special ring the chip uses to report 6772 * status back to the host. 6773 * 6774 * The special ring reports the status of received packets to the 6775 * host. The chip does not write into the original descriptor the 6776 * RX buffer was obtained from. The chip simply takes the original 6777 * descriptor as provided by the host, updates the status and length 6778 * fields, then writes this into the next status ring entry. 6779 * 6780 * Each ring the host uses to post buffers to the chip is described 6781 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives, 6782 * it is first placed into the on-chip RAM. When the packet's length 6783 * is known, it walks down the TG3_BDINFO entries to select the ring. 6784 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO 6785 * which is within the range of the new packet's length is chosen. 6786 * 6787 * The "separate ring for rx status" scheme may sound odd, but it makes 6788 * sense from a cache coherency perspective. If only the host writes 6789 * to the buffer post rings, and only the chip writes to the rx status 6790 * rings, then cache lines never move beyond shared-modified state. 6791 * If both the host and chip were to write into the same ring, cache line 6792 * eviction could occur since both entities want it in an exclusive state. 6793 */ 6794 static int tg3_rx(struct tg3_napi *tnapi, int budget) 6795 { 6796 struct tg3 *tp = tnapi->tp; 6797 u32 work_mask, rx_std_posted = 0; 6798 u32 std_prod_idx, jmb_prod_idx; 6799 u32 sw_idx = tnapi->rx_rcb_ptr; 6800 u16 hw_idx; 6801 int received; 6802 struct tg3_rx_prodring_set *tpr = &tnapi->prodring; 6803 6804 hw_idx = *(tnapi->rx_rcb_prod_idx); 6805 /* 6806 * We need to order the read of hw_idx and the read of 6807 * the opaque cookie.
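 * (Illustrative ordering, assuming the usual DMA producer/consumer
 * roles -- a sketch, not verbatim code:
 *
 *	chip (producer)                 driver (consumer)
 *	---------------                 -----------------
 *	DMA status ring entry           hw_idx = *rx_rcb_prod_idx;
 *	DMA new producer index          rmb();
 *	                                read rx_rcb[sw_idx..hw_idx-1]
 *
 * Without the rmb() the descriptor reads could be satisfied ahead of
 * the index read and return stale opaque cookies for entries the
 * index claims are complete.)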
6808 */ 6809 rmb(); 6810 work_mask = 0; 6811 received = 0; 6812 std_prod_idx = tpr->rx_std_prod_idx; 6813 jmb_prod_idx = tpr->rx_jmb_prod_idx; 6814 while (sw_idx != hw_idx && budget > 0) { 6815 struct ring_info *ri; 6816 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx]; 6817 unsigned int len; 6818 struct sk_buff *skb; 6819 dma_addr_t dma_addr; 6820 u32 opaque_key, desc_idx, *post_ptr; 6821 u8 *data; 6822 u64 tstamp = 0; 6823 6824 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; 6825 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; 6826 if (opaque_key == RXD_OPAQUE_RING_STD) { 6827 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx]; 6828 dma_addr = dma_unmap_addr(ri, mapping); 6829 data = ri->data; 6830 post_ptr = &std_prod_idx; 6831 rx_std_posted++; 6832 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { 6833 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx]; 6834 dma_addr = dma_unmap_addr(ri, mapping); 6835 data = ri->data; 6836 post_ptr = &jmb_prod_idx; 6837 } else 6838 goto next_pkt_nopost; 6839 6840 work_mask |= opaque_key; 6841 6842 if (desc->err_vlan & RXD_ERR_MASK) { 6843 drop_it: 6844 tg3_recycle_rx(tnapi, tpr, opaque_key, 6845 desc_idx, *post_ptr); 6846 drop_it_no_recycle: 6847 /* Other statistics kept track of by card. */ 6848 tp->rx_dropped++; 6849 goto next_pkt; 6850 } 6851 6852 prefetch(data + TG3_RX_OFFSET(tp)); 6853 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 6854 ETH_FCS_LEN; 6855 6856 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) == 6857 RXD_FLAG_PTPSTAT_PTPV1 || 6858 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) == 6859 RXD_FLAG_PTPSTAT_PTPV2) { 6860 tstamp = tr32(TG3_RX_TSTAMP_LSB); 6861 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32; 6862 } 6863 6864 if (len > TG3_RX_COPY_THRESH(tp)) { 6865 int skb_size; 6866 unsigned int frag_size; 6867 6868 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key, 6869 *post_ptr, &frag_size); 6870 if (skb_size < 0) 6871 goto drop_it; 6872 6873 dma_unmap_single(&tp->pdev->dev, dma_addr, skb_size, 6874 DMA_FROM_DEVICE); 6875 6876 /* Ensure that the update to the data happens 6877 * after the usage of the old DMA mapping. 
6878 */ 6879 smp_wmb(); 6880 6881 ri->data = NULL; 6882 6883 skb = build_skb(data, frag_size); 6884 if (!skb) { 6885 tg3_frag_free(frag_size != 0, data); 6886 goto drop_it_no_recycle; 6887 } 6888 skb_reserve(skb, TG3_RX_OFFSET(tp)); 6889 } else { 6890 tg3_recycle_rx(tnapi, tpr, opaque_key, 6891 desc_idx, *post_ptr); 6892 6893 skb = netdev_alloc_skb(tp->dev, 6894 len + TG3_RAW_IP_ALIGN); 6895 if (skb == NULL) 6896 goto drop_it_no_recycle; 6897 6898 skb_reserve(skb, TG3_RAW_IP_ALIGN); 6899 dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr, len, 6900 DMA_FROM_DEVICE); 6901 memcpy(skb->data, 6902 data + TG3_RX_OFFSET(tp), 6903 len); 6904 dma_sync_single_for_device(&tp->pdev->dev, dma_addr, 6905 len, DMA_FROM_DEVICE); 6906 } 6907 6908 skb_put(skb, len); 6909 if (tstamp) 6910 tg3_hwclock_to_timestamp(tp, tstamp, 6911 skb_hwtstamps(skb)); 6912 6913 if ((tp->dev->features & NETIF_F_RXCSUM) && 6914 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && 6915 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK) 6916 >> RXD_TCPCSUM_SHIFT) == 0xffff)) 6917 skb->ip_summed = CHECKSUM_UNNECESSARY; 6918 else 6919 skb_checksum_none_assert(skb); 6920 6921 skb->protocol = eth_type_trans(skb, tp->dev); 6922 6923 if (len > (tp->dev->mtu + ETH_HLEN) && 6924 skb->protocol != htons(ETH_P_8021Q) && 6925 skb->protocol != htons(ETH_P_8021AD)) { 6926 dev_kfree_skb_any(skb); 6927 goto drop_it_no_recycle; 6928 } 6929 6930 if (desc->type_flags & RXD_FLAG_VLAN && 6931 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) 6932 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 6933 desc->err_vlan & RXD_VLAN_MASK); 6934 6935 napi_gro_receive(&tnapi->napi, skb); 6936 6937 received++; 6938 budget--; 6939 6940 next_pkt: 6941 (*post_ptr)++; 6942 6943 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) { 6944 tpr->rx_std_prod_idx = std_prod_idx & 6945 tp->rx_std_ring_mask; 6946 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, 6947 tpr->rx_std_prod_idx); 6948 work_mask &= ~RXD_OPAQUE_RING_STD; 6949 rx_std_posted = 0; 6950 } 6951 next_pkt_nopost: 6952 sw_idx++; 6953 sw_idx &= tp->rx_ret_ring_mask; 6954 6955 /* Refresh hw_idx to see if there is new work */ 6956 if (sw_idx == hw_idx) { 6957 hw_idx = *(tnapi->rx_rcb_prod_idx); 6958 rmb(); 6959 } 6960 } 6961 6962 /* ACK the status ring. */ 6963 tnapi->rx_rcb_ptr = sw_idx; 6964 tw32_rx_mbox(tnapi->consmbox, sw_idx); 6965 6966 /* Refill RX ring(s). */ 6967 if (!tg3_flag(tp, ENABLE_RSS)) { 6968 /* Sync BD data before updating mailbox */ 6969 wmb(); 6970 6971 if (work_mask & RXD_OPAQUE_RING_STD) { 6972 tpr->rx_std_prod_idx = std_prod_idx & 6973 tp->rx_std_ring_mask; 6974 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, 6975 tpr->rx_std_prod_idx); 6976 } 6977 if (work_mask & RXD_OPAQUE_RING_JUMBO) { 6978 tpr->rx_jmb_prod_idx = jmb_prod_idx & 6979 tp->rx_jmb_ring_mask; 6980 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, 6981 tpr->rx_jmb_prod_idx); 6982 } 6983 } else if (work_mask) { 6984 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be 6985 * updated before the producer indices can be updated. 
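 * (The consuming side is tg3_rx_prodring_xfer(), which reads the
 * producer index, issues smp_rmb(), and only then copies the
 * ring_info entries -- the usual publish/consume pairing, sketched:
 *
 *	here (publish)                  xfer (consume)
 *	--------------                  --------------
 *	write buffers[]                 idx = prod_idx;
 *	smp_wmb();                      smp_rmb();
 *	prod_idx = new index;           read buffers[..idx]
 *
 * so the consumer never observes a published index whose entries are
 * not yet visible.)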
6986 */ 6987 smp_wmb(); 6988 6989 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask; 6990 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask; 6991 6992 if (tnapi != &tp->napi[1]) { 6993 tp->rx_refill = true; 6994 napi_schedule(&tp->napi[1].napi); 6995 } 6996 } 6997 6998 return received; 6999 } 7000 7001 static void tg3_poll_link(struct tg3 *tp) 7002 { 7003 /* handle link change and other phy events */ 7004 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) { 7005 struct tg3_hw_status *sblk = tp->napi[0].hw_status; 7006 7007 if (sblk->status & SD_STATUS_LINK_CHG) { 7008 sblk->status = SD_STATUS_UPDATED | 7009 (sblk->status & ~SD_STATUS_LINK_CHG); 7010 spin_lock(&tp->lock); 7011 if (tg3_flag(tp, USE_PHYLIB)) { 7012 tw32_f(MAC_STATUS, 7013 (MAC_STATUS_SYNC_CHANGED | 7014 MAC_STATUS_CFG_CHANGED | 7015 MAC_STATUS_MI_COMPLETION | 7016 MAC_STATUS_LNKSTATE_CHANGED)); 7017 udelay(40); 7018 } else 7019 tg3_setup_phy(tp, false); 7020 spin_unlock(&tp->lock); 7021 } 7022 } 7023 } 7024 7025 static int tg3_rx_prodring_xfer(struct tg3 *tp, 7026 struct tg3_rx_prodring_set *dpr, 7027 struct tg3_rx_prodring_set *spr) 7028 { 7029 u32 si, di, cpycnt, src_prod_idx; 7030 int i, err = 0; 7031 7032 while (1) { 7033 src_prod_idx = spr->rx_std_prod_idx; 7034 7035 /* Make sure updates to the rx_std_buffers[] entries and the 7036 * standard producer index are seen in the correct order. 7037 */ 7038 smp_rmb(); 7039 7040 if (spr->rx_std_cons_idx == src_prod_idx) 7041 break; 7042 7043 if (spr->rx_std_cons_idx < src_prod_idx) 7044 cpycnt = src_prod_idx - spr->rx_std_cons_idx; 7045 else 7046 cpycnt = tp->rx_std_ring_mask + 1 - 7047 spr->rx_std_cons_idx; 7048 7049 cpycnt = min(cpycnt, 7050 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx); 7051 7052 si = spr->rx_std_cons_idx; 7053 di = dpr->rx_std_prod_idx; 7054 7055 for (i = di; i < di + cpycnt; i++) { 7056 if (dpr->rx_std_buffers[i].data) { 7057 cpycnt = i - di; 7058 err = -ENOSPC; 7059 break; 7060 } 7061 } 7062 7063 if (!cpycnt) 7064 break; 7065 7066 /* Ensure that updates to the rx_std_buffers ring and the 7067 * shadowed hardware producer ring from tg3_recycle_skb() are 7068 * ordered correctly WRT the skb check above. 7069 */ 7070 smp_rmb(); 7071 7072 memcpy(&dpr->rx_std_buffers[di], 7073 &spr->rx_std_buffers[si], 7074 cpycnt * sizeof(struct ring_info)); 7075 7076 for (i = 0; i < cpycnt; i++, di++, si++) { 7077 struct tg3_rx_buffer_desc *sbd, *dbd; 7078 sbd = &spr->rx_std[si]; 7079 dbd = &dpr->rx_std[di]; 7080 dbd->addr_hi = sbd->addr_hi; 7081 dbd->addr_lo = sbd->addr_lo; 7082 } 7083 7084 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) & 7085 tp->rx_std_ring_mask; 7086 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) & 7087 tp->rx_std_ring_mask; 7088 } 7089 7090 while (1) { 7091 src_prod_idx = spr->rx_jmb_prod_idx; 7092 7093 /* Make sure updates to the rx_jmb_buffers[] entries and 7094 * the jumbo producer index are seen in the correct order. 
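 * (Index arithmetic aside, assuming a 512-entry ring purely for the
 * sake of example: with cons_idx = 500 and prod_idx = 20 the producer
 * has wrapped, so the first pass copies mask + 1 - cons_idx = 12
 * entries up to the end of the ring, and the next loop iteration
 * picks up the remaining 20 from index 0.)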
7095 */ 7096 smp_rmb(); 7097 7098 if (spr->rx_jmb_cons_idx == src_prod_idx) 7099 break; 7100 7101 if (spr->rx_jmb_cons_idx < src_prod_idx) 7102 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx; 7103 else 7104 cpycnt = tp->rx_jmb_ring_mask + 1 - 7105 spr->rx_jmb_cons_idx; 7106 7107 cpycnt = min(cpycnt, 7108 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx); 7109 7110 si = spr->rx_jmb_cons_idx; 7111 di = dpr->rx_jmb_prod_idx; 7112 7113 for (i = di; i < di + cpycnt; i++) { 7114 if (dpr->rx_jmb_buffers[i].data) { 7115 cpycnt = i - di; 7116 err = -ENOSPC; 7117 break; 7118 } 7119 } 7120 7121 if (!cpycnt) 7122 break; 7123 7124 /* Ensure that updates to the rx_jmb_buffers ring and the 7125 * shadowed hardware producer ring from tg3_recycle_skb() are 7126 * ordered correctly WRT the skb check above. 7127 */ 7128 smp_rmb(); 7129 7130 memcpy(&dpr->rx_jmb_buffers[di], 7131 &spr->rx_jmb_buffers[si], 7132 cpycnt * sizeof(struct ring_info)); 7133 7134 for (i = 0; i < cpycnt; i++, di++, si++) { 7135 struct tg3_rx_buffer_desc *sbd, *dbd; 7136 sbd = &spr->rx_jmb[si].std; 7137 dbd = &dpr->rx_jmb[di].std; 7138 dbd->addr_hi = sbd->addr_hi; 7139 dbd->addr_lo = sbd->addr_lo; 7140 } 7141 7142 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) & 7143 tp->rx_jmb_ring_mask; 7144 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) & 7145 tp->rx_jmb_ring_mask; 7146 } 7147 7148 return err; 7149 } 7150 7151 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) 7152 { 7153 struct tg3 *tp = tnapi->tp; 7154 7155 /* run TX completion thread */ 7156 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) { 7157 tg3_tx(tnapi); 7158 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) 7159 return work_done; 7160 } 7161 7162 if (!tnapi->rx_rcb_prod_idx) 7163 return work_done; 7164 7165 /* run RX thread, within the bounds set by NAPI. 
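 * (Budget contract sketch, using the driver's own names:
 *
 *	work_done += tg3_rx(tnapi, budget - work_done);
 *	if (work_done >= budget)
 *		return work_done;	<-- no napi_complete(), poll again
 *
 * tg3_rx() may consume at most the remaining budget, and a poll that
 * exhausts its budget must return without re-enabling interrupts so
 * the NAPI core schedules it again.)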
7166 * All RX "locking" is done by ensuring outside 7167 * code synchronizes with tg3->napi.poll() 7168 */ 7169 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) 7170 work_done += tg3_rx(tnapi, budget - work_done); 7171 7172 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) { 7173 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring; 7174 int i, err = 0; 7175 u32 std_prod_idx = dpr->rx_std_prod_idx; 7176 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx; 7177 7178 tp->rx_refill = false; 7179 for (i = 1; i <= tp->rxq_cnt; i++) 7180 err |= tg3_rx_prodring_xfer(tp, dpr, 7181 &tp->napi[i].prodring); 7182 7183 wmb(); 7184 7185 if (std_prod_idx != dpr->rx_std_prod_idx) 7186 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, 7187 dpr->rx_std_prod_idx); 7188 7189 if (jmb_prod_idx != dpr->rx_jmb_prod_idx) 7190 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, 7191 dpr->rx_jmb_prod_idx); 7192 7193 if (err) 7194 tw32_f(HOSTCC_MODE, tp->coal_now); 7195 } 7196 7197 return work_done; 7198 } 7199 7200 static inline void tg3_reset_task_schedule(struct tg3 *tp) 7201 { 7202 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags)) 7203 schedule_work(&tp->reset_task); 7204 } 7205 7206 static inline void tg3_reset_task_cancel(struct tg3 *tp) 7207 { 7208 if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags)) 7209 cancel_work_sync(&tp->reset_task); 7210 tg3_flag_clear(tp, TX_RECOVERY_PENDING); 7211 } 7212 7213 static int tg3_poll_msix(struct napi_struct *napi, int budget) 7214 { 7215 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); 7216 struct tg3 *tp = tnapi->tp; 7217 int work_done = 0; 7218 struct tg3_hw_status *sblk = tnapi->hw_status; 7219 7220 while (1) { 7221 work_done = tg3_poll_work(tnapi, work_done, budget); 7222 7223 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) 7224 goto tx_recovery; 7225 7226 if (unlikely(work_done >= budget)) 7227 break; 7228 7229 /* tp->last_tag is used in tg3_int_reenable() below 7230 * to tell the hw how much work has been processed, 7231 * so we must read it before checking for more work. 7232 */ 7233 tnapi->last_tag = sblk->status_tag; 7234 tnapi->last_irq_tag = tnapi->last_tag; 7235 rmb(); 7236 7237 /* check for RX/TX work to do */ 7238 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons && 7239 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) { 7240 7241 /* This test here is not race free, but will reduce 7242 * the number of interrupts by looping again. 7243 */ 7244 if (tnapi == &tp->napi[1] && tp->rx_refill) 7245 continue; 7246 7247 napi_complete_done(napi, work_done); 7248 /* Reenable interrupts. */ 7249 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); 7250 7251 /* This test here is synchronized by napi_schedule() 7252 * and napi_complete() to close the race condition. 7253 */ 7254 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) { 7255 tw32(HOSTCC_MODE, tp->coalesce_mode | 7256 HOSTCC_MODE_ENABLE | 7257 tnapi->coal_now); 7258 } 7259 break; 7260 } 7261 } 7262 7263 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1); 7264 return work_done; 7265 7266 tx_recovery: 7267 /* work_done is guaranteed to be less than budget. */ 7268 napi_complete(napi); 7269 tg3_reset_task_schedule(tp); 7270 return work_done; 7271 } 7272 7273 static void tg3_process_error(struct tg3 *tp) 7274 { 7275 u32 val; 7276 bool real_error = false; 7277 7278 if (tg3_flag(tp, ERROR_PROCESSED)) 7279 return; 7280 7281 /* Check Flow Attention register */ 7282 val = tr32(HOSTCC_FLOW_ATTN); 7283 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) { 7284 netdev_err(tp->dev, "FLOW Attention error. 
Resetting chip.\n"); 7285 real_error = true; 7286 } 7287 7288 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) { 7289 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n"); 7290 real_error = true; 7291 } 7292 7293 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) { 7294 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n"); 7295 real_error = true; 7296 } 7297 7298 if (!real_error) 7299 return; 7300 7301 tg3_dump_state(tp); 7302 7303 tg3_flag_set(tp, ERROR_PROCESSED); 7304 tg3_reset_task_schedule(tp); 7305 } 7306 7307 static int tg3_poll(struct napi_struct *napi, int budget) 7308 { 7309 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); 7310 struct tg3 *tp = tnapi->tp; 7311 int work_done = 0; 7312 struct tg3_hw_status *sblk = tnapi->hw_status; 7313 7314 while (1) { 7315 if (sblk->status & SD_STATUS_ERROR) 7316 tg3_process_error(tp); 7317 7318 tg3_poll_link(tp); 7319 7320 work_done = tg3_poll_work(tnapi, work_done, budget); 7321 7322 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) 7323 goto tx_recovery; 7324 7325 if (unlikely(work_done >= budget)) 7326 break; 7327 7328 if (tg3_flag(tp, TAGGED_STATUS)) { 7329 /* tp->last_tag is used in tg3_int_reenable() below 7330 * to tell the hw how much work has been processed, 7331 * so we must read it before checking for more work. 7332 */ 7333 tnapi->last_tag = sblk->status_tag; 7334 tnapi->last_irq_tag = tnapi->last_tag; 7335 rmb(); 7336 } else 7337 sblk->status &= ~SD_STATUS_UPDATED; 7338 7339 if (likely(!tg3_has_work(tnapi))) { 7340 napi_complete_done(napi, work_done); 7341 tg3_int_reenable(tnapi); 7342 break; 7343 } 7344 } 7345 7346 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1); 7347 return work_done; 7348 7349 tx_recovery: 7350 /* work_done is guaranteed to be less than budget. 
*/ 7351 napi_complete(napi); 7352 tg3_reset_task_schedule(tp); 7353 return work_done; 7354 } 7355 7356 static void tg3_napi_disable(struct tg3 *tp) 7357 { 7358 int i; 7359 7360 for (i = tp->irq_cnt - 1; i >= 0; i--) 7361 napi_disable(&tp->napi[i].napi); 7362 } 7363 7364 static void tg3_napi_enable(struct tg3 *tp) 7365 { 7366 int i; 7367 7368 for (i = 0; i < tp->irq_cnt; i++) 7369 napi_enable(&tp->napi[i].napi); 7370 } 7371 7372 static void tg3_napi_init(struct tg3 *tp) 7373 { 7374 int i; 7375 7376 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll); 7377 for (i = 1; i < tp->irq_cnt; i++) 7378 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix); 7379 } 7380 7381 static void tg3_napi_fini(struct tg3 *tp) 7382 { 7383 int i; 7384 7385 for (i = 0; i < tp->irq_cnt; i++) 7386 netif_napi_del(&tp->napi[i].napi); 7387 } 7388 7389 static inline void tg3_netif_stop(struct tg3 *tp) 7390 { 7391 netif_trans_update(tp->dev); /* prevent tx timeout */ 7392 tg3_napi_disable(tp); 7393 netif_carrier_off(tp->dev); 7394 netif_tx_disable(tp->dev); 7395 } 7396 7397 /* tp->lock must be held */ 7398 static inline void tg3_netif_start(struct tg3 *tp) 7399 { 7400 tg3_ptp_resume(tp); 7401 7402 /* NOTE: unconditional netif_tx_wake_all_queues is only 7403 * appropriate so long as all callers are assured to 7404 * have free tx slots (such as after tg3_init_hw) 7405 */ 7406 netif_tx_wake_all_queues(tp->dev); 7407 7408 if (tp->link_up) 7409 netif_carrier_on(tp->dev); 7410 7411 tg3_napi_enable(tp); 7412 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED; 7413 tg3_enable_ints(tp); 7414 } 7415 7416 static void tg3_irq_quiesce(struct tg3 *tp) 7417 __releases(tp->lock) 7418 __acquires(tp->lock) 7419 { 7420 int i; 7421 7422 BUG_ON(tp->irq_sync); 7423 7424 tp->irq_sync = 1; 7425 smp_mb(); 7426 7427 spin_unlock_bh(&tp->lock); 7428 7429 for (i = 0; i < tp->irq_cnt; i++) 7430 synchronize_irq(tp->napi[i].irq_vec); 7431 7432 spin_lock_bh(&tp->lock); 7433 } 7434 7435 /* Fully shut down all tg3 driver activity elsewhere in the system. 7436 * If irq_sync is non-zero, then we must also synchronize with the 7437 * IRQ handler. Most of the time, this is not necessary except when 7438 * shutting down the device. 7439 */ 7440 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync) 7441 { 7442 spin_lock_bh(&tp->lock); 7443 if (irq_sync) 7444 tg3_irq_quiesce(tp); 7445 } 7446 7447 static inline void tg3_full_unlock(struct tg3 *tp) 7448 { 7449 spin_unlock_bh(&tp->lock); 7450 } 7451 7452 /* One-shot MSI handler - Chip automatically disables interrupt 7453 * after sending MSI so driver doesn't have to do it. 7454 */ 7455 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id) 7456 { 7457 struct tg3_napi *tnapi = dev_id; 7458 struct tg3 *tp = tnapi->tp; 7459 7460 prefetch(tnapi->hw_status); 7461 if (tnapi->rx_rcb) 7462 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); 7463 7464 if (likely(!tg3_irq_sync(tp))) 7465 napi_schedule(&tnapi->napi); 7466 7467 return IRQ_HANDLED; 7468 } 7469 7470 /* MSI ISR - No need to check for interrupt sharing and no need to 7471 * flush status block and interrupt mailbox. PCI ordering rules 7472 * guarantee that MSI will arrive after the status block.
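 * (Rationale: the status block DMA and the MSI message are both
 * posted writes on the same ordered PCI channel, so by the time the
 * CPU takes the MSI the status block update is already visible.
 * Contrast the INTx handlers below, which may have to read
 * TG3PCI_PCISTATE to flush the status block before trusting it.)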
7473 */ 7474 static irqreturn_t tg3_msi(int irq, void *dev_id) 7475 { 7476 struct tg3_napi *tnapi = dev_id; 7477 struct tg3 *tp = tnapi->tp; 7478 7479 prefetch(tnapi->hw_status); 7480 if (tnapi->rx_rcb) 7481 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); 7482 /* 7483 * Writing any value to intr-mbox-0 clears PCI INTA# and 7484 * chip-internal interrupt pending events. 7485 * Writing non-zero to intr-mbox-0 additionally tells the 7486 * NIC to stop sending us irqs, engaging "in-intr-handler" 7487 * event coalescing. 7488 */ 7489 tw32_mailbox(tnapi->int_mbox, 0x00000001); 7490 if (likely(!tg3_irq_sync(tp))) 7491 napi_schedule(&tnapi->napi); 7492 7493 return IRQ_RETVAL(1); 7494 } 7495 7496 static irqreturn_t tg3_interrupt(int irq, void *dev_id) 7497 { 7498 struct tg3_napi *tnapi = dev_id; 7499 struct tg3 *tp = tnapi->tp; 7500 struct tg3_hw_status *sblk = tnapi->hw_status; 7501 unsigned int handled = 1; 7502 7503 /* In INTx mode, it is possible for the interrupt to arrive at 7504 * the CPU before the status block posted prior to the interrupt. 7505 * Reading the PCI State register will confirm whether the 7506 * interrupt is ours and will flush the status block. 7507 */ 7508 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) { 7509 if (tg3_flag(tp, CHIP_RESETTING) || 7510 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { 7511 handled = 0; 7512 goto out; 7513 } 7514 } 7515 7516 /* 7517 * Writing any value to intr-mbox-0 clears PCI INTA# and 7518 * chip-internal interrupt pending events. 7519 * Writing non-zero to intr-mbox-0 additionally tells the 7520 * NIC to stop sending us irqs, engaging "in-intr-handler" 7521 * event coalescing. 7522 * 7523 * Flush the mailbox to de-assert the IRQ immediately to prevent 7524 * spurious interrupts. The flush impacts performance but 7525 * excessive spurious interrupts can be worse in some cases. 7526 */ 7527 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); 7528 if (tg3_irq_sync(tp)) 7529 goto out; 7530 sblk->status &= ~SD_STATUS_UPDATED; 7531 if (likely(tg3_has_work(tnapi))) { 7532 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); 7533 napi_schedule(&tnapi->napi); 7534 } else { 7535 /* No work, shared interrupt perhaps? re-enable 7536 * interrupts, and flush that PCI write 7537 */ 7538 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 7539 0x00000000); 7540 } 7541 out: 7542 return IRQ_RETVAL(handled); 7543 } 7544 7545 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id) 7546 { 7547 struct tg3_napi *tnapi = dev_id; 7548 struct tg3 *tp = tnapi->tp; 7549 struct tg3_hw_status *sblk = tnapi->hw_status; 7550 unsigned int handled = 1; 7551 7552 /* In INTx mode, it is possible for the interrupt to arrive at 7553 * the CPU before the status block posted prior to the interrupt. 7554 * Reading the PCI State register will confirm whether the 7555 * interrupt is ours and will flush the status block. 7556 */ 7557 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) { 7558 if (tg3_flag(tp, CHIP_RESETTING) || 7559 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { 7560 handled = 0; 7561 goto out; 7562 } 7563 } 7564 7565 /* 7566 * Writing any value to intr-mbox-0 clears PCI INTA# and 7567 * chip-internal interrupt pending events. 7568 * Writing non-zero to intr-mbox-0 additionally tells the 7569 * NIC to stop sending us irqs, engaging "in-intr-handler" 7570 * event coalescing. 7571 * 7572 * Flush the mailbox to de-assert the IRQ immediately to prevent 7573 * spurious interrupts.
The flush impacts performance but 7574 * excessive spurious interrupts can be worse in some cases. 7575 */ 7576 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); 7577 7578 /* 7579 * In a shared interrupt configuration, sometimes other devices' 7580 * interrupts will scream. We record the current status tag here 7581 * so that the above check can report that the screaming interrupts 7582 * are unhandled. Eventually they will be silenced. 7583 */ 7584 tnapi->last_irq_tag = sblk->status_tag; 7585 7586 if (tg3_irq_sync(tp)) 7587 goto out; 7588 7589 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); 7590 7591 napi_schedule(&tnapi->napi); 7592 7593 out: 7594 return IRQ_RETVAL(handled); 7595 } 7596 7597 /* ISR for interrupt test */ 7598 static irqreturn_t tg3_test_isr(int irq, void *dev_id) 7599 { 7600 struct tg3_napi *tnapi = dev_id; 7601 struct tg3 *tp = tnapi->tp; 7602 struct tg3_hw_status *sblk = tnapi->hw_status; 7603 7604 if ((sblk->status & SD_STATUS_UPDATED) || 7605 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { 7606 tg3_disable_ints(tp); 7607 return IRQ_RETVAL(1); 7608 } 7609 return IRQ_RETVAL(0); 7610 } 7611 7612 #ifdef CONFIG_NET_POLL_CONTROLLER 7613 static void tg3_poll_controller(struct net_device *dev) 7614 { 7615 int i; 7616 struct tg3 *tp = netdev_priv(dev); 7617 7618 if (tg3_irq_sync(tp)) 7619 return; 7620 7621 for (i = 0; i < tp->irq_cnt; i++) 7622 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]); 7623 } 7624 #endif 7625 7626 static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue) 7627 { 7628 struct tg3 *tp = netdev_priv(dev); 7629 7630 if (netif_msg_tx_err(tp)) { 7631 netdev_err(dev, "transmit timed out, resetting\n"); 7632 tg3_dump_state(tp); 7633 } 7634 7635 tg3_reset_task_schedule(tp); 7636 } 7637 7638 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */ 7639 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len) 7640 { 7641 u32 base = (u32) mapping & 0xffffffff; 7642 7643 return base + len + 8 < base; 7644 } 7645 7646 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes 7647 * of any 4GB boundaries: 4G, 8G, etc 7648 */ 7649 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping, 7650 u32 len, u32 mss) 7651 { 7652 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) { 7653 u32 base = (u32) mapping & 0xffffffff; 7654 7655 return ((base + len + (mss & 0x3fff)) < base); 7656 } 7657 return 0; 7658 } 7659 7660 /* Test for DMA addresses > 40-bit */ 7661 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping, 7662 int len) 7663 { 7664 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64) 7665 if (tg3_flag(tp, 40BIT_DMA_BUG)) 7666 return ((u64) mapping + len) > DMA_BIT_MASK(40); 7667 return 0; 7668 #else 7669 return 0; 7670 #endif 7671 } 7672 7673 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd, 7674 dma_addr_t mapping, u32 len, u32 flags, 7675 u32 mss, u32 vlan) 7676 { 7677 txbd->addr_hi = ((u64) mapping >> 32); 7678 txbd->addr_lo = ((u64) mapping & 0xffffffff); 7679 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff); 7680 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT); 7681 } 7682 7683 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget, 7684 dma_addr_t map, u32 len, u32 flags, 7685 u32 mss, u32 vlan) 7686 { 7687 struct tg3 *tp = tnapi->tp; 7688 bool hwbug = false; 7689 7690 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8) 7691 hwbug = true; 7692 7693 if (tg3_4g_overflow_test(map, 
len)) 7694 hwbug = true; 7695 7696 if (tg3_4g_tso_overflow_test(tp, map, len, mss)) 7697 hwbug = true; 7698 7699 if (tg3_40bit_overflow_test(tp, map, len)) 7700 hwbug = true; 7701 7702 if (tp->dma_limit) { 7703 u32 prvidx = *entry; 7704 u32 tmp_flag = flags & ~TXD_FLAG_END; 7705 while (len > tp->dma_limit && *budget) { 7706 u32 frag_len = tp->dma_limit; 7707 len -= tp->dma_limit; 7708 7709 /* Avoid the 8-byte DMA problem */ 7710 if (len <= 8) { 7711 len += tp->dma_limit / 2; 7712 frag_len = tp->dma_limit / 2; 7713 } 7714 7715 tnapi->tx_buffers[*entry].fragmented = true; 7716 7717 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, 7718 frag_len, tmp_flag, mss, vlan); 7719 *budget -= 1; 7720 prvidx = *entry; 7721 *entry = NEXT_TX(*entry); 7722 7723 map += frag_len; 7724 } 7725 7726 if (len) { 7727 if (*budget) { 7728 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, 7729 len, flags, mss, vlan); 7730 *budget -= 1; 7731 *entry = NEXT_TX(*entry); 7732 } else { 7733 hwbug = true; 7734 tnapi->tx_buffers[prvidx].fragmented = false; 7735 } 7736 } 7737 } else { 7738 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, 7739 len, flags, mss, vlan); 7740 *entry = NEXT_TX(*entry); 7741 } 7742 7743 return hwbug; 7744 } 7745 7746 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last) 7747 { 7748 int i; 7749 struct sk_buff *skb; 7750 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry]; 7751 7752 skb = txb->skb; 7753 txb->skb = NULL; 7754 7755 dma_unmap_single(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping), 7756 skb_headlen(skb), DMA_TO_DEVICE); 7757 7758 while (txb->fragmented) { 7759 txb->fragmented = false; 7760 entry = NEXT_TX(entry); 7761 txb = &tnapi->tx_buffers[entry]; 7762 } 7763 7764 for (i = 0; i <= last; i++) { 7765 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 7766 7767 entry = NEXT_TX(entry); 7768 txb = &tnapi->tx_buffers[entry]; 7769 7770 dma_unmap_page(&tnapi->tp->pdev->dev, 7771 dma_unmap_addr(txb, mapping), 7772 skb_frag_size(frag), DMA_TO_DEVICE); 7773 7774 while (txb->fragmented) { 7775 txb->fragmented = false; 7776 entry = NEXT_TX(entry); 7777 txb = &tnapi->tx_buffers[entry]; 7778 } 7779 } 7780 } 7781 7782 /* Work around the 4GB and 40-bit hardware DMA bugs. */ 7783 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, 7784 struct sk_buff **pskb, 7785 u32 *entry, u32 *budget, 7786 u32 base_flags, u32 mss, u32 vlan) 7787 { 7788 struct tg3 *tp = tnapi->tp; 7789 struct sk_buff *new_skb, *skb = *pskb; 7790 dma_addr_t new_addr = 0; 7791 int ret = 0; 7792 7793 if (tg3_asic_rev(tp) != ASIC_REV_5701) 7794 new_skb = skb_copy(skb, GFP_ATOMIC); 7795 else { 7796 int more_headroom = 4 - ((unsigned long)skb->data & 3); 7797 7798 new_skb = skb_copy_expand(skb, 7799 skb_headroom(skb) + more_headroom, 7800 skb_tailroom(skb), GFP_ATOMIC); 7801 } 7802 7803 if (!new_skb) { 7804 ret = -1; 7805 } else { 7806 /* New SKB is guaranteed to be linear.
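 * Both skb_copy() and skb_copy_expand() return a linearized copy, so
 * the single dma_map_single() of new_skb->len below covers the whole
 * packet and no per-frag mapping is needed.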
*/ 7807 new_addr = dma_map_single(&tp->pdev->dev, new_skb->data, 7808 new_skb->len, DMA_TO_DEVICE); 7809 /* Make sure the mapping succeeded */ 7810 if (dma_mapping_error(&tp->pdev->dev, new_addr)) { 7811 dev_kfree_skb_any(new_skb); 7812 ret = -1; 7813 } else { 7814 u32 save_entry = *entry; 7815 7816 base_flags |= TXD_FLAG_END; 7817 7818 tnapi->tx_buffers[*entry].skb = new_skb; 7819 dma_unmap_addr_set(&tnapi->tx_buffers[*entry], 7820 mapping, new_addr); 7821 7822 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr, 7823 new_skb->len, base_flags, 7824 mss, vlan)) { 7825 tg3_tx_skb_unmap(tnapi, save_entry, -1); 7826 dev_kfree_skb_any(new_skb); 7827 ret = -1; 7828 } 7829 } 7830 } 7831 7832 dev_consume_skb_any(skb); 7833 *pskb = new_skb; 7834 return ret; 7835 } 7836 7837 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb) 7838 { 7839 /* Check if we will never have enough descriptors, 7840 * as gso_segs can be more than the current ring size 7841 */ 7842 return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3; 7843 } 7844 7845 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *); 7846 7847 /* Use GSO to work around all TSO packets that meet HW bug conditions 7848 * indicated in tg3_tx_frag_set() 7849 */ 7850 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi, 7851 struct netdev_queue *txq, struct sk_buff *skb) 7852 { 7853 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3; 7854 struct sk_buff *segs, *seg, *next; 7855 7856 /* Estimate the number of fragments in the worst case */ 7857 if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) { 7858 netif_tx_stop_queue(txq); 7859 7860 /* netif_tx_stop_queue() must be done before checking 7861 * the tx index in tg3_tx_avail() below, because in 7862 * tg3_tx(), we update tx index before checking for 7863 * netif_tx_queue_stopped(). 7864 */ 7865 smp_mb(); 7866 if (tg3_tx_avail(tnapi) <= frag_cnt_est) 7867 return NETDEV_TX_BUSY; 7868 7869 netif_tx_wake_queue(txq); 7870 } 7871 7872 segs = skb_gso_segment(skb, tp->dev->features & 7873 ~(NETIF_F_TSO | NETIF_F_TSO6)); 7874 if (IS_ERR(segs) || !segs) 7875 goto tg3_tso_bug_end; 7876 7877 skb_list_walk_safe(segs, seg, next) { 7878 skb_mark_not_on_list(seg); 7879 tg3_start_xmit(seg, tp->dev); 7880 } 7881 7882 tg3_tso_bug_end: 7883 dev_consume_skb_any(skb); 7884 7885 return NETDEV_TX_OK; 7886 } 7887 7888 /* hard_start_xmit for all devices */ 7889 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) 7890 { 7891 struct tg3 *tp = netdev_priv(dev); 7892 u32 len, entry, base_flags, mss, vlan = 0; 7893 u32 budget; 7894 int i = -1, would_hit_hwbug; 7895 dma_addr_t mapping; 7896 struct tg3_napi *tnapi; 7897 struct netdev_queue *txq; 7898 unsigned int last; 7899 struct iphdr *iph = NULL; 7900 struct tcphdr *tcph = NULL; 7901 __sum16 tcp_csum = 0, ip_csum = 0; 7902 __be16 ip_tot_len = 0; 7903 7904 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); 7905 tnapi = &tp->napi[skb_get_queue_mapping(skb)]; 7906 if (tg3_flag(tp, ENABLE_TSS)) 7907 tnapi++; 7908 7909 budget = tg3_tx_avail(tnapi); 7910 7911 /* We are running in BH disabled context with netif_tx_lock 7912 * and TX reclaim runs via tp->napi.poll inside of a software 7913 * interrupt. Furthermore, IRQ processing runs lockless so we have 7914 * no IRQ context deadlocks to worry about either. Rejoice! 7915 */ 7916 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) { 7917 if (!netif_tx_queue_stopped(txq)) { 7918 netif_tx_stop_queue(txq); 7919 7920 /* This is a hard error, log it.
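 * (Reaching this branch means the stop/wake protocol failed: the
 * queue is supposed to be stopped while fewer than nr_frags + 1
 * descriptors remain, so an awake queue with a full ring points to a
 * logic bug rather than ordinary backpressure.)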
*/ 7921 netdev_err(dev, 7922 "BUG! Tx Ring full when queue awake!\n"); 7923 } 7924 return NETDEV_TX_BUSY; 7925 } 7926 7927 entry = tnapi->tx_prod; 7928 base_flags = 0; 7929 7930 mss = skb_shinfo(skb)->gso_size; 7931 if (mss) { 7932 u32 tcp_opt_len, hdr_len; 7933 7934 if (skb_cow_head(skb, 0)) 7935 goto drop; 7936 7937 iph = ip_hdr(skb); 7938 tcp_opt_len = tcp_optlen(skb); 7939 7940 hdr_len = skb_tcp_all_headers(skb) - ETH_HLEN; 7941 7942 /* HW/FW cannot correctly segment packets that have been 7943 * vlan encapsulated. 7944 */ 7945 if (skb->protocol == htons(ETH_P_8021Q) || 7946 skb->protocol == htons(ETH_P_8021AD)) { 7947 if (tg3_tso_bug_gso_check(tnapi, skb)) 7948 return tg3_tso_bug(tp, tnapi, txq, skb); 7949 goto drop; 7950 } 7951 7952 if (!skb_is_gso_v6(skb)) { 7953 if (unlikely((ETH_HLEN + hdr_len) > 80) && 7954 tg3_flag(tp, TSO_BUG)) { 7955 if (tg3_tso_bug_gso_check(tnapi, skb)) 7956 return tg3_tso_bug(tp, tnapi, txq, skb); 7957 goto drop; 7958 } 7959 ip_csum = iph->check; 7960 ip_tot_len = iph->tot_len; 7961 iph->check = 0; 7962 iph->tot_len = htons(mss + hdr_len); 7963 } 7964 7965 base_flags |= (TXD_FLAG_CPU_PRE_DMA | 7966 TXD_FLAG_CPU_POST_DMA); 7967 7968 tcph = tcp_hdr(skb); 7969 tcp_csum = tcph->check; 7970 7971 if (tg3_flag(tp, HW_TSO_1) || 7972 tg3_flag(tp, HW_TSO_2) || 7973 tg3_flag(tp, HW_TSO_3)) { 7974 tcph->check = 0; 7975 base_flags &= ~TXD_FLAG_TCPUDP_CSUM; 7976 } else { 7977 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 7978 0, IPPROTO_TCP, 0); 7979 } 7980 7981 if (tg3_flag(tp, HW_TSO_3)) { 7982 mss |= (hdr_len & 0xc) << 12; 7983 if (hdr_len & 0x10) 7984 base_flags |= 0x00000010; 7985 base_flags |= (hdr_len & 0x3e0) << 5; 7986 } else if (tg3_flag(tp, HW_TSO_2)) 7987 mss |= hdr_len << 9; 7988 else if (tg3_flag(tp, HW_TSO_1) || 7989 tg3_asic_rev(tp) == ASIC_REV_5705) { 7990 if (tcp_opt_len || iph->ihl > 5) { 7991 int tsflags; 7992 7993 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); 7994 mss |= (tsflags << 11); 7995 } 7996 } else { 7997 if (tcp_opt_len || iph->ihl > 5) { 7998 int tsflags; 7999 8000 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); 8001 base_flags |= tsflags << 12; 8002 } 8003 } 8004 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 8005 /* HW/FW cannot correctly checksum packets that have been 8006 * vlan encapsulated. 8007 */ 8008 if (skb->protocol == htons(ETH_P_8021Q) || 8009 skb->protocol == htons(ETH_P_8021AD)) { 8010 if (skb_checksum_help(skb)) 8011 goto drop; 8012 } else { 8013 base_flags |= TXD_FLAG_TCPUDP_CSUM; 8014 } 8015 } 8016 8017 if (tg3_flag(tp, USE_JUMBO_BDFLAG) && 8018 !mss && skb->len > VLAN_ETH_FRAME_LEN) 8019 base_flags |= TXD_FLAG_JMB_PKT; 8020 8021 if (skb_vlan_tag_present(skb)) { 8022 base_flags |= TXD_FLAG_VLAN; 8023 vlan = skb_vlan_tag_get(skb); 8024 } 8025 8026 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) && 8027 tg3_flag(tp, TX_TSTAMP_EN)) { 8028 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 8029 base_flags |= TXD_FLAG_HWTSTAMP; 8030 } 8031 8032 len = skb_headlen(skb); 8033 8034 mapping = dma_map_single(&tp->pdev->dev, skb->data, len, 8035 DMA_TO_DEVICE); 8036 if (dma_mapping_error(&tp->pdev->dev, mapping)) 8037 goto drop; 8038 8039 8040 tnapi->tx_buffers[entry].skb = skb; 8041 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); 8042 8043 would_hit_hwbug = 0; 8044 8045 if (tg3_flag(tp, 5701_DMA_BUG)) 8046 would_hit_hwbug = 1; 8047 8048 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags | 8049 ((skb_shinfo(skb)->nr_frags == 0) ?
TXD_FLAG_END : 0), 8050 mss, vlan)) { 8051 would_hit_hwbug = 1; 8052 } else if (skb_shinfo(skb)->nr_frags > 0) { 8053 u32 tmp_mss = mss; 8054 8055 if (!tg3_flag(tp, HW_TSO_1) && 8056 !tg3_flag(tp, HW_TSO_2) && 8057 !tg3_flag(tp, HW_TSO_3)) 8058 tmp_mss = 0; 8059 8060 /* Now loop through additional data 8061 * fragments, and queue them. 8062 */ 8063 last = skb_shinfo(skb)->nr_frags - 1; 8064 for (i = 0; i <= last; i++) { 8065 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 8066 8067 len = skb_frag_size(frag); 8068 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0, 8069 len, DMA_TO_DEVICE); 8070 8071 tnapi->tx_buffers[entry].skb = NULL; 8072 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, 8073 mapping); 8074 if (dma_mapping_error(&tp->pdev->dev, mapping)) 8075 goto dma_error; 8076 8077 if (!budget || 8078 tg3_tx_frag_set(tnapi, &entry, &budget, mapping, 8079 len, base_flags | 8080 ((i == last) ? TXD_FLAG_END : 0), 8081 tmp_mss, vlan)) { 8082 would_hit_hwbug = 1; 8083 break; 8084 } 8085 } 8086 } 8087 8088 if (would_hit_hwbug) { 8089 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i); 8090 8091 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) { 8092 /* If it's a TSO packet, do GSO instead of 8093 * allocating and copying to a large linear SKB 8094 */ 8095 if (ip_tot_len) { 8096 iph->check = ip_csum; 8097 iph->tot_len = ip_tot_len; 8098 } 8099 tcph->check = tcp_csum; 8100 return tg3_tso_bug(tp, tnapi, txq, skb); 8101 } 8102 8103 /* If the workaround fails due to memory/mapping 8104 * failure, silently drop this packet. 8105 */ 8106 entry = tnapi->tx_prod; 8107 budget = tg3_tx_avail(tnapi); 8108 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget, 8109 base_flags, mss, vlan)) 8110 goto drop_nofree; 8111 } 8112 8113 skb_tx_timestamp(skb); 8114 netdev_tx_sent_queue(txq, skb->len); 8115 8116 /* Sync BD data before updating mailbox */ 8117 wmb(); 8118 8119 tnapi->tx_prod = entry; 8120 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) { 8121 netif_tx_stop_queue(txq); 8122 8123 /* netif_tx_stop_queue() must be done before checking 8124 * the tx index in tg3_tx_avail() below, because in 8125 * tg3_tx(), we update tx index before checking for 8126 * netif_tx_queue_stopped(). 8127 */ 8128 smp_mb(); 8129 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)) 8130 netif_tx_wake_queue(txq); 8131 } 8132 8133 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) { 8134 /* Packets are ready, update Tx producer idx on card.
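 * (Doorbell batching: the mailbox write below is a comparatively
 * expensive MMIO access. When netdev_xmit_more() reports that more
 * packets are already queued behind this one, the write is deferred
 * to the last skb of the burst, unless the queue has just been
 * stopped and the doorbell must be rung now.)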
*/ 8135 tw32_tx_mbox(tnapi->prodmbox, entry); 8136 } 8137 8138 return NETDEV_TX_OK; 8139 8140 dma_error: 8141 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i); 8142 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL; 8143 drop: 8144 dev_kfree_skb_any(skb); 8145 drop_nofree: 8146 tp->tx_dropped++; 8147 return NETDEV_TX_OK; 8148 } 8149 8150 static void tg3_mac_loopback(struct tg3 *tp, bool enable) 8151 { 8152 if (enable) { 8153 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX | 8154 MAC_MODE_PORT_MODE_MASK); 8155 8156 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK; 8157 8158 if (!tg3_flag(tp, 5705_PLUS)) 8159 tp->mac_mode |= MAC_MODE_LINK_POLARITY; 8160 8161 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) 8162 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 8163 else 8164 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 8165 } else { 8166 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK; 8167 8168 if (tg3_flag(tp, 5705_PLUS) || 8169 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) || 8170 tg3_asic_rev(tp) == ASIC_REV_5700) 8171 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; 8172 } 8173 8174 tw32(MAC_MODE, tp->mac_mode); 8175 udelay(40); 8176 } 8177 8178 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk) 8179 { 8180 u32 val, bmcr, mac_mode, ptest = 0; 8181 8182 tg3_phy_toggle_apd(tp, false); 8183 tg3_phy_toggle_automdix(tp, false); 8184 8185 if (extlpbk && tg3_phy_set_extloopbk(tp)) 8186 return -EIO; 8187 8188 bmcr = BMCR_FULLDPLX; 8189 switch (speed) { 8190 case SPEED_10: 8191 break; 8192 case SPEED_100: 8193 bmcr |= BMCR_SPEED100; 8194 break; 8195 case SPEED_1000: 8196 default: 8197 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 8198 speed = SPEED_100; 8199 bmcr |= BMCR_SPEED100; 8200 } else { 8201 speed = SPEED_1000; 8202 bmcr |= BMCR_SPEED1000; 8203 } 8204 } 8205 8206 if (extlpbk) { 8207 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) { 8208 tg3_readphy(tp, MII_CTRL1000, &val); 8209 val |= CTL1000_AS_MASTER | 8210 CTL1000_ENABLE_MASTER; 8211 tg3_writephy(tp, MII_CTRL1000, val); 8212 } else { 8213 ptest = MII_TG3_FET_PTEST_TRIM_SEL | 8214 MII_TG3_FET_PTEST_TRIM_2; 8215 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest); 8216 } 8217 } else 8218 bmcr |= BMCR_LOOPBACK; 8219 8220 tg3_writephy(tp, MII_BMCR, bmcr); 8221 8222 /* The write needs to be flushed for the FETs */ 8223 if (tp->phy_flags & TG3_PHYFLG_IS_FET) 8224 tg3_readphy(tp, MII_BMCR, &bmcr); 8225 8226 udelay(40); 8227 8228 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) && 8229 tg3_asic_rev(tp) == ASIC_REV_5785) { 8230 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest | 8231 MII_TG3_FET_PTEST_FRC_TX_LINK | 8232 MII_TG3_FET_PTEST_FRC_TX_LOCK); 8233 8234 /* The write needs to be flushed for the AC131 */ 8235 tg3_readphy(tp, MII_TG3_FET_PTEST, &val); 8236 } 8237 8238 /* Reset to prevent losing 1st rx packet intermittently */ 8239 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && 8240 tg3_flag(tp, 5780_CLASS)) { 8241 tw32_f(MAC_RX_MODE, RX_MODE_RESET); 8242 udelay(10); 8243 tw32_f(MAC_RX_MODE, tp->rx_mode); 8244 } 8245 8246 mac_mode = tp->mac_mode & 8247 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); 8248 if (speed == SPEED_1000) 8249 mac_mode |= MAC_MODE_PORT_MODE_GMII; 8250 else 8251 mac_mode |= MAC_MODE_PORT_MODE_MII; 8252 8253 if (tg3_asic_rev(tp) == ASIC_REV_5700) { 8254 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK; 8255 8256 if (masked_phy_id == TG3_PHY_ID_BCM5401) 8257 mac_mode &= ~MAC_MODE_LINK_POLARITY; 8258 else if (masked_phy_id == TG3_PHY_ID_BCM5411) 8259 mac_mode |= MAC_MODE_LINK_POLARITY; 8260 8261 tg3_writephy(tp, MII_TG3_EXT_CTRL, 8262 MII_TG3_EXT_CTRL_LNK3_LED_MODE); 8263 } 8264 8265 tw32(MAC_MODE, 
mac_mode); 8266 udelay(40); 8267 8268 return 0; 8269 } 8270 8271 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features) 8272 { 8273 struct tg3 *tp = netdev_priv(dev); 8274 8275 if (features & NETIF_F_LOOPBACK) { 8276 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK) 8277 return; 8278 8279 spin_lock_bh(&tp->lock); 8280 tg3_mac_loopback(tp, true); 8281 netif_carrier_on(tp->dev); 8282 spin_unlock_bh(&tp->lock); 8283 netdev_info(dev, "Internal MAC loopback mode enabled.\n"); 8284 } else { 8285 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)) 8286 return; 8287 8288 spin_lock_bh(&tp->lock); 8289 tg3_mac_loopback(tp, false); 8290 /* Force link status check */ 8291 tg3_setup_phy(tp, true); 8292 spin_unlock_bh(&tp->lock); 8293 netdev_info(dev, "Internal MAC loopback mode disabled.\n"); 8294 } 8295 } 8296 8297 static netdev_features_t tg3_fix_features(struct net_device *dev, 8298 netdev_features_t features) 8299 { 8300 struct tg3 *tp = netdev_priv(dev); 8301 8302 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS)) 8303 features &= ~NETIF_F_ALL_TSO; 8304 8305 return features; 8306 } 8307 8308 static int tg3_set_features(struct net_device *dev, netdev_features_t features) 8309 { 8310 netdev_features_t changed = dev->features ^ features; 8311 8312 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev)) 8313 tg3_set_loopback(dev, features); 8314 8315 return 0; 8316 } 8317 8318 static void tg3_rx_prodring_free(struct tg3 *tp, 8319 struct tg3_rx_prodring_set *tpr) 8320 { 8321 int i; 8322 8323 if (tpr != &tp->napi[0].prodring) { 8324 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx; 8325 i = (i + 1) & tp->rx_std_ring_mask) 8326 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i], 8327 tp->rx_pkt_map_sz); 8328 8329 if (tg3_flag(tp, JUMBO_CAPABLE)) { 8330 for (i = tpr->rx_jmb_cons_idx; 8331 i != tpr->rx_jmb_prod_idx; 8332 i = (i + 1) & tp->rx_jmb_ring_mask) { 8333 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i], 8334 TG3_RX_JMB_MAP_SZ); 8335 } 8336 } 8337 8338 return; 8339 } 8340 8341 for (i = 0; i <= tp->rx_std_ring_mask; i++) 8342 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i], 8343 tp->rx_pkt_map_sz); 8344 8345 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) { 8346 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) 8347 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i], 8348 TG3_RX_JMB_MAP_SZ); 8349 } 8350 } 8351 8352 /* Initialize rx rings for packet processing. 8353 * 8354 * The chip has been shut down and the driver detached from 8355 * the networking, so no interrupts or new tx packets will 8356 * end up in the driver. tp->{tx,}lock are held and thus 8357 * we may not sleep. 8358 */ 8359 static int tg3_rx_prodring_alloc(struct tg3 *tp, 8360 struct tg3_rx_prodring_set *tpr) 8361 { 8362 u32 i, rx_pkt_dma_sz; 8363 8364 tpr->rx_std_cons_idx = 0; 8365 tpr->rx_std_prod_idx = 0; 8366 tpr->rx_jmb_cons_idx = 0; 8367 tpr->rx_jmb_prod_idx = 0; 8368 8369 if (tpr != &tp->napi[0].prodring) { 8370 memset(&tpr->rx_std_buffers[0], 0, 8371 TG3_RX_STD_BUFF_RING_SIZE(tp)); 8372 if (tpr->rx_jmb_buffers) 8373 memset(&tpr->rx_jmb_buffers[0], 0, 8374 TG3_RX_JMB_BUFF_RING_SIZE(tp)); 8375 goto done; 8376 } 8377 8378 /* Zero out all descriptors. */ 8379 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp)); 8380 8381 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ; 8382 if (tg3_flag(tp, 5780_CLASS) && 8383 tp->dev->mtu > ETH_DATA_LEN) 8384 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ; 8385 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz); 8386 8387 /* Initialize invariants of the rings, we only set this 8388 * stuff once. 
This works because the card does not 8389 * write into the rx buffer posting rings. 8390 */ 8391 for (i = 0; i <= tp->rx_std_ring_mask; i++) { 8392 struct tg3_rx_buffer_desc *rxd; 8393 8394 rxd = &tpr->rx_std[i]; 8395 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT; 8396 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT); 8397 rxd->opaque = (RXD_OPAQUE_RING_STD | 8398 (i << RXD_OPAQUE_INDEX_SHIFT)); 8399 } 8400 8401 /* Now allocate fresh SKBs for each rx ring. */ 8402 for (i = 0; i < tp->rx_pending; i++) { 8403 unsigned int frag_size; 8404 8405 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i, 8406 &frag_size) < 0) { 8407 netdev_warn(tp->dev, 8408 "Using a smaller RX standard ring. Only " 8409 "%d out of %d buffers were allocated " 8410 "successfully\n", i, tp->rx_pending); 8411 if (i == 0) 8412 goto initfail; 8413 tp->rx_pending = i; 8414 break; 8415 } 8416 } 8417 8418 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS)) 8419 goto done; 8420 8421 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp)); 8422 8423 if (!tg3_flag(tp, JUMBO_RING_ENABLE)) 8424 goto done; 8425 8426 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) { 8427 struct tg3_rx_buffer_desc *rxd; 8428 8429 rxd = &tpr->rx_jmb[i].std; 8430 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT; 8431 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) | 8432 RXD_FLAG_JUMBO; 8433 rxd->opaque = (RXD_OPAQUE_RING_JUMBO | 8434 (i << RXD_OPAQUE_INDEX_SHIFT)); 8435 } 8436 8437 for (i = 0; i < tp->rx_jumbo_pending; i++) { 8438 unsigned int frag_size; 8439 8440 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i, 8441 &frag_size) < 0) { 8442 netdev_warn(tp->dev, 8443 "Using a smaller RX jumbo ring. Only %d " 8444 "out of %d buffers were allocated " 8445 "successfully\n", i, tp->rx_jumbo_pending); 8446 if (i == 0) 8447 goto initfail; 8448 tp->rx_jumbo_pending = i; 8449 break; 8450 } 8451 } 8452 8453 done: 8454 return 0; 8455 8456 initfail: 8457 tg3_rx_prodring_free(tp, tpr); 8458 return -ENOMEM; 8459 } 8460 8461 static void tg3_rx_prodring_fini(struct tg3 *tp, 8462 struct tg3_rx_prodring_set *tpr) 8463 { 8464 kfree(tpr->rx_std_buffers); 8465 tpr->rx_std_buffers = NULL; 8466 kfree(tpr->rx_jmb_buffers); 8467 tpr->rx_jmb_buffers = NULL; 8468 if (tpr->rx_std) { 8469 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp), 8470 tpr->rx_std, tpr->rx_std_mapping); 8471 tpr->rx_std = NULL; 8472 } 8473 if (tpr->rx_jmb) { 8474 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp), 8475 tpr->rx_jmb, tpr->rx_jmb_mapping); 8476 tpr->rx_jmb = NULL; 8477 } 8478 } 8479 8480 static int tg3_rx_prodring_init(struct tg3 *tp, 8481 struct tg3_rx_prodring_set *tpr) 8482 { 8483 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp), 8484 GFP_KERNEL); 8485 if (!tpr->rx_std_buffers) 8486 return -ENOMEM; 8487 8488 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev, 8489 TG3_RX_STD_RING_BYTES(tp), 8490 &tpr->rx_std_mapping, 8491 GFP_KERNEL); 8492 if (!tpr->rx_std) 8493 goto err_out; 8494 8495 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) { 8496 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp), 8497 GFP_KERNEL); 8498 if (!tpr->rx_jmb_buffers) 8499 goto err_out; 8500 8501 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev, 8502 TG3_RX_JMB_RING_BYTES(tp), 8503 &tpr->rx_jmb_mapping, 8504 GFP_KERNEL); 8505 if (!tpr->rx_jmb) 8506 goto err_out; 8507 } 8508 8509 return 0; 8510 8511 err_out: 8512 tg3_rx_prodring_fini(tp, tpr); 8513 return -ENOMEM; 8514 } 8515 8516 /* Free up pending packets in all rx/tx rings. 
8517 * 8518 * The chip has been shut down and the driver detached from 8519 * the networking, so no interrupts or new tx packets will 8520 * end up in the driver. tp->{tx,}lock is not held and we are not 8521 * in an interrupt context and thus may sleep. 8522 */ 8523 static void tg3_free_rings(struct tg3 *tp) 8524 { 8525 int i, j; 8526 8527 for (j = 0; j < tp->irq_cnt; j++) { 8528 struct tg3_napi *tnapi = &tp->napi[j]; 8529 8530 tg3_rx_prodring_free(tp, &tnapi->prodring); 8531 8532 if (!tnapi->tx_buffers) 8533 continue; 8534 8535 for (i = 0; i < TG3_TX_RING_SIZE; i++) { 8536 struct sk_buff *skb = tnapi->tx_buffers[i].skb; 8537 8538 if (!skb) 8539 continue; 8540 8541 tg3_tx_skb_unmap(tnapi, i, 8542 skb_shinfo(skb)->nr_frags - 1); 8543 8544 dev_consume_skb_any(skb); 8545 } 8546 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j)); 8547 } 8548 } 8549 8550 /* Initialize tx/rx rings for packet processing. 8551 * 8552 * The chip has been shut down and the driver detached from 8553 * the networking, so no interrupts or new tx packets will 8554 * end up in the driver. tp->{tx,}lock are held and thus 8555 * we may not sleep. 8556 */ 8557 static int tg3_init_rings(struct tg3 *tp) 8558 { 8559 int i; 8560 8561 /* Free up all the SKBs. */ 8562 tg3_free_rings(tp); 8563 8564 for (i = 0; i < tp->irq_cnt; i++) { 8565 struct tg3_napi *tnapi = &tp->napi[i]; 8566 8567 tnapi->last_tag = 0; 8568 tnapi->last_irq_tag = 0; 8569 tnapi->hw_status->status = 0; 8570 tnapi->hw_status->status_tag = 0; 8571 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); 8572 8573 tnapi->tx_prod = 0; 8574 tnapi->tx_cons = 0; 8575 if (tnapi->tx_ring) 8576 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES); 8577 8578 tnapi->rx_rcb_ptr = 0; 8579 if (tnapi->rx_rcb) 8580 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); 8581 8582 if (tnapi->prodring.rx_std && 8583 tg3_rx_prodring_alloc(tp, &tnapi->prodring)) { 8584 tg3_free_rings(tp); 8585 return -ENOMEM; 8586 } 8587 } 8588 8589 return 0; 8590 } 8591 8592 static void tg3_mem_tx_release(struct tg3 *tp) 8593 { 8594 int i; 8595 8596 for (i = 0; i < tp->irq_max; i++) { 8597 struct tg3_napi *tnapi = &tp->napi[i]; 8598 8599 if (tnapi->tx_ring) { 8600 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES, 8601 tnapi->tx_ring, tnapi->tx_desc_mapping); 8602 tnapi->tx_ring = NULL; 8603 } 8604 8605 kfree(tnapi->tx_buffers); 8606 tnapi->tx_buffers = NULL; 8607 } 8608 } 8609 8610 static int tg3_mem_tx_acquire(struct tg3 *tp) 8611 { 8612 int i; 8613 struct tg3_napi *tnapi = &tp->napi[0]; 8614 8615 /* If multivector TSS is enabled, vector 0 does not handle 8616 * tx interrupts. Don't allocate any resources for it. 
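 * (So with TSS the tx ring for queue i lives on tp->napi[i + 1];
 * the tnapi++ just below skips vector 0 accordingly.)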
8617 */ 8618 if (tg3_flag(tp, ENABLE_TSS)) 8619 tnapi++; 8620 8621 for (i = 0; i < tp->txq_cnt; i++, tnapi++) { 8622 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE, 8623 sizeof(struct tg3_tx_ring_info), 8624 GFP_KERNEL); 8625 if (!tnapi->tx_buffers) 8626 goto err_out; 8627 8628 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev, 8629 TG3_TX_RING_BYTES, 8630 &tnapi->tx_desc_mapping, 8631 GFP_KERNEL); 8632 if (!tnapi->tx_ring) 8633 goto err_out; 8634 } 8635 8636 return 0; 8637 8638 err_out: 8639 tg3_mem_tx_release(tp); 8640 return -ENOMEM; 8641 } 8642 8643 static void tg3_mem_rx_release(struct tg3 *tp) 8644 { 8645 int i; 8646 8647 for (i = 0; i < tp->irq_max; i++) { 8648 struct tg3_napi *tnapi = &tp->napi[i]; 8649 8650 tg3_rx_prodring_fini(tp, &tnapi->prodring); 8651 8652 if (!tnapi->rx_rcb) 8653 continue; 8654 8655 dma_free_coherent(&tp->pdev->dev, 8656 TG3_RX_RCB_RING_BYTES(tp), 8657 tnapi->rx_rcb, 8658 tnapi->rx_rcb_mapping); 8659 tnapi->rx_rcb = NULL; 8660 } 8661 } 8662 8663 static int tg3_mem_rx_acquire(struct tg3 *tp) 8664 { 8665 unsigned int i, limit; 8666 8667 limit = tp->rxq_cnt; 8668 8669 /* If RSS is enabled, we need a (dummy) producer ring 8670 * set on vector zero. This is the true hw prodring. 8671 */ 8672 if (tg3_flag(tp, ENABLE_RSS)) 8673 limit++; 8674 8675 for (i = 0; i < limit; i++) { 8676 struct tg3_napi *tnapi = &tp->napi[i]; 8677 8678 if (tg3_rx_prodring_init(tp, &tnapi->prodring)) 8679 goto err_out; 8680 8681 /* If multivector RSS is enabled, vector 0 8682 * does not handle rx or tx interrupts. 8683 * Don't allocate any resources for it. 8684 */ 8685 if (!i && tg3_flag(tp, ENABLE_RSS)) 8686 continue; 8687 8688 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev, 8689 TG3_RX_RCB_RING_BYTES(tp), 8690 &tnapi->rx_rcb_mapping, 8691 GFP_KERNEL); 8692 if (!tnapi->rx_rcb) 8693 goto err_out; 8694 } 8695 8696 return 0; 8697 8698 err_out: 8699 tg3_mem_rx_release(tp); 8700 return -ENOMEM; 8701 } 8702 8703 /* 8704 * Must not be invoked with interrupt sources disabled and 8705 * the hardware shut down. 8706 */ 8707 static void tg3_free_consistent(struct tg3 *tp) 8708 { 8709 int i; 8710 8711 for (i = 0; i < tp->irq_cnt; i++) { 8712 struct tg3_napi *tnapi = &tp->napi[i]; 8713 8714 if (tnapi->hw_status) { 8715 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE, 8716 tnapi->hw_status, 8717 tnapi->status_mapping); 8718 tnapi->hw_status = NULL; 8719 } 8720 } 8721 8722 tg3_mem_rx_release(tp); 8723 tg3_mem_tx_release(tp); 8724 8725 /* tp->hw_stats can be referenced safely: 8726 * 1. under rtnl_lock 8727 * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set. 8728 */ 8729 if (tp->hw_stats) { 8730 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats), 8731 tp->hw_stats, tp->stats_mapping); 8732 tp->hw_stats = NULL; 8733 } 8734 } 8735 8736 /* 8737 * Must not be invoked with interrupt sources disabled and 8738 * the hardware shut down. Can sleep.
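 *
 * tg3_alloc_consistent() allocates the DMA-coherent hw_stats block
 * and one status block per interrupt vector, then hands off to
 * tg3_mem_tx_acquire() and tg3_mem_rx_acquire() for the ring memory.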
8739 */ 8740 static int tg3_alloc_consistent(struct tg3 *tp) 8741 { 8742 int i; 8743 8744 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev, 8745 sizeof(struct tg3_hw_stats), 8746 &tp->stats_mapping, GFP_KERNEL); 8747 if (!tp->hw_stats) 8748 goto err_out; 8749 8750 for (i = 0; i < tp->irq_cnt; i++) { 8751 struct tg3_napi *tnapi = &tp->napi[i]; 8752 struct tg3_hw_status *sblk; 8753 8754 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev, 8755 TG3_HW_STATUS_SIZE, 8756 &tnapi->status_mapping, 8757 GFP_KERNEL); 8758 if (!tnapi->hw_status) 8759 goto err_out; 8760 8761 sblk = tnapi->hw_status; 8762 8763 if (tg3_flag(tp, ENABLE_RSS)) { 8764 u16 *prodptr = NULL; 8765 8766 /* 8767 * When RSS is enabled, the status block format changes 8768 * slightly. The "rx_jumbo_consumer", "reserved", 8769 * and "rx_mini_consumer" members get mapped to the 8770 * other three rx return ring producer indexes. 8771 */ 8772 switch (i) { 8773 case 1: 8774 prodptr = &sblk->idx[0].rx_producer; 8775 break; 8776 case 2: 8777 prodptr = &sblk->rx_jumbo_consumer; 8778 break; 8779 case 3: 8780 prodptr = &sblk->reserved; 8781 break; 8782 case 4: 8783 prodptr = &sblk->rx_mini_consumer; 8784 break; 8785 } 8786 tnapi->rx_rcb_prod_idx = prodptr; 8787 } else { 8788 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer; 8789 } 8790 } 8791 8792 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp)) 8793 goto err_out; 8794 8795 return 0; 8796 8797 err_out: 8798 tg3_free_consistent(tp); 8799 return -ENOMEM; 8800 } 8801 8802 #define MAX_WAIT_CNT 1000 8803 8804 /* To stop a block, clear the enable bit and poll till it 8805 * clears. tp->lock is held. 8806 */ 8807 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent) 8808 { 8809 unsigned int i; 8810 u32 val; 8811 8812 if (tg3_flag(tp, 5705_PLUS)) { 8813 switch (ofs) { 8814 case RCVLSC_MODE: 8815 case DMAC_MODE: 8816 case MBFREE_MODE: 8817 case BUFMGR_MODE: 8818 case MEMARB_MODE: 8819 /* We can't enable/disable these bits of the 8820 * 5705/5750, just say success. 8821 */ 8822 return 0; 8823 8824 default: 8825 break; 8826 } 8827 } 8828 8829 val = tr32(ofs); 8830 val &= ~enable_bit; 8831 tw32_f(ofs, val); 8832 8833 for (i = 0; i < MAX_WAIT_CNT; i++) { 8834 if (pci_channel_offline(tp->pdev)) { 8835 dev_err(&tp->pdev->dev, 8836 "tg3_stop_block device offline, " 8837 "ofs=%lx enable_bit=%x\n", 8838 ofs, enable_bit); 8839 return -ENODEV; 8840 } 8841 8842 udelay(100); 8843 val = tr32(ofs); 8844 if ((val & enable_bit) == 0) 8845 break; 8846 } 8847 8848 if (i == MAX_WAIT_CNT && !silent) { 8849 dev_err(&tp->pdev->dev, 8850 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n", 8851 ofs, enable_bit); 8852 return -ENODEV; 8853 } 8854 8855 return 0; 8856 } 8857 8858 /* tp->lock is held. 
*/ 8859 static int tg3_abort_hw(struct tg3 *tp, bool silent) 8860 { 8861 int i, err; 8862 8863 tg3_disable_ints(tp); 8864 8865 if (pci_channel_offline(tp->pdev)) { 8866 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE); 8867 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE; 8868 err = -ENODEV; 8869 goto err_no_dev; 8870 } 8871 8872 tp->rx_mode &= ~RX_MODE_ENABLE; 8873 tw32_f(MAC_RX_MODE, tp->rx_mode); 8874 udelay(10); 8875 8876 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent); 8877 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent); 8878 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent); 8879 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent); 8880 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent); 8881 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent); 8882 8883 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent); 8884 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent); 8885 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent); 8886 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent); 8887 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent); 8888 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent); 8889 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent); 8890 8891 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE; 8892 tw32_f(MAC_MODE, tp->mac_mode); 8893 udelay(40); 8894 8895 tp->tx_mode &= ~TX_MODE_ENABLE; 8896 tw32_f(MAC_TX_MODE, tp->tx_mode); 8897 8898 for (i = 0; i < MAX_WAIT_CNT; i++) { 8899 udelay(100); 8900 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE)) 8901 break; 8902 } 8903 if (i >= MAX_WAIT_CNT) { 8904 dev_err(&tp->pdev->dev, 8905 "%s timed out, TX_MODE_ENABLE will not clear " 8906 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE)); 8907 err |= -ENODEV; 8908 } 8909 8910 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent); 8911 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent); 8912 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent); 8913 8914 tw32(FTQ_RESET, 0xffffffff); 8915 tw32(FTQ_RESET, 0x00000000); 8916 8917 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent); 8918 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent); 8919 8920 err_no_dev: 8921 for (i = 0; i < tp->irq_cnt; i++) { 8922 struct tg3_napi *tnapi = &tp->napi[i]; 8923 if (tnapi->hw_status) 8924 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); 8925 } 8926 8927 return err; 8928 } 8929 8930 /* Save PCI command register before chip reset */ 8931 static void tg3_save_pci_state(struct tg3 *tp) 8932 { 8933 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd); 8934 } 8935 8936 /* Restore PCI state after chip reset */ 8937 static void tg3_restore_pci_state(struct tg3 *tp) 8938 { 8939 u32 val; 8940 8941 /* Re-enable indirect register accesses. */ 8942 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, 8943 tp->misc_host_ctrl); 8944 8945 /* Set MAX PCI retry to zero. */ 8946 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE); 8947 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 && 8948 tg3_flag(tp, PCIX_MODE)) 8949 val |= PCISTATE_RETRY_SAME_DMA; 8950 /* Allow reads and writes to the APE register and memory space. 
*/ 8951 if (tg3_flag(tp, ENABLE_APE)) 8952 val |= PCISTATE_ALLOW_APE_CTLSPC_WR | 8953 PCISTATE_ALLOW_APE_SHMEM_WR | 8954 PCISTATE_ALLOW_APE_PSPACE_WR; 8955 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val); 8956 8957 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd); 8958 8959 if (!tg3_flag(tp, PCI_EXPRESS)) { 8960 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, 8961 tp->pci_cacheline_sz); 8962 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, 8963 tp->pci_lat_timer); 8964 } 8965 8966 /* Make sure PCI-X relaxed ordering bit is clear. */ 8967 if (tg3_flag(tp, PCIX_MODE)) { 8968 u16 pcix_cmd; 8969 8970 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 8971 &pcix_cmd); 8972 pcix_cmd &= ~PCI_X_CMD_ERO; 8973 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 8974 pcix_cmd); 8975 } 8976 8977 if (tg3_flag(tp, 5780_CLASS)) { 8978 8979 /* Chip reset on 5780 will reset MSI enable bit, 8980 * so need to restore it. 8981 */ 8982 if (tg3_flag(tp, USING_MSI)) { 8983 u16 ctrl; 8984 8985 pci_read_config_word(tp->pdev, 8986 tp->msi_cap + PCI_MSI_FLAGS, 8987 &ctrl); 8988 pci_write_config_word(tp->pdev, 8989 tp->msi_cap + PCI_MSI_FLAGS, 8990 ctrl | PCI_MSI_FLAGS_ENABLE); 8991 val = tr32(MSGINT_MODE); 8992 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE); 8993 } 8994 } 8995 } 8996 8997 static void tg3_override_clk(struct tg3 *tp) 8998 { 8999 u32 val; 9000 9001 switch (tg3_asic_rev(tp)) { 9002 case ASIC_REV_5717: 9003 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE); 9004 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val | 9005 TG3_CPMU_MAC_ORIDE_ENABLE); 9006 break; 9007 9008 case ASIC_REV_5719: 9009 case ASIC_REV_5720: 9010 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN); 9011 break; 9012 9013 default: 9014 return; 9015 } 9016 } 9017 9018 static void tg3_restore_clk(struct tg3 *tp) 9019 { 9020 u32 val; 9021 9022 switch (tg3_asic_rev(tp)) { 9023 case ASIC_REV_5717: 9024 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE); 9025 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, 9026 val & ~TG3_CPMU_MAC_ORIDE_ENABLE); 9027 break; 9028 9029 case ASIC_REV_5719: 9030 case ASIC_REV_5720: 9031 val = tr32(TG3_CPMU_CLCK_ORIDE); 9032 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN); 9033 break; 9034 9035 default: 9036 return; 9037 } 9038 } 9039 9040 /* tp->lock is held. */ 9041 static int tg3_chip_reset(struct tg3 *tp) 9042 __releases(tp->lock) 9043 __acquires(tp->lock) 9044 { 9045 u32 val; 9046 void (*write_op)(struct tg3 *, u32, u32); 9047 int i, err; 9048 9049 if (!pci_device_is_present(tp->pdev)) 9050 return -ENODEV; 9051 9052 tg3_nvram_lock(tp); 9053 9054 tg3_ape_lock(tp, TG3_APE_LOCK_GRC); 9055 9056 /* No matching tg3_nvram_unlock() after this because 9057 * chip reset below will undo the nvram lock. 9058 */ 9059 tp->nvram_lock_cnt = 0; 9060 9061 /* GRC_MISC_CFG core clock reset will clear the memory 9062 * enable bit in PCI register 4 and the MSI enable bit 9063 * on some chips, so we save relevant registers here. 9064 */ 9065 tg3_save_pci_state(tp); 9066 9067 if (tg3_asic_rev(tp) == ASIC_REV_5752 || 9068 tg3_flag(tp, 5755_PLUS)) 9069 tw32(GRC_FASTBOOT_PC, 0); 9070 9071 /* 9072 * We must avoid the readl() that normally takes place. 9073 * It locks machines, causes machine checks, and other 9074 * fun things. So, temporarily disable the 5701 9075 * hardware workaround, while we do the reset. 
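 * (tp->write32 normally points at tg3_write_flush_reg32(), whose
 * posted-write flush is the readl() in question; we swap in the
 * plain tg3_write32() here and restore the saved pointer right
 * after GRC_MISC_CFG is written below.)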
9076 */ 9077 write_op = tp->write32; 9078 if (write_op == tg3_write_flush_reg32) 9079 tp->write32 = tg3_write32; 9080 9081 /* Prevent the irq handler from reading or writing PCI registers 9082 * during chip reset when the memory enable bit in the PCI command 9083 * register may be cleared. The chip does not generate interrupt 9084 * at this time, but the irq handler may still be called due to irq 9085 * sharing or irqpoll. 9086 */ 9087 tg3_flag_set(tp, CHIP_RESETTING); 9088 for (i = 0; i < tp->irq_cnt; i++) { 9089 struct tg3_napi *tnapi = &tp->napi[i]; 9090 if (tnapi->hw_status) { 9091 tnapi->hw_status->status = 0; 9092 tnapi->hw_status->status_tag = 0; 9093 } 9094 tnapi->last_tag = 0; 9095 tnapi->last_irq_tag = 0; 9096 } 9097 smp_mb(); 9098 9099 tg3_full_unlock(tp); 9100 9101 for (i = 0; i < tp->irq_cnt; i++) 9102 synchronize_irq(tp->napi[i].irq_vec); 9103 9104 tg3_full_lock(tp, 0); 9105 9106 if (tg3_asic_rev(tp) == ASIC_REV_57780) { 9107 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; 9108 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); 9109 } 9110 9111 /* do the reset */ 9112 val = GRC_MISC_CFG_CORECLK_RESET; 9113 9114 if (tg3_flag(tp, PCI_EXPRESS)) { 9115 /* Force PCIe 1.0a mode */ 9116 if (tg3_asic_rev(tp) != ASIC_REV_5785 && 9117 !tg3_flag(tp, 57765_PLUS) && 9118 tr32(TG3_PCIE_PHY_TSTCTL) == 9119 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM)) 9120 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM); 9121 9122 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) { 9123 tw32(GRC_MISC_CFG, (1 << 29)); 9124 val |= (1 << 29); 9125 } 9126 } 9127 9128 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 9129 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET); 9130 tw32(GRC_VCPU_EXT_CTRL, 9131 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU); 9132 } 9133 9134 /* Set the clock to the highest frequency to avoid timeouts. With link 9135 * aware mode, the clock speed could be slow and bootcode does not 9136 * complete within the expected time. Override the clock to allow the 9137 * bootcode to finish sooner and then restore it. 9138 */ 9139 tg3_override_clk(tp); 9140 9141 /* Manage gphy power for all CPMU absent PCIe devices. */ 9142 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT)) 9143 val |= GRC_MISC_CFG_KEEP_GPHY_POWER; 9144 9145 tw32(GRC_MISC_CFG, val); 9146 9147 /* restore 5701 hardware bug workaround write method */ 9148 tp->write32 = write_op; 9149 9150 /* Unfortunately, we have to delay before the PCI read back. 9151 * Some 575X chips will not even respond to a PCI cfg access 9152 * when the reset command is given to the chip. 9153 * 9154 * How do these hardware designers expect things to work 9155 * properly if the PCI write is posted for a long period 9156 * of time? It is always necessary to have some method by 9157 * which a register read back can occur to push the write 9158 * out which does the reset. 9159 * 9160 * For most tg3 variants the trick below has worked. 9161 * Ho hum... 9162 */ 9163 udelay(120); 9164 9165 /* Flush PCI posted writes. The normal MMIO registers 9166 * are inaccessible at this time so this is the only 9167 * way to do this reliably (actually, this is no longer 9168 * the case, see above). I tried to use indirect 9169 * register read/write but this upset some 5701 variants.
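 * The PCI_COMMAND config read below is that read back: config
 * space is still reachable even while the MMIO registers are not.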
9170 */ 9171 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val); 9172 9173 udelay(120); 9174 9175 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) { 9176 u16 val16; 9177 9178 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) { 9179 int j; 9180 u32 cfg_val; 9181 9182 /* Wait for link training to complete. */ 9183 for (j = 0; j < 5000; j++) 9184 udelay(100); 9185 9186 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val); 9187 pci_write_config_dword(tp->pdev, 0xc4, 9188 cfg_val | (1 << 15)); 9189 } 9190 9191 /* Clear the "no snoop" and "relaxed ordering" bits. */ 9192 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN; 9193 /* 9194 * Older PCIe devices only support the 128 byte 9195 * MPS setting. Enforce the restriction. 9196 */ 9197 if (!tg3_flag(tp, CPMU_PRESENT)) 9198 val16 |= PCI_EXP_DEVCTL_PAYLOAD; 9199 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16); 9200 9201 /* Clear error status */ 9202 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA, 9203 PCI_EXP_DEVSTA_CED | 9204 PCI_EXP_DEVSTA_NFED | 9205 PCI_EXP_DEVSTA_FED | 9206 PCI_EXP_DEVSTA_URD); 9207 } 9208 9209 tg3_restore_pci_state(tp); 9210 9211 tg3_flag_clear(tp, CHIP_RESETTING); 9212 tg3_flag_clear(tp, ERROR_PROCESSED); 9213 9214 val = 0; 9215 if (tg3_flag(tp, 5780_CLASS)) 9216 val = tr32(MEMARB_MODE); 9217 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); 9218 9219 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) { 9220 tg3_stop_fw(tp); 9221 tw32(0x5000, 0x400); 9222 } 9223 9224 if (tg3_flag(tp, IS_SSB_CORE)) { 9225 /* 9226 * BCM4785: In order to avoid repercussions from using 9227 * potentially defective internal ROM, stop the Rx RISC CPU, 9228 * which is not required. 9229 */ 9230 tg3_stop_fw(tp); 9231 tg3_halt_cpu(tp, RX_CPU_BASE); 9232 } 9233 9234 err = tg3_poll_fw(tp); 9235 if (err) 9236 return err; 9237 9238 tw32(GRC_MODE, tp->grc_mode); 9239 9240 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) { 9241 val = tr32(0xc4); 9242 9243 tw32(0xc4, val | (1 << 15)); 9244 } 9245 9246 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 && 9247 tg3_asic_rev(tp) == ASIC_REV_5705) { 9248 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE; 9249 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) 9250 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN; 9251 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); 9252 } 9253 9254 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 9255 tp->mac_mode = MAC_MODE_PORT_MODE_TBI; 9256 val = tp->mac_mode; 9257 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { 9258 tp->mac_mode = MAC_MODE_PORT_MODE_GMII; 9259 val = tp->mac_mode; 9260 } else 9261 val = 0; 9262 9263 tw32_f(MAC_MODE, val); 9264 udelay(40); 9265 9266 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC); 9267 9268 tg3_mdio_start(tp); 9269 9270 if (tg3_flag(tp, PCI_EXPRESS) && 9271 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 && 9272 tg3_asic_rev(tp) != ASIC_REV_5785 && 9273 !tg3_flag(tp, 57765_PLUS)) { 9274 val = tr32(0x7c00); 9275 9276 tw32(0x7c00, val | (1 << 25)); 9277 } 9278 9279 tg3_restore_clk(tp); 9280 9281 /* Increase the core clock speed to fix tx timeout issue for 5762 9282 * with 100Mbps link speed. 9283 */ 9284 if (tg3_asic_rev(tp) == ASIC_REV_5762) { 9285 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE); 9286 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val | 9287 TG3_CPMU_MAC_ORIDE_ENABLE); 9288 } 9289 9290 /* Reprobe ASF enable state. 
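 *
 * The reset invalidated our cached firmware state, so clear the ASF
 * and power-related phy flags and re-read the NIC SRAM signature and
 * config words to rediscover whether ASF firmware is active.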
*/ 9291 tg3_flag_clear(tp, ENABLE_ASF); 9292 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK | 9293 TG3_PHYFLG_KEEP_LINK_ON_PWRDN); 9294 9295 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE); 9296 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); 9297 if (val == NIC_SRAM_DATA_SIG_MAGIC) { 9298 u32 nic_cfg; 9299 9300 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); 9301 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { 9302 tg3_flag_set(tp, ENABLE_ASF); 9303 tp->last_event_jiffies = jiffies; 9304 if (tg3_flag(tp, 5750_PLUS)) 9305 tg3_flag_set(tp, ASF_NEW_HANDSHAKE); 9306 9307 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg); 9308 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK) 9309 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK; 9310 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID) 9311 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN; 9312 } 9313 } 9314 9315 return 0; 9316 } 9317 9318 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *); 9319 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *); 9320 static void __tg3_set_rx_mode(struct net_device *); 9321 9322 /* tp->lock is held. */ 9323 static int tg3_halt(struct tg3 *tp, int kind, bool silent) 9324 { 9325 int err; 9326 9327 tg3_stop_fw(tp); 9328 9329 tg3_write_sig_pre_reset(tp, kind); 9330 9331 tg3_abort_hw(tp, silent); 9332 err = tg3_chip_reset(tp); 9333 9334 __tg3_set_mac_addr(tp, false); 9335 9336 tg3_write_sig_legacy(tp, kind); 9337 tg3_write_sig_post_reset(tp, kind); 9338 9339 if (tp->hw_stats) { 9340 /* Save the stats across chip resets... */ 9341 tg3_get_nstats(tp, &tp->net_stats_prev); 9342 tg3_get_estats(tp, &tp->estats_prev); 9343 9344 /* And make sure the next sample is new data */ 9345 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); 9346 } 9347 9348 return err; 9349 } 9350 9351 static int tg3_set_mac_addr(struct net_device *dev, void *p) 9352 { 9353 struct tg3 *tp = netdev_priv(dev); 9354 struct sockaddr *addr = p; 9355 int err = 0; 9356 bool skip_mac_1 = false; 9357 9358 if (!is_valid_ether_addr(addr->sa_data)) 9359 return -EADDRNOTAVAIL; 9360 9361 eth_hw_addr_set(dev, addr->sa_data); 9362 9363 if (!netif_running(dev)) 9364 return 0; 9365 9366 if (tg3_flag(tp, ENABLE_ASF)) { 9367 u32 addr0_high, addr0_low, addr1_high, addr1_low; 9368 9369 addr0_high = tr32(MAC_ADDR_0_HIGH); 9370 addr0_low = tr32(MAC_ADDR_0_LOW); 9371 addr1_high = tr32(MAC_ADDR_1_HIGH); 9372 addr1_low = tr32(MAC_ADDR_1_LOW); 9373 9374 /* Skip MAC addr 1 if ASF is using it. */ 9375 if ((addr0_high != addr1_high || addr0_low != addr1_low) && 9376 !(addr1_high == 0 && addr1_low == 0)) 9377 skip_mac_1 = true; 9378 } 9379 spin_lock_bh(&tp->lock); 9380 __tg3_set_mac_addr(tp, skip_mac_1); 9381 __tg3_set_rx_mode(dev); 9382 spin_unlock_bh(&tp->lock); 9383 9384 return err; 9385 } 9386 9387 /* tp->lock is held. 
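 *
 * tg3_set_bdinfo() programs one TG3_BDINFO control block in NIC SRAM:
 * the 64-bit host DMA address of a ring (split into 32-bit high/low
 * words), the maxlen/flags word and, on pre-5705 chips, the ring's
 * NIC SRAM address. A worked example with a made-up mapping, purely
 * for illustration:
 *
 *   mapping = 0x0000000123456000
 *   TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH <- 0x00000001  (mapping >> 32)
 *   TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW  <- 0x23456000  (mapping & 0xffffffff)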
*/ 9388 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr, 9389 dma_addr_t mapping, u32 maxlen_flags, 9390 u32 nic_addr) 9391 { 9392 tg3_write_mem(tp, 9393 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH), 9394 ((u64) mapping >> 32)); 9395 tg3_write_mem(tp, 9396 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW), 9397 ((u64) mapping & 0xffffffff)); 9398 tg3_write_mem(tp, 9399 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS), 9400 maxlen_flags); 9401 9402 if (!tg3_flag(tp, 5705_PLUS)) 9403 tg3_write_mem(tp, 9404 (bdinfo_addr + TG3_BDINFO_NIC_ADDR), 9405 nic_addr); 9406 } 9407 9408 9409 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec) 9410 { 9411 int i = 0; 9412 9413 if (!tg3_flag(tp, ENABLE_TSS)) { 9414 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs); 9415 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames); 9416 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq); 9417 } else { 9418 tw32(HOSTCC_TXCOL_TICKS, 0); 9419 tw32(HOSTCC_TXMAX_FRAMES, 0); 9420 tw32(HOSTCC_TXCOAL_MAXF_INT, 0); 9421 9422 for (; i < tp->txq_cnt; i++) { 9423 u32 reg; 9424 9425 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18; 9426 tw32(reg, ec->tx_coalesce_usecs); 9427 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18; 9428 tw32(reg, ec->tx_max_coalesced_frames); 9429 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18; 9430 tw32(reg, ec->tx_max_coalesced_frames_irq); 9431 } 9432 } 9433 9434 for (; i < tp->irq_max - 1; i++) { 9435 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0); 9436 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0); 9437 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); 9438 } 9439 } 9440 9441 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec) 9442 { 9443 int i = 0; 9444 u32 limit = tp->rxq_cnt; 9445 9446 if (!tg3_flag(tp, ENABLE_RSS)) { 9447 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs); 9448 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames); 9449 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq); 9450 limit--; 9451 } else { 9452 tw32(HOSTCC_RXCOL_TICKS, 0); 9453 tw32(HOSTCC_RXMAX_FRAMES, 0); 9454 tw32(HOSTCC_RXCOAL_MAXF_INT, 0); 9455 } 9456 9457 for (; i < limit; i++) { 9458 u32 reg; 9459 9460 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18; 9461 tw32(reg, ec->rx_coalesce_usecs); 9462 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18; 9463 tw32(reg, ec->rx_max_coalesced_frames); 9464 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18; 9465 tw32(reg, ec->rx_max_coalesced_frames_irq); 9466 } 9467 9468 for (; i < tp->irq_max - 1; i++) { 9469 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0); 9470 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0); 9471 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); 9472 } 9473 } 9474 9475 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) 9476 { 9477 tg3_coal_tx_init(tp, ec); 9478 tg3_coal_rx_init(tp, ec); 9479 9480 if (!tg3_flag(tp, 5705_PLUS)) { 9481 u32 val = ec->stats_block_coalesce_usecs; 9482 9483 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq); 9484 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq); 9485 9486 if (!tp->link_up) 9487 val = 0; 9488 9489 tw32(HOSTCC_STAT_COAL_TICKS, val); 9490 } 9491 } 9492 9493 /* tp->lock is held. */ 9494 static void tg3_tx_rcbs_disable(struct tg3 *tp) 9495 { 9496 u32 txrcb, limit; 9497 9498 /* Disable all transmit rings but the first. 
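 * How many send BDINFO blocks need clearing depends on the ASIC:
 * 16 on pre-5705 parts, 4 on 5717-plus, 2 on the 57765 class and
 * the 5762, and a single one everywhere else, as the limit
 * calculation below reflects.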
*/ 9499 if (!tg3_flag(tp, 5705_PLUS)) 9500 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16; 9501 else if (tg3_flag(tp, 5717_PLUS)) 9502 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4; 9503 else if (tg3_flag(tp, 57765_CLASS) || 9504 tg3_asic_rev(tp) == ASIC_REV_5762) 9505 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2; 9506 else 9507 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; 9508 9509 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; 9510 txrcb < limit; txrcb += TG3_BDINFO_SIZE) 9511 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS, 9512 BDINFO_FLAGS_DISABLED); 9513 } 9514 9515 /* tp->lock is held. */ 9516 static void tg3_tx_rcbs_init(struct tg3 *tp) 9517 { 9518 int i = 0; 9519 u32 txrcb = NIC_SRAM_SEND_RCB; 9520 9521 if (tg3_flag(tp, ENABLE_TSS)) 9522 i++; 9523 9524 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) { 9525 struct tg3_napi *tnapi = &tp->napi[i]; 9526 9527 if (!tnapi->tx_ring) 9528 continue; 9529 9530 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, 9531 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT), 9532 NIC_SRAM_TX_BUFFER_DESC); 9533 } 9534 } 9535 9536 /* tp->lock is held. */ 9537 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp) 9538 { 9539 u32 rxrcb, limit; 9540 9541 /* Disable all receive return rings but the first. */ 9542 if (tg3_flag(tp, 5717_PLUS)) 9543 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17; 9544 else if (!tg3_flag(tp, 5705_PLUS)) 9545 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16; 9546 else if (tg3_asic_rev(tp) == ASIC_REV_5755 || 9547 tg3_asic_rev(tp) == ASIC_REV_5762 || 9548 tg3_flag(tp, 57765_CLASS)) 9549 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4; 9550 else 9551 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; 9552 9553 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; 9554 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE) 9555 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS, 9556 BDINFO_FLAGS_DISABLED); 9557 } 9558 9559 /* tp->lock is held. */ 9560 static void tg3_rx_ret_rcbs_init(struct tg3 *tp) 9561 { 9562 int i = 0; 9563 u32 rxrcb = NIC_SRAM_RCV_RET_RCB; 9564 9565 if (tg3_flag(tp, ENABLE_RSS)) 9566 i++; 9567 9568 for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) { 9569 struct tg3_napi *tnapi = &tp->napi[i]; 9570 9571 if (!tnapi->rx_rcb) 9572 continue; 9573 9574 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, 9575 (tp->rx_ret_ring_mask + 1) << 9576 BDINFO_FLAGS_MAXLEN_SHIFT, 0); 9577 } 9578 } 9579 9580 /* tp->lock is held. */ 9581 static void tg3_rings_reset(struct tg3 *tp) 9582 { 9583 int i; 9584 u32 stblk; 9585 struct tg3_napi *tnapi = &tp->napi[0]; 9586 9587 tg3_tx_rcbs_disable(tp); 9588 9589 tg3_rx_ret_rcbs_disable(tp); 9590 9591 /* Disable interrupts */ 9592 tw32_mailbox_f(tp->napi[0].int_mbox, 1); 9593 tp->napi[0].chk_msi_cnt = 0; 9594 tp->napi[0].last_rx_cons = 0; 9595 tp->napi[0].last_tx_cons = 0; 9596 9597 /* Zero mailbox registers. 
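 * Each vector owns a tx producer and an rx consumer mailbox;
 * clearing them (and writing 1 to the interrupt mailboxes) should
 * leave no stale indices behind from before the reset.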
*/ 9598 if (tg3_flag(tp, SUPPORT_MSIX)) { 9599 for (i = 1; i < tp->irq_max; i++) { 9600 tp->napi[i].tx_prod = 0; 9601 tp->napi[i].tx_cons = 0; 9602 if (tg3_flag(tp, ENABLE_TSS)) 9603 tw32_mailbox(tp->napi[i].prodmbox, 0); 9604 tw32_rx_mbox(tp->napi[i].consmbox, 0); 9605 tw32_mailbox_f(tp->napi[i].int_mbox, 1); 9606 tp->napi[i].chk_msi_cnt = 0; 9607 tp->napi[i].last_rx_cons = 0; 9608 tp->napi[i].last_tx_cons = 0; 9609 } 9610 if (!tg3_flag(tp, ENABLE_TSS)) 9611 tw32_mailbox(tp->napi[0].prodmbox, 0); 9612 } else { 9613 tp->napi[0].tx_prod = 0; 9614 tp->napi[0].tx_cons = 0; 9615 tw32_mailbox(tp->napi[0].prodmbox, 0); 9616 tw32_rx_mbox(tp->napi[0].consmbox, 0); 9617 } 9618 9619 /* Make sure the NIC-based send BD rings are disabled. */ 9620 if (!tg3_flag(tp, 5705_PLUS)) { 9621 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW; 9622 for (i = 0; i < 16; i++) 9623 tw32_tx_mbox(mbox + i * 8, 0); 9624 } 9625 9626 /* Clear status block in ram. */ 9627 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); 9628 9629 /* Set status block DMA address */ 9630 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, 9631 ((u64) tnapi->status_mapping >> 32)); 9632 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, 9633 ((u64) tnapi->status_mapping & 0xffffffff)); 9634 9635 stblk = HOSTCC_STATBLCK_RING1; 9636 9637 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) { 9638 u64 mapping = (u64)tnapi->status_mapping; 9639 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32); 9640 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff); 9641 stblk += 8; 9642 9643 /* Clear status block in ram. */ 9644 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); 9645 } 9646 9647 tg3_tx_rcbs_init(tp); 9648 tg3_rx_ret_rcbs_init(tp); 9649 } 9650 9651 static void tg3_setup_rxbd_thresholds(struct tg3 *tp) 9652 { 9653 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh; 9654 9655 if (!tg3_flag(tp, 5750_PLUS) || 9656 tg3_flag(tp, 5780_CLASS) || 9657 tg3_asic_rev(tp) == ASIC_REV_5750 || 9658 tg3_asic_rev(tp) == ASIC_REV_5752 || 9659 tg3_flag(tp, 57765_PLUS)) 9660 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700; 9661 else if (tg3_asic_rev(tp) == ASIC_REV_5755 || 9662 tg3_asic_rev(tp) == ASIC_REV_5787) 9663 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755; 9664 else 9665 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906; 9666 9667 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post); 9668 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1); 9669 9670 val = min(nic_rep_thresh, host_rep_thresh); 9671 tw32(RCVBDI_STD_THRESH, val); 9672 9673 if (tg3_flag(tp, 57765_PLUS)) 9674 tw32(STD_REPLENISH_LWM, bdcache_maxcnt); 9675 9676 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS)) 9677 return; 9678 9679 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700; 9680 9681 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1); 9682 9683 val = min(bdcache_maxcnt / 2, host_rep_thresh); 9684 tw32(RCVBDI_JUMBO_THRESH, val); 9685 9686 if (tg3_flag(tp, 57765_PLUS)) 9687 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt); 9688 } 9689 9690 static inline u32 calc_crc(unsigned char *buf, int len) 9691 { 9692 u32 reg; 9693 u32 tmp; 9694 int j, k; 9695 9696 reg = 0xffffffff; 9697 9698 for (j = 0; j < len; j++) { 9699 reg ^= buf[j]; 9700 9701 for (k = 0; k < 8; k++) { 9702 tmp = reg & 0x01; 9703 9704 reg >>= 1; 9705 9706 if (tmp) 9707 reg ^= CRC32_POLY_LE; 9708 } 9709 } 9710 9711 return ~reg; 9712 } 9713 9714 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all) 9715 { 9716 /* accept or reject all multicast frames */ 9717 
tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0); 9718 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0); 9719 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0); 9720 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0); 9721 } 9722 9723 static void __tg3_set_rx_mode(struct net_device *dev) 9724 { 9725 struct tg3 *tp = netdev_priv(dev); 9726 u32 rx_mode; 9727 9728 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC | 9729 RX_MODE_KEEP_VLAN_TAG); 9730 9731 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE) 9732 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG 9733 * flag clear. 9734 */ 9735 if (!tg3_flag(tp, ENABLE_ASF)) 9736 rx_mode |= RX_MODE_KEEP_VLAN_TAG; 9737 #endif 9738 9739 if (dev->flags & IFF_PROMISC) { 9740 /* Promiscuous mode. */ 9741 rx_mode |= RX_MODE_PROMISC; 9742 } else if (dev->flags & IFF_ALLMULTI) { 9743 /* Accept all multicast. */ 9744 tg3_set_multi(tp, 1); 9745 } else if (netdev_mc_empty(dev)) { 9746 /* Reject all multicast. */ 9747 tg3_set_multi(tp, 0); 9748 } else { 9749 /* Accept one or more multicast addresses. */ 9750 struct netdev_hw_addr *ha; 9751 u32 mc_filter[4] = { 0, }; 9752 u32 regidx; 9753 u32 bit; 9754 u32 crc; 9755 9756 netdev_for_each_mc_addr(ha, dev) { 9757 crc = calc_crc(ha->addr, ETH_ALEN); /* low 7 bits of ~crc pick one of 128 hash bits: bits 6:5 select the register, bits 4:0 the bit within it */ 9758 bit = ~crc & 0x7f; 9759 regidx = (bit & 0x60) >> 5; 9760 bit &= 0x1f; 9761 mc_filter[regidx] |= (1 << bit); 9762 } 9763 9764 tw32(MAC_HASH_REG_0, mc_filter[0]); 9765 tw32(MAC_HASH_REG_1, mc_filter[1]); 9766 tw32(MAC_HASH_REG_2, mc_filter[2]); 9767 tw32(MAC_HASH_REG_3, mc_filter[3]); 9768 } 9769 9770 if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) { 9771 rx_mode |= RX_MODE_PROMISC; 9772 } else if (!(dev->flags & IFF_PROMISC)) { 9773 /* Add all entries into the mac addr filter list */ 9774 int i = 0; 9775 struct netdev_hw_addr *ha; 9776 9777 netdev_for_each_uc_addr(ha, dev) { 9778 __tg3_set_one_mac_addr(tp, ha->addr, 9779 i + TG3_UCAST_ADDR_IDX(tp)); 9780 i++; 9781 } 9782 } 9783 9784 if (rx_mode != tp->rx_mode) { 9785 tp->rx_mode = rx_mode; 9786 tw32_f(MAC_RX_MODE, rx_mode); 9787 udelay(10); 9788 } 9789 } 9790 9791 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt) 9792 { 9793 int i; 9794 9795 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) 9796 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt); 9797 } 9798 9799 static void tg3_rss_check_indir_tbl(struct tg3 *tp) 9800 { 9801 int i; 9802 9803 if (!tg3_flag(tp, SUPPORT_MSIX)) 9804 return; 9805 9806 if (tp->rxq_cnt == 1) { 9807 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl)); 9808 return; 9809 } 9810 9811 /* Validate table against current IRQ count */ 9812 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) { 9813 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt) 9814 break; 9815 } 9816 9817 if (i != TG3_RSS_INDIR_TBL_SIZE) 9818 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt); 9819 } 9820 9821 static void tg3_rss_write_indir_tbl(struct tg3 *tp) 9822 { 9823 int i = 0; 9824 u32 reg = MAC_RSS_INDIR_TBL_0; 9825 9826 while (i < TG3_RSS_INDIR_TBL_SIZE) { 9827 u32 val = tp->rss_ind_tbl[i]; 9828 i++; 9829 for (; i % 8; i++) { 9830 val <<= 4; 9831 val |= tp->rss_ind_tbl[i]; 9832 } 9833 tw32(reg, val); 9834 reg += 4; 9835 } 9836 } 9837 9838 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp) 9839 { 9840 if (tg3_asic_rev(tp) == ASIC_REV_5719) 9841 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719; 9842 else 9843 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720; 9844 } 9845 9846 /* tp->lock is held.
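 *
 * tg3_reset_hw() takes the chip from reset back to an operating
 * state: quiesce and reset via tg3_chip_reset(), reprogram the
 * clock, PCI and DMA control state, rebuild the rings with
 * tg3_init_rings(), then bring up the buffer manager, the DMA
 * engines and the MAC.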
*/ 9847 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy) 9848 { 9849 u32 val, rdmac_mode; 9850 int i, err, limit; 9851 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring; 9852 9853 tg3_disable_ints(tp); 9854 9855 tg3_stop_fw(tp); 9856 9857 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT); 9858 9859 if (tg3_flag(tp, INIT_COMPLETE)) 9860 tg3_abort_hw(tp, 1); 9861 9862 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && 9863 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) { 9864 tg3_phy_pull_config(tp); 9865 tg3_eee_pull_config(tp, NULL); 9866 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; 9867 } 9868 9869 /* Enable MAC control of LPI */ 9870 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) 9871 tg3_setup_eee(tp); 9872 9873 if (reset_phy) 9874 tg3_phy_reset(tp); 9875 9876 err = tg3_chip_reset(tp); 9877 if (err) 9878 return err; 9879 9880 tg3_write_sig_legacy(tp, RESET_KIND_INIT); 9881 9882 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) { 9883 val = tr32(TG3_CPMU_CTRL); 9884 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE); 9885 tw32(TG3_CPMU_CTRL, val); 9886 9887 val = tr32(TG3_CPMU_LSPD_10MB_CLK); 9888 val &= ~CPMU_LSPD_10MB_MACCLK_MASK; 9889 val |= CPMU_LSPD_10MB_MACCLK_6_25; 9890 tw32(TG3_CPMU_LSPD_10MB_CLK, val); 9891 9892 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD); 9893 val &= ~CPMU_LNK_AWARE_MACCLK_MASK; 9894 val |= CPMU_LNK_AWARE_MACCLK_6_25; 9895 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val); 9896 9897 val = tr32(TG3_CPMU_HST_ACC); 9898 val &= ~CPMU_HST_ACC_MACCLK_MASK; 9899 val |= CPMU_HST_ACC_MACCLK_6_25; 9900 tw32(TG3_CPMU_HST_ACC, val); 9901 } 9902 9903 if (tg3_asic_rev(tp) == ASIC_REV_57780) { 9904 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK; 9905 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN | 9906 PCIE_PWR_MGMT_L1_THRESH_4MS; 9907 tw32(PCIE_PWR_MGMT_THRESH, val); 9908 9909 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK; 9910 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS); 9911 9912 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR); 9913 9914 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; 9915 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); 9916 } 9917 9918 if (tg3_flag(tp, L1PLLPD_EN)) { 9919 u32 grc_mode = tr32(GRC_MODE); 9920 9921 /* Access the lower 1K of PL PCIE block registers. */ 9922 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; 9923 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL); 9924 9925 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1); 9926 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1, 9927 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN); 9928 9929 tw32(GRC_MODE, grc_mode); 9930 } 9931 9932 if (tg3_flag(tp, 57765_CLASS)) { 9933 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) { 9934 u32 grc_mode = tr32(GRC_MODE); 9935 9936 /* Access the lower 1K of PL PCIE block registers. */ 9937 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; 9938 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL); 9939 9940 val = tr32(TG3_PCIE_TLDLPL_PORT + 9941 TG3_PCIE_PL_LO_PHYCTL5); 9942 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5, 9943 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ); 9944 9945 tw32(GRC_MODE, grc_mode); 9946 } 9947 9948 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) { 9949 u32 grc_mode; 9950 9951 /* Fix transmit hangs */ 9952 val = tr32(TG3_CPMU_PADRNG_CTL); 9953 val |= TG3_CPMU_PADRNG_CTL_RDIV2; 9954 tw32(TG3_CPMU_PADRNG_CTL, val); 9955 9956 grc_mode = tr32(GRC_MODE); 9957 9958 /* Access the lower 1K of DL PCIE block registers. 
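 * (GRC_MODE_PCIE_PORT_MASK selects which PCIE block's registers
 * appear in the TG3_PCIE_TLDLPL_PORT window: GRC_MODE_PCIE_PL_SEL
 * mapped the PL block above, GRC_MODE_PCIE_DL_SEL maps the DL
 * block here, and the saved grc_mode is restored afterwards.)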
*/ 9959 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; 9960 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL); 9961 9962 val = tr32(TG3_PCIE_TLDLPL_PORT + 9963 TG3_PCIE_DL_LO_FTSMAX); 9964 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK; 9965 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX, 9966 val | TG3_PCIE_DL_LO_FTSMAX_VAL); 9967 9968 tw32(GRC_MODE, grc_mode); 9969 } 9970 9971 val = tr32(TG3_CPMU_LSPD_10MB_CLK); 9972 val &= ~CPMU_LSPD_10MB_MACCLK_MASK; 9973 val |= CPMU_LSPD_10MB_MACCLK_6_25; 9974 tw32(TG3_CPMU_LSPD_10MB_CLK, val); 9975 } 9976 9977 /* This works around an issue with Athlon chipsets on 9978 * B3 tigon3 silicon. This bit has no effect on any 9979 * other revision. But do not set this on PCI Express 9980 * chips and don't even touch the clocks if the CPMU is present. 9981 */ 9982 if (!tg3_flag(tp, CPMU_PRESENT)) { 9983 if (!tg3_flag(tp, PCI_EXPRESS)) 9984 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT; 9985 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); 9986 } 9987 9988 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 && 9989 tg3_flag(tp, PCIX_MODE)) { 9990 val = tr32(TG3PCI_PCISTATE); 9991 val |= PCISTATE_RETRY_SAME_DMA; 9992 tw32(TG3PCI_PCISTATE, val); 9993 } 9994 9995 if (tg3_flag(tp, ENABLE_APE)) { 9996 /* Allow reads and writes to the 9997 * APE register and memory space. 9998 */ 9999 val = tr32(TG3PCI_PCISTATE); 10000 val |= PCISTATE_ALLOW_APE_CTLSPC_WR | 10001 PCISTATE_ALLOW_APE_SHMEM_WR | 10002 PCISTATE_ALLOW_APE_PSPACE_WR; 10003 tw32(TG3PCI_PCISTATE, val); 10004 } 10005 10006 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) { 10007 /* Enable some hw fixes. */ 10008 val = tr32(TG3PCI_MSI_DATA); 10009 val |= (1 << 26) | (1 << 28) | (1 << 29); 10010 tw32(TG3PCI_MSI_DATA, val); 10011 } 10012 10013 /* Descriptor ring init may make accesses to the 10014 * NIC SRAM area to set up the TX descriptors, so we 10015 * can only do this after the hardware has been 10016 * successfully reset. 10017 */ 10018 err = tg3_init_rings(tp); 10019 if (err) 10020 return err; 10021 10022 if (tg3_flag(tp, 57765_PLUS)) { 10023 val = tr32(TG3PCI_DMA_RW_CTRL) & 10024 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT; 10025 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) 10026 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK; 10027 if (!tg3_flag(tp, 57765_CLASS) && 10028 tg3_asic_rev(tp) != ASIC_REV_5717 && 10029 tg3_asic_rev(tp) != ASIC_REV_5762) 10030 val |= DMA_RWCTRL_TAGGED_STAT_WA; 10031 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl); 10032 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 && 10033 tg3_asic_rev(tp) != ASIC_REV_5761) { 10034 /* This value is determined during the probe time DMA 10035 * engine test, tg3_test_dma. 10036 */ 10037 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 10038 } 10039 10040 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS | 10041 GRC_MODE_4X_NIC_SEND_RINGS | 10042 GRC_MODE_NO_TX_PHDR_CSUM | 10043 GRC_MODE_NO_RX_PHDR_CSUM); 10044 tp->grc_mode |= GRC_MODE_HOST_SENDBDS; 10045 10046 /* Pseudo-header checksum is done by hardware logic and not 10047 * the offload processors, so make the chip do the pseudo- 10048 * header checksums on receive. For transmit it is more 10049 * convenient to do the pseudo-header checksum in software 10050 * as Linux does that on transmit for us in all cases.
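 * Hence GRC_MODE_NO_TX_PHDR_CSUM is set just below, while
 * GRC_MODE_NO_RX_PHDR_CSUM is left clear so the chip keeps doing
 * the rx pseudo-header checksum.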
10051 */ 10052 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM; 10053 10054 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP; 10055 if (tp->rxptpctl) 10056 tw32(TG3_RX_PTP_CTL, 10057 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK); 10058 10059 if (tg3_flag(tp, PTP_CAPABLE)) 10060 val |= GRC_MODE_TIME_SYNC_ENABLE; 10061 10062 tw32(GRC_MODE, tp->grc_mode | val); 10063 10064 /* On one AMD platform, MRRS is restricted to 4000 because of a 10065 * south bridge limitation. As a workaround, the driver sets MRRS 10066 * to 2048 instead of the default 4096. 10067 */ 10068 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL && 10069 tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) { 10070 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK; 10071 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048); 10072 } 10073 10074 /* Set up the timer prescaler register. The clock is always 66 MHz. */ 10075 val = tr32(GRC_MISC_CFG); 10076 val &= ~0xff; 10077 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT); 10078 tw32(GRC_MISC_CFG, val); 10079 10080 /* Initialize MBUF/DESC pool. */ 10081 if (tg3_flag(tp, 5750_PLUS)) { 10082 /* Do nothing. */ 10083 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) { 10084 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE); 10085 if (tg3_asic_rev(tp) == ASIC_REV_5704) 10086 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64); 10087 else 10088 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96); 10089 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE); 10090 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE); 10091 } else if (tg3_flag(tp, TSO_CAPABLE)) { 10092 int fw_len; 10093 10094 fw_len = tp->fw_len; 10095 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1); 10096 tw32(BUFMGR_MB_POOL_ADDR, 10097 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len); 10098 tw32(BUFMGR_MB_POOL_SIZE, 10099 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00); 10100 } 10101 10102 if (tp->dev->mtu <= ETH_DATA_LEN) { 10103 tw32(BUFMGR_MB_RDMA_LOW_WATER, 10104 tp->bufmgr_config.mbuf_read_dma_low_water); 10105 tw32(BUFMGR_MB_MACRX_LOW_WATER, 10106 tp->bufmgr_config.mbuf_mac_rx_low_water); 10107 tw32(BUFMGR_MB_HIGH_WATER, 10108 tp->bufmgr_config.mbuf_high_water); 10109 } else { 10110 tw32(BUFMGR_MB_RDMA_LOW_WATER, 10111 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo); 10112 tw32(BUFMGR_MB_MACRX_LOW_WATER, 10113 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo); 10114 tw32(BUFMGR_MB_HIGH_WATER, 10115 tp->bufmgr_config.mbuf_high_water_jumbo); 10116 } 10117 tw32(BUFMGR_DMA_LOW_WATER, 10118 tp->bufmgr_config.dma_low_water); 10119 tw32(BUFMGR_DMA_HIGH_WATER, 10120 tp->bufmgr_config.dma_high_water); 10121 10122 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE; 10123 if (tg3_asic_rev(tp) == ASIC_REV_5719) 10124 val |= BUFMGR_MODE_NO_TX_UNDERRUN; 10125 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 10126 tg3_asic_rev(tp) == ASIC_REV_5762 || 10127 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || 10128 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) 10129 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB; 10130 tw32(BUFMGR_MODE, val); 10131 for (i = 0; i < 2000; i++) { 10132 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE) 10133 break; 10134 udelay(10); 10135 } 10136 if (i >= 2000) { 10137 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__); 10138 return -ENODEV; 10139 } 10140 10141 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1) 10142 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2); 10143 10144 tg3_setup_rxbd_thresholds(tp); 10145 10146 /* Initialize TG3_BDINFO's at: 10147 * RCVDBDI_STD_BD: standard eth size rx ring 10148 *
RCVDBDI_JUMBO_BD: jumbo frame rx ring 10149 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work) 10150 * 10151 * like so: 10152 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring 10153 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) | 10154 * ring attribute flags 10155 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM 10156 * 10157 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries. 10158 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries. 10159 * 10160 * The size of each ring is fixed in the firmware, but the location is 10161 * configurable. 10162 */ 10163 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, 10164 ((u64) tpr->rx_std_mapping >> 32)); 10165 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, 10166 ((u64) tpr->rx_std_mapping & 0xffffffff)); 10167 if (!tg3_flag(tp, 5717_PLUS)) 10168 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, 10169 NIC_SRAM_RX_BUFFER_DESC); 10170 10171 /* Disable the mini ring */ 10172 if (!tg3_flag(tp, 5705_PLUS)) 10173 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS, 10174 BDINFO_FLAGS_DISABLED); 10175 10176 /* Program the jumbo buffer descriptor ring control 10177 * blocks on those devices that have them. 10178 */ 10179 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || 10180 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) { 10181 10182 if (tg3_flag(tp, JUMBO_RING_ENABLE)) { 10183 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, 10184 ((u64) tpr->rx_jmb_mapping >> 32)); 10185 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, 10186 ((u64) tpr->rx_jmb_mapping & 0xffffffff)); 10187 val = TG3_RX_JMB_RING_SIZE(tp) << 10188 BDINFO_FLAGS_MAXLEN_SHIFT; 10189 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, 10190 val | BDINFO_FLAGS_USE_EXT_RECV); 10191 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) || 10192 tg3_flag(tp, 57765_CLASS) || 10193 tg3_asic_rev(tp) == ASIC_REV_5762) 10194 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, 10195 NIC_SRAM_RX_JUMBO_BUFFER_DESC); 10196 } else { 10197 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, 10198 BDINFO_FLAGS_DISABLED); 10199 } 10200 10201 if (tg3_flag(tp, 57765_PLUS)) { 10202 val = TG3_RX_STD_RING_SIZE(tp); 10203 val <<= BDINFO_FLAGS_MAXLEN_SHIFT; 10204 val |= (TG3_RX_STD_DMA_SZ << 2); 10205 } else 10206 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT; 10207 } else 10208 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT; 10209 10210 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val); 10211 10212 tpr->rx_std_prod_idx = tp->rx_pending; 10213 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx); 10214 10215 tpr->rx_jmb_prod_idx = 10216 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0; 10217 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx); 10218 10219 tg3_rings_reset(tp); 10220 10221 /* Initialize MAC address and backoff seed. */ 10222 __tg3_set_mac_addr(tp, false); 10223 10224 /* MTU + ethernet header + FCS + optional VLAN tag */ 10225 tw32(MAC_RX_MTU_SIZE, 10226 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); 10227 10228 /* The slot time is changed by tg3_setup_phy if we 10229 * run at gigabit with half duplex. 
10230 */ 10231 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) | 10232 (6 << TX_LENGTHS_IPG_SHIFT) | 10233 (32 << TX_LENGTHS_SLOT_TIME_SHIFT); 10234 10235 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 10236 tg3_asic_rev(tp) == ASIC_REV_5762) 10237 val |= tr32(MAC_TX_LENGTHS) & 10238 (TX_LENGTHS_JMB_FRM_LEN_MSK | 10239 TX_LENGTHS_CNT_DWN_VAL_MSK); 10240 10241 tw32(MAC_TX_LENGTHS, val); 10242 10243 /* Receive rules. */ 10244 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS); 10245 tw32(RCVLPC_CONFIG, 0x0181); 10246 10247 /* Calculate RDMAC_MODE setting early, we need it to determine 10248 * the RCVLPC_STATE_ENABLE mask. 10249 */ 10250 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB | 10251 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB | 10252 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB | 10253 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB | 10254 RDMAC_MODE_LNGREAD_ENAB); 10255 10256 if (tg3_asic_rev(tp) == ASIC_REV_5717) 10257 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS; 10258 10259 if (tg3_asic_rev(tp) == ASIC_REV_5784 || 10260 tg3_asic_rev(tp) == ASIC_REV_5785 || 10261 tg3_asic_rev(tp) == ASIC_REV_57780) 10262 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB | 10263 RDMAC_MODE_MBUF_RBD_CRPT_ENAB | 10264 RDMAC_MODE_MBUF_SBD_CRPT_ENAB; 10265 10266 if (tg3_asic_rev(tp) == ASIC_REV_5705 && 10267 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) { 10268 if (tg3_flag(tp, TSO_CAPABLE)) { 10269 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128; 10270 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && 10271 !tg3_flag(tp, IS_5788)) { 10272 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; 10273 } 10274 } 10275 10276 if (tg3_flag(tp, PCI_EXPRESS)) 10277 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; 10278 10279 if (tg3_asic_rev(tp) == ASIC_REV_57766) { 10280 tp->dma_limit = 0; 10281 if (tp->dev->mtu <= ETH_DATA_LEN) { 10282 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR; 10283 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K; 10284 } 10285 } 10286 10287 if (tg3_flag(tp, HW_TSO_1) || 10288 tg3_flag(tp, HW_TSO_2) || 10289 tg3_flag(tp, HW_TSO_3)) 10290 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN; 10291 10292 if (tg3_flag(tp, 57765_PLUS) || 10293 tg3_asic_rev(tp) == ASIC_REV_5785 || 10294 tg3_asic_rev(tp) == ASIC_REV_57780) 10295 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN; 10296 10297 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 10298 tg3_asic_rev(tp) == ASIC_REV_5762) 10299 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET; 10300 10301 if (tg3_asic_rev(tp) == ASIC_REV_5761 || 10302 tg3_asic_rev(tp) == ASIC_REV_5784 || 10303 tg3_asic_rev(tp) == ASIC_REV_5785 || 10304 tg3_asic_rev(tp) == ASIC_REV_57780 || 10305 tg3_flag(tp, 57765_PLUS)) { 10306 u32 tgtreg; 10307 10308 if (tg3_asic_rev(tp) == ASIC_REV_5762) 10309 tgtreg = TG3_RDMA_RSRVCTRL_REG2; 10310 else 10311 tgtreg = TG3_RDMA_RSRVCTRL_REG; 10312 10313 val = tr32(tgtreg); 10314 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || 10315 tg3_asic_rev(tp) == ASIC_REV_5762) { 10316 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK | 10317 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK | 10318 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK); 10319 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B | 10320 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K | 10321 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K; 10322 } 10323 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX); 10324 } 10325 10326 if (tg3_asic_rev(tp) == ASIC_REV_5719 || 10327 tg3_asic_rev(tp) == ASIC_REV_5720 || 10328 tg3_asic_rev(tp) == ASIC_REV_5762) { 10329 u32 tgtreg; 10330 10331 if (tg3_asic_rev(tp) == ASIC_REV_5762) 10332 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2; 10333 else 10334 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL; 
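/* Read-modify-write below ORs in the BLEN_BD_4K and BLEN_LSO_4K bits;
 * going by the bit names, this caps the BD and LSO read DMA burst
 * lengths at 4K on the 5719/5720/5762.
 */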
10335 10336 val = tr32(tgtreg); 10337 tw32(tgtreg, val | 10338 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K | 10339 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K); 10340 } 10341 10342 /* Receive/send statistics. */ 10343 if (tg3_flag(tp, 5750_PLUS)) { 10344 val = tr32(RCVLPC_STATS_ENABLE); 10345 val &= ~RCVLPC_STATSENAB_DACK_FIX; 10346 tw32(RCVLPC_STATS_ENABLE, val); 10347 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) && 10348 tg3_flag(tp, TSO_CAPABLE)) { 10349 val = tr32(RCVLPC_STATS_ENABLE); 10350 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX; 10351 tw32(RCVLPC_STATS_ENABLE, val); 10352 } else { 10353 tw32(RCVLPC_STATS_ENABLE, 0xffffff); 10354 } 10355 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE); 10356 tw32(SNDDATAI_STATSENAB, 0xffffff); 10357 tw32(SNDDATAI_STATSCTRL, 10358 (SNDDATAI_SCTRL_ENABLE | 10359 SNDDATAI_SCTRL_FASTUPD)); 10360 10361 /* Setup host coalescing engine. */ 10362 tw32(HOSTCC_MODE, 0); 10363 for (i = 0; i < 2000; i++) { 10364 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE)) 10365 break; 10366 udelay(10); 10367 } 10368 10369 __tg3_set_coalesce(tp, &tp->coal); 10370 10371 if (!tg3_flag(tp, 5705_PLUS)) { 10372 /* Status/statistics block address. See tg3_timer, 10373 * the tg3_periodic_fetch_stats call there, and 10374 * tg3_get_stats to see how this works for 5705/5750 chips. 10375 */ 10376 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, 10377 ((u64) tp->stats_mapping >> 32)); 10378 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, 10379 ((u64) tp->stats_mapping & 0xffffffff)); 10380 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK); 10381 10382 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK); 10383 10384 /* Clear statistics and status block memory areas */ 10385 for (i = NIC_SRAM_STATS_BLK; 10386 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE; 10387 i += sizeof(u32)) { 10388 tg3_write_mem(tp, i, 0); 10389 udelay(40); 10390 } 10391 } 10392 10393 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode); 10394 10395 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE); 10396 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE); 10397 if (!tg3_flag(tp, 5705_PLUS)) 10398 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE); 10399 10400 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { 10401 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 10402 /* reset to prevent losing 1st rx packet intermittently */ 10403 tw32_f(MAC_RX_MODE, RX_MODE_RESET); 10404 udelay(10); 10405 } 10406 10407 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | 10408 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | 10409 MAC_MODE_FHDE_ENABLE; 10410 if (tg3_flag(tp, ENABLE_APE)) 10411 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; 10412 if (!tg3_flag(tp, 5705_PLUS) && 10413 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 10414 tg3_asic_rev(tp) != ASIC_REV_5700) 10415 tp->mac_mode |= MAC_MODE_LINK_POLARITY; 10416 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR); 10417 udelay(40); 10418 10419 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants(). 10420 * If TG3_FLAG_IS_NIC is zero, we should read the 10421 * register to preserve the GPIO settings for LOMs. The GPIOs, 10422 * whether used as inputs or outputs, are set by boot code after 10423 * reset. 
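* (LOM means LAN-on-Motherboard; such designs share these GPIOs with
* platform firmware, which is why the live GRC_LOCAL_CTRL value is
* merged into tp->grc_local_ctrl below rather than overwritten.)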
10424 */ 10425 if (!tg3_flag(tp, IS_NIC)) { 10426 u32 gpio_mask; 10427 10428 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 | 10429 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 | 10430 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2; 10431 10432 if (tg3_asic_rev(tp) == ASIC_REV_5752) 10433 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 | 10434 GRC_LCLCTRL_GPIO_OUTPUT3; 10435 10436 if (tg3_asic_rev(tp) == ASIC_REV_5755) 10437 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL; 10438 10439 tp->grc_local_ctrl &= ~gpio_mask; 10440 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask; 10441 10442 /* GPIO1 must be driven high for eeprom write protect */ 10443 if (tg3_flag(tp, EEPROM_WRITE_PROT)) 10444 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | 10445 GRC_LCLCTRL_GPIO_OUTPUT1); 10446 } 10447 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 10448 udelay(100); 10449 10450 if (tg3_flag(tp, USING_MSIX)) { 10451 val = tr32(MSGINT_MODE); 10452 val |= MSGINT_MODE_ENABLE; 10453 if (tp->irq_cnt > 1) 10454 val |= MSGINT_MODE_MULTIVEC_EN; 10455 if (!tg3_flag(tp, 1SHOT_MSI)) 10456 val |= MSGINT_MODE_ONE_SHOT_DISABLE; 10457 tw32(MSGINT_MODE, val); 10458 } 10459 10460 if (!tg3_flag(tp, 5705_PLUS)) { 10461 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE); 10462 udelay(40); 10463 } 10464 10465 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB | 10466 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB | 10467 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB | 10468 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB | 10469 WDMAC_MODE_LNGREAD_ENAB); 10470 10471 if (tg3_asic_rev(tp) == ASIC_REV_5705 && 10472 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) { 10473 if (tg3_flag(tp, TSO_CAPABLE) && 10474 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 || 10475 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) { 10476 /* nothing */ 10477 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && 10478 !tg3_flag(tp, IS_5788)) { 10479 val |= WDMAC_MODE_RX_ACCEL; 10480 } 10481 } 10482 10483 /* Enable host coalescing bug fix */ 10484 if (tg3_flag(tp, 5755_PLUS)) 10485 val |= WDMAC_MODE_STATUS_TAG_FIX; 10486 10487 if (tg3_asic_rev(tp) == ASIC_REV_5785) 10488 val |= WDMAC_MODE_BURST_ALL_DATA; 10489 10490 tw32_f(WDMAC_MODE, val); 10491 udelay(40); 10492 10493 if (tg3_flag(tp, PCIX_MODE)) { 10494 u16 pcix_cmd; 10495 10496 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 10497 &pcix_cmd); 10498 if (tg3_asic_rev(tp) == ASIC_REV_5703) { 10499 pcix_cmd &= ~PCI_X_CMD_MAX_READ; 10500 pcix_cmd |= PCI_X_CMD_READ_2K; 10501 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) { 10502 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ); 10503 pcix_cmd |= PCI_X_CMD_READ_2K; 10504 } 10505 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 10506 pcix_cmd); 10507 } 10508 10509 tw32_f(RDMAC_MODE, rdmac_mode); 10510 udelay(40); 10511 10512 if (tg3_asic_rev(tp) == ASIC_REV_5719 || 10513 tg3_asic_rev(tp) == ASIC_REV_5720) { 10514 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) { 10515 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp)) 10516 break; 10517 } 10518 if (i < TG3_NUM_RDMA_CHANNELS) { 10519 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); 10520 val |= tg3_lso_rd_dma_workaround_bit(tp); 10521 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val); 10522 tg3_flag_set(tp, 5719_5720_RDMA_BUG); 10523 } 10524 } 10525 10526 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE); 10527 if (!tg3_flag(tp, 5705_PLUS)) 10528 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE); 10529 10530 if (tg3_asic_rev(tp) == ASIC_REV_5761) 10531 tw32(SNDDATAC_MODE, 10532 SNDDATAC_MODE_ENABLE | 
SNDDATAC_MODE_CDELAY); 10533 else 10534 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE); 10535 10536 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE); 10537 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB); 10538 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ; 10539 if (tg3_flag(tp, LRG_PROD_RING_CAP)) 10540 val |= RCVDBDI_MODE_LRG_RING_SZ; 10541 tw32(RCVDBDI_MODE, val); 10542 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE); 10543 if (tg3_flag(tp, HW_TSO_1) || 10544 tg3_flag(tp, HW_TSO_2) || 10545 tg3_flag(tp, HW_TSO_3)) 10546 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8); 10547 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE; 10548 if (tg3_flag(tp, ENABLE_TSS)) 10549 val |= SNDBDI_MODE_MULTI_TXQ_EN; 10550 tw32(SNDBDI_MODE, val); 10551 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE); 10552 10553 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) { 10554 err = tg3_load_5701_a0_firmware_fix(tp); 10555 if (err) 10556 return err; 10557 } 10558 10559 if (tg3_asic_rev(tp) == ASIC_REV_57766) { 10560 /* Ignore any errors for the firmware download. If download 10561 * fails, the device will operate with EEE disabled 10562 */ 10563 tg3_load_57766_firmware(tp); 10564 } 10565 10566 if (tg3_flag(tp, TSO_CAPABLE)) { 10567 err = tg3_load_tso_firmware(tp); 10568 if (err) 10569 return err; 10570 } 10571 10572 tp->tx_mode = TX_MODE_ENABLE; 10573 10574 if (tg3_flag(tp, 5755_PLUS) || 10575 tg3_asic_rev(tp) == ASIC_REV_5906) 10576 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX; 10577 10578 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 10579 tg3_asic_rev(tp) == ASIC_REV_5762) { 10580 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE; 10581 tp->tx_mode &= ~val; 10582 tp->tx_mode |= tr32(MAC_TX_MODE) & val; 10583 } 10584 10585 tw32_f(MAC_TX_MODE, tp->tx_mode); 10586 udelay(100); 10587 10588 if (tg3_flag(tp, ENABLE_RSS)) { 10589 u32 rss_key[10]; 10590 10591 tg3_rss_write_indir_tbl(tp); 10592 10593 netdev_rss_key_fill(rss_key, 10 * sizeof(u32)); 10594 10595 for (i = 0; i < 10 ; i++) 10596 tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]); 10597 } 10598 10599 tp->rx_mode = RX_MODE_ENABLE; 10600 if (tg3_flag(tp, 5755_PLUS)) 10601 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE; 10602 10603 if (tg3_asic_rev(tp) == ASIC_REV_5762) 10604 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX; 10605 10606 if (tg3_flag(tp, ENABLE_RSS)) 10607 tp->rx_mode |= RX_MODE_RSS_ENABLE | 10608 RX_MODE_RSS_ITBL_HASH_BITS_7 | 10609 RX_MODE_RSS_IPV6_HASH_EN | 10610 RX_MODE_RSS_TCP_IPV6_HASH_EN | 10611 RX_MODE_RSS_IPV4_HASH_EN | 10612 RX_MODE_RSS_TCP_IPV4_HASH_EN; 10613 10614 tw32_f(MAC_RX_MODE, tp->rx_mode); 10615 udelay(10); 10616 10617 tw32(MAC_LED_CTRL, tp->led_ctrl); 10618 10619 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); 10620 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 10621 tw32_f(MAC_RX_MODE, RX_MODE_RESET); 10622 udelay(10); 10623 } 10624 tw32_f(MAC_RX_MODE, tp->rx_mode); 10625 udelay(10); 10626 10627 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 10628 if ((tg3_asic_rev(tp) == ASIC_REV_5704) && 10629 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) { 10630 /* Set drive transmission level to 1.2V */ 10631 /* only if the signal pre-emphasis bit is not set */ 10632 val = tr32(MAC_SERDES_CFG); 10633 val &= 0xfffff000; 10634 val |= 0x880; 10635 tw32(MAC_SERDES_CFG, val); 10636 } 10637 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) 10638 tw32(MAC_SERDES_CFG, 0x616000); 10639 } 10640 10641 /* Prevent chip from dropping frames when flow control 10642 * is enabled. 
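* (Going by the register name, MAC_LOW_WMARK_MAX_RX_FRAME is a
* low-water mark in units of RX frames: 1 on 57765-class parts and
* 2 everywhere else, matching the values written below.)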
10643 */ 10644 if (tg3_flag(tp, 57765_CLASS)) 10645 val = 1; 10646 else 10647 val = 2; 10648 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val); 10649 10650 if (tg3_asic_rev(tp) == ASIC_REV_5704 && 10651 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { 10652 /* Use hardware link auto-negotiation */ 10653 tg3_flag_set(tp, HW_AUTONEG); 10654 } 10655 10656 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && 10657 tg3_asic_rev(tp) == ASIC_REV_5714) { 10658 u32 tmp; 10659 10660 tmp = tr32(SERDES_RX_CTRL); 10661 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT); 10662 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT; 10663 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT; 10664 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 10665 } 10666 10667 if (!tg3_flag(tp, USE_PHYLIB)) { 10668 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 10669 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; 10670 10671 err = tg3_setup_phy(tp, false); 10672 if (err) 10673 return err; 10674 10675 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 10676 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) { 10677 u32 tmp; 10678 10679 /* Clear CRC stats. */ 10680 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) { 10681 tg3_writephy(tp, MII_TG3_TEST1, 10682 tmp | MII_TG3_TEST1_CRC_EN); 10683 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp); 10684 } 10685 } 10686 } 10687 10688 __tg3_set_rx_mode(tp->dev); 10689 10690 /* Initialize receive rules. */ 10691 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK); 10692 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK); 10693 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK); 10694 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK); 10695 10696 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) 10697 limit = 8; 10698 else 10699 limit = 16; 10700 if (tg3_flag(tp, ENABLE_ASF)) 10701 limit -= 4; 10702 switch (limit) { 10703 case 16: 10704 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0); 10705 fallthrough; 10706 case 15: 10707 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0); 10708 fallthrough; 10709 case 14: 10710 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0); 10711 fallthrough; 10712 case 13: 10713 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0); 10714 fallthrough; 10715 case 12: 10716 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0); 10717 fallthrough; 10718 case 11: 10719 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0); 10720 fallthrough; 10721 case 10: 10722 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0); 10723 fallthrough; 10724 case 9: 10725 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0); 10726 fallthrough; 10727 case 8: 10728 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0); 10729 fallthrough; 10730 case 7: 10731 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0); 10732 fallthrough; 10733 case 6: 10734 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0); 10735 fallthrough; 10736 case 5: 10737 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0); 10738 fallthrough; 10739 case 4: 10740 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */ 10741 case 3: 10742 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */ 10743 case 2: 10744 case 1: 10745 10746 default: 10747 break; 10748 } 10749 10750 if (tg3_flag(tp, ENABLE_APE)) 10751 /* Write our heartbeat update interval to APE. */ 10752 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS, 10753 APE_HOST_HEARTBEAT_INT_5SEC); 10754 10755 tg3_write_sig_post_reset(tp, RESET_KIND_INIT); 10756 10757 return 0; 10758 } 10759 10760 /* Called at device open time to get the chip ready for 10761 * packet processing. Invoked with tp->lock held. 
10762 */ 10763 static int tg3_init_hw(struct tg3 *tp, bool reset_phy) 10764 { 10765 /* Chip may have been just powered on. If so, the boot code may still 10766 * be running initialization. Wait for it to finish to avoid races in 10767 * accessing the hardware. 10768 */ 10769 tg3_enable_register_access(tp); 10770 tg3_poll_fw(tp); 10771 10772 tg3_switch_clocks(tp); 10773 10774 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); 10775 10776 return tg3_reset_hw(tp, reset_phy); 10777 } 10778 10779 #ifdef CONFIG_TIGON3_HWMON 10780 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir) 10781 { 10782 u32 off, len = TG3_OCIR_LEN; 10783 int i; 10784 10785 for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) { 10786 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len); 10787 10788 if (ocir->signature != TG3_OCIR_SIG_MAGIC || 10789 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE)) 10790 memset(ocir, 0, len); 10791 } 10792 } 10793 10794 /* sysfs attributes for hwmon */ 10795 static ssize_t tg3_show_temp(struct device *dev, 10796 struct device_attribute *devattr, char *buf) 10797 { 10798 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 10799 struct tg3 *tp = dev_get_drvdata(dev); 10800 u32 temperature; 10801 10802 spin_lock_bh(&tp->lock); 10803 tg3_ape_scratchpad_read(tp, &temperature, attr->index, 10804 sizeof(temperature)); 10805 spin_unlock_bh(&tp->lock); 10806 return sprintf(buf, "%u\n", temperature * 1000); 10807 } 10808 10809 10810 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL, 10811 TG3_TEMP_SENSOR_OFFSET); 10812 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL, 10813 TG3_TEMP_CAUTION_OFFSET); 10814 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL, 10815 TG3_TEMP_MAX_OFFSET); 10816 10817 static struct attribute *tg3_attrs[] = { 10818 &sensor_dev_attr_temp1_input.dev_attr.attr, 10819 &sensor_dev_attr_temp1_crit.dev_attr.attr, 10820 &sensor_dev_attr_temp1_max.dev_attr.attr, 10821 NULL 10822 }; 10823 ATTRIBUTE_GROUPS(tg3); 10824 10825 static void tg3_hwmon_close(struct tg3 *tp) 10826 { 10827 if (tp->hwmon_dev) { 10828 hwmon_device_unregister(tp->hwmon_dev); 10829 tp->hwmon_dev = NULL; 10830 } 10831 } 10832 10833 static void tg3_hwmon_open(struct tg3 *tp) 10834 { 10835 int i; 10836 u32 size = 0; 10837 struct pci_dev *pdev = tp->pdev; 10838 struct tg3_ocir ocirs[TG3_SD_NUM_RECS]; 10839 10840 tg3_sd_scan_scratchpad(tp, ocirs); 10841 10842 for (i = 0; i < TG3_SD_NUM_RECS; i++) { 10843 if (!ocirs[i].src_data_length) 10844 continue; 10845 10846 size += ocirs[i].src_hdr_length; 10847 size += ocirs[i].src_data_length; 10848 } 10849 10850 if (!size) 10851 return; 10852 10853 tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3", 10854 tp, tg3_groups); 10855 if (IS_ERR(tp->hwmon_dev)) { 10856 tp->hwmon_dev = NULL; 10857 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n"); 10858 } 10859 } 10860 #else 10861 static inline void tg3_hwmon_close(struct tg3 *tp) { } 10862 static inline void tg3_hwmon_open(struct tg3 *tp) { } 10863 #endif /* CONFIG_TIGON3_HWMON */ 10864 10865 10866 #define TG3_STAT_ADD32(PSTAT, REG) \ 10867 do { u32 __val = tr32(REG); \ 10868 (PSTAT)->low += __val; \ 10869 if ((PSTAT)->low < __val) \ 10870 (PSTAT)->high += 1; \ 10871 } while (0) 10872 10873 static void tg3_periodic_fetch_stats(struct tg3 *tp) 10874 { 10875 struct tg3_hw_stats *sp = tp->hw_stats; 10876 10877 if (!tp->link_up) 10878 return; 10879 10880 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS); 10881 
TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS); 10882 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT); 10883 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT); 10884 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS); 10885 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS); 10886 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS); 10887 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED); 10888 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL); 10889 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL); 10890 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST); 10891 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST); 10892 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST); 10893 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) && 10894 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low + 10895 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) { 10896 u32 val; 10897 10898 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); 10899 val &= ~tg3_lso_rd_dma_workaround_bit(tp); 10900 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val); 10901 tg3_flag_clear(tp, 5719_5720_RDMA_BUG); 10902 } 10903 10904 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS); 10905 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS); 10906 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST); 10907 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST); 10908 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST); 10909 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS); 10910 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS); 10911 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD); 10912 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD); 10913 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD); 10914 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED); 10915 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG); 10916 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS); 10917 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE); 10918 10919 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT); 10920 if (tg3_asic_rev(tp) != ASIC_REV_5717 && 10921 tg3_asic_rev(tp) != ASIC_REV_5762 && 10922 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 && 10923 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) { 10924 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT); 10925 } else { 10926 u32 val = tr32(HOSTCC_FLOW_ATTN); 10927 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 
1 : 0; 10928 if (val) { 10929 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM); 10930 sp->rx_discards.low += val; 10931 if (sp->rx_discards.low < val) 10932 sp->rx_discards.high += 1; 10933 } 10934 sp->mbuf_lwm_thresh_hit = sp->rx_discards; 10935 } 10936 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT); 10937 } 10938 10939 static void tg3_chk_missed_msi(struct tg3 *tp) 10940 { 10941 u32 i; 10942 10943 for (i = 0; i < tp->irq_cnt; i++) { 10944 struct tg3_napi *tnapi = &tp->napi[i]; 10945 10946 if (tg3_has_work(tnapi)) { 10947 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr && 10948 tnapi->last_tx_cons == tnapi->tx_cons) { 10949 if (tnapi->chk_msi_cnt < 1) { 10950 tnapi->chk_msi_cnt++; 10951 return; 10952 } 10953 tg3_msi(0, tnapi); 10954 } 10955 } 10956 tnapi->chk_msi_cnt = 0; 10957 tnapi->last_rx_cons = tnapi->rx_rcb_ptr; 10958 tnapi->last_tx_cons = tnapi->tx_cons; 10959 } 10960 } 10961 10962 static void tg3_timer(struct timer_list *t) 10963 { 10964 struct tg3 *tp = from_timer(tp, t, timer); 10965 10966 spin_lock(&tp->lock); 10967 10968 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) { 10969 spin_unlock(&tp->lock); 10970 goto restart_timer; 10971 } 10972 10973 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 10974 tg3_flag(tp, 57765_CLASS)) 10975 tg3_chk_missed_msi(tp); 10976 10977 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) { 10978 /* BCM4785: Flush posted writes from GbE to host memory. */ 10979 tr32(HOSTCC_MODE); 10980 } 10981 10982 if (!tg3_flag(tp, TAGGED_STATUS)) { 10983 /* All of this garbage is because, when using non-tagged 10984 * IRQ status, the mailbox/status_block protocol the chip 10985 * uses with the CPU is race-prone. 10986 */ 10987 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) { 10988 tw32(GRC_LOCAL_CTRL, 10989 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); 10990 } else { 10991 tw32(HOSTCC_MODE, tp->coalesce_mode | 10992 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW); 10993 } 10994 10995 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { 10996 spin_unlock(&tp->lock); 10997 tg3_reset_task_schedule(tp); 10998 goto restart_timer; 10999 } 11000 } 11001 11002 /* This part only runs once per second.
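* timer_counter is reloaded from timer_multiplier, which
* tg3_timer_init() computes as HZ / timer_offset, i.e. exactly one
* second's worth of timer ticks.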
*/ 11003 if (!--tp->timer_counter) { 11004 if (tg3_flag(tp, 5705_PLUS)) 11005 tg3_periodic_fetch_stats(tp); 11006 11007 if (tp->setlpicnt && !--tp->setlpicnt) 11008 tg3_phy_eee_enable(tp); 11009 11010 if (tg3_flag(tp, USE_LINKCHG_REG)) { 11011 u32 mac_stat; 11012 int phy_event; 11013 11014 mac_stat = tr32(MAC_STATUS); 11015 11016 phy_event = 0; 11017 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) { 11018 if (mac_stat & MAC_STATUS_MI_INTERRUPT) 11019 phy_event = 1; 11020 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED) 11021 phy_event = 1; 11022 11023 if (phy_event) 11024 tg3_setup_phy(tp, false); 11025 } else if (tg3_flag(tp, POLL_SERDES)) { 11026 u32 mac_stat = tr32(MAC_STATUS); 11027 int need_setup = 0; 11028 11029 if (tp->link_up && 11030 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) { 11031 need_setup = 1; 11032 } 11033 if (!tp->link_up && 11034 (mac_stat & (MAC_STATUS_PCS_SYNCED | 11035 MAC_STATUS_SIGNAL_DET))) { 11036 need_setup = 1; 11037 } 11038 if (need_setup) { 11039 if (!tp->serdes_counter) { 11040 tw32_f(MAC_MODE, 11041 (tp->mac_mode & 11042 ~MAC_MODE_PORT_MODE_MASK)); 11043 udelay(40); 11044 tw32_f(MAC_MODE, tp->mac_mode); 11045 udelay(40); 11046 } 11047 tg3_setup_phy(tp, false); 11048 } 11049 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && 11050 tg3_flag(tp, 5780_CLASS)) { 11051 tg3_serdes_parallel_detect(tp); 11052 } else if (tg3_flag(tp, POLL_CPMU_LINK)) { 11053 u32 cpmu = tr32(TG3_CPMU_STATUS); 11054 bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) == 11055 TG3_CPMU_STATUS_LINK_MASK); 11056 11057 if (link_up != tp->link_up) 11058 tg3_setup_phy(tp, false); 11059 } 11060 11061 tp->timer_counter = tp->timer_multiplier; 11062 } 11063 11064 /* Heartbeat is only sent once every 2 seconds. 11065 * 11066 * The heartbeat is to tell the ASF firmware that the host 11067 * driver is still alive. In the event that the OS crashes, 11068 * ASF needs to reset the hardware to free up the FIFO space 11069 * that may be filled with rx packets destined for the host. 11070 * If the FIFO is full, ASF will no longer function properly. 11071 * 11072 * Unintended resets have been reported on real time kernels 11073 * where the timer doesn't run on time. Netpoll will also have 11074 * the same problem. 11075 * 11076 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware 11077 * to check the ring condition when the heartbeat is expiring 11078 * before doing the reset. This will prevent most unintended 11079 * resets.
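* (asf_counter counts timer ticks; tg3_timer_init() scales
* asf_multiplier by TG3_FW_UPDATE_FREQ_SEC so the ALIVE3 event goes
* out on that period regardless of the tick rate.)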
11080 */ 11081 if (!--tp->asf_counter) { 11082 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) { 11083 tg3_wait_for_event_ack(tp); 11084 11085 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, 11086 FWCMD_NICDRV_ALIVE3); 11087 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4); 11088 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 11089 TG3_FW_UPDATE_TIMEOUT_SEC); 11090 11091 tg3_generate_fw_event(tp); 11092 } 11093 tp->asf_counter = tp->asf_multiplier; 11094 } 11095 11096 /* Update the APE heartbeat every 5 seconds.*/ 11097 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL); 11098 11099 spin_unlock(&tp->lock); 11100 11101 restart_timer: 11102 tp->timer.expires = jiffies + tp->timer_offset; 11103 add_timer(&tp->timer); 11104 } 11105 11106 static void tg3_timer_init(struct tg3 *tp) 11107 { 11108 if (tg3_flag(tp, TAGGED_STATUS) && 11109 tg3_asic_rev(tp) != ASIC_REV_5717 && 11110 !tg3_flag(tp, 57765_CLASS)) 11111 tp->timer_offset = HZ; 11112 else 11113 tp->timer_offset = HZ / 10; 11114 11115 BUG_ON(tp->timer_offset > HZ); 11116 11117 tp->timer_multiplier = (HZ / tp->timer_offset); 11118 tp->asf_multiplier = (HZ / tp->timer_offset) * 11119 TG3_FW_UPDATE_FREQ_SEC; 11120 11121 timer_setup(&tp->timer, tg3_timer, 0); 11122 } 11123 11124 static void tg3_timer_start(struct tg3 *tp) 11125 { 11126 tp->asf_counter = tp->asf_multiplier; 11127 tp->timer_counter = tp->timer_multiplier; 11128 11129 tp->timer.expires = jiffies + tp->timer_offset; 11130 add_timer(&tp->timer); 11131 } 11132 11133 static void tg3_timer_stop(struct tg3 *tp) 11134 { 11135 del_timer_sync(&tp->timer); 11136 } 11137 11138 /* Restart hardware after configuration changes, self-test, etc. 11139 * Invoked with tp->lock held. 11140 */ 11141 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy) 11142 __releases(tp->lock) 11143 __acquires(tp->lock) 11144 { 11145 int err; 11146 11147 err = tg3_init_hw(tp, reset_phy); 11148 if (err) { 11149 netdev_err(tp->dev, 11150 "Failed to re-initialize device, aborting\n"); 11151 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11152 tg3_full_unlock(tp); 11153 tg3_timer_stop(tp); 11154 tp->irq_sync = 0; 11155 tg3_napi_enable(tp); 11156 dev_close(tp->dev); 11157 tg3_full_lock(tp, 0); 11158 } 11159 return err; 11160 } 11161 11162 static void tg3_reset_task(struct work_struct *work) 11163 { 11164 struct tg3 *tp = container_of(work, struct tg3, reset_task); 11165 int err; 11166 11167 rtnl_lock(); 11168 tg3_full_lock(tp, 0); 11169 11170 if (tp->pcierr_recovery || !netif_running(tp->dev)) { 11171 tg3_flag_clear(tp, RESET_TASK_PENDING); 11172 tg3_full_unlock(tp); 11173 rtnl_unlock(); 11174 return; 11175 } 11176 11177 tg3_full_unlock(tp); 11178 11179 tg3_phy_stop(tp); 11180 11181 tg3_netif_stop(tp); 11182 11183 tg3_full_lock(tp, 1); 11184 11185 if (tg3_flag(tp, TX_RECOVERY_PENDING)) { 11186 tp->write32_tx_mbox = tg3_write32_tx_mbox; 11187 tp->write32_rx_mbox = tg3_write_flush_reg32; 11188 tg3_flag_set(tp, MBOX_WRITE_REORDER); 11189 tg3_flag_clear(tp, TX_RECOVERY_PENDING); 11190 } 11191 11192 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); 11193 err = tg3_init_hw(tp, true); 11194 if (err) { 11195 tg3_full_unlock(tp); 11196 tp->irq_sync = 0; 11197 tg3_napi_enable(tp); 11198 /* Clear this flag so that tg3_reset_task_cancel() will not 11199 * call cancel_work_sync() and wait forever. 
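* (dev_close() below ends up in tg3_stop(), which calls
* tg3_reset_task_cancel(); since we are running inside the reset
* work item itself, cancel_work_sync() on it would deadlock.)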
11200 */ 11201 tg3_flag_clear(tp, RESET_TASK_PENDING); 11202 dev_close(tp->dev); 11203 goto out; 11204 } 11205 11206 tg3_netif_start(tp); 11207 tg3_full_unlock(tp); 11208 tg3_phy_start(tp); 11209 tg3_flag_clear(tp, RESET_TASK_PENDING); 11210 out: 11211 rtnl_unlock(); 11212 } 11213 11214 static int tg3_request_irq(struct tg3 *tp, int irq_num) 11215 { 11216 irq_handler_t fn; 11217 unsigned long flags; 11218 char *name; 11219 struct tg3_napi *tnapi = &tp->napi[irq_num]; 11220 11221 if (tp->irq_cnt == 1) 11222 name = tp->dev->name; 11223 else { 11224 name = &tnapi->irq_lbl[0]; 11225 if (tnapi->tx_buffers && tnapi->rx_rcb) 11226 snprintf(name, IFNAMSIZ, 11227 "%s-txrx-%d", tp->dev->name, irq_num); 11228 else if (tnapi->tx_buffers) 11229 snprintf(name, IFNAMSIZ, 11230 "%s-tx-%d", tp->dev->name, irq_num); 11231 else if (tnapi->rx_rcb) 11232 snprintf(name, IFNAMSIZ, 11233 "%s-rx-%d", tp->dev->name, irq_num); 11234 else 11235 snprintf(name, IFNAMSIZ, 11236 "%s-%d", tp->dev->name, irq_num); 11237 name[IFNAMSIZ-1] = 0; 11238 } 11239 11240 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) { 11241 fn = tg3_msi; 11242 if (tg3_flag(tp, 1SHOT_MSI)) 11243 fn = tg3_msi_1shot; 11244 flags = 0; 11245 } else { 11246 fn = tg3_interrupt; 11247 if (tg3_flag(tp, TAGGED_STATUS)) 11248 fn = tg3_interrupt_tagged; 11249 flags = IRQF_SHARED; 11250 } 11251 11252 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi); 11253 } 11254 11255 static int tg3_test_interrupt(struct tg3 *tp) 11256 { 11257 struct tg3_napi *tnapi = &tp->napi[0]; 11258 struct net_device *dev = tp->dev; 11259 int err, i, intr_ok = 0; 11260 u32 val; 11261 11262 if (!netif_running(dev)) 11263 return -ENODEV; 11264 11265 tg3_disable_ints(tp); 11266 11267 free_irq(tnapi->irq_vec, tnapi); 11268 11269 /* 11270 * Turn off MSI one shot mode. Otherwise this test has no 11271 * observable way to know whether the interrupt was delivered. 11272 */ 11273 if (tg3_flag(tp, 57765_PLUS)) { 11274 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE; 11275 tw32(MSGINT_MODE, val); 11276 } 11277 11278 err = request_irq(tnapi->irq_vec, tg3_test_isr, 11279 IRQF_SHARED, dev->name, tnapi); 11280 if (err) 11281 return err; 11282 11283 tnapi->hw_status->status &= ~SD_STATUS_UPDATED; 11284 tg3_enable_ints(tp); 11285 11286 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 11287 tnapi->coal_now); 11288 11289 for (i = 0; i < 5; i++) { 11290 u32 int_mbox, misc_host_ctrl; 11291 11292 int_mbox = tr32_mailbox(tnapi->int_mbox); 11293 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); 11294 11295 if ((int_mbox != 0) || 11296 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) { 11297 intr_ok = 1; 11298 break; 11299 } 11300 11301 if (tg3_flag(tp, 57765_PLUS) && 11302 tnapi->hw_status->status_tag != tnapi->last_tag) 11303 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); 11304 11305 msleep(10); 11306 } 11307 11308 tg3_disable_ints(tp); 11309 11310 free_irq(tnapi->irq_vec, tnapi); 11311 11312 err = tg3_request_irq(tp, 0); 11313 11314 if (err) 11315 return err; 11316 11317 if (intr_ok) { 11318 /* Reenable MSI one shot mode. 
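* One-shot mode was disabled above so that the test had an observable
* interrupt; restore it only on chips that normally run with
* 1SHOT_MSI set.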
*/ 11319 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) { 11320 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE; 11321 tw32(MSGINT_MODE, val); 11322 } 11323 return 0; 11324 } 11325 11326 return -EIO; 11327 } 11328 11329 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is 11330 * successfully restored. 11331 */ 11332 static int tg3_test_msi(struct tg3 *tp) 11333 { 11334 int err; 11335 u16 pci_cmd; 11336 11337 if (!tg3_flag(tp, USING_MSI)) 11338 return 0; 11339 11340 /* Turn off SERR reporting in case MSI terminates with Master 11341 * Abort. 11342 */ 11343 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); 11344 pci_write_config_word(tp->pdev, PCI_COMMAND, 11345 pci_cmd & ~PCI_COMMAND_SERR); 11346 11347 err = tg3_test_interrupt(tp); 11348 11349 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); 11350 11351 if (!err) 11352 return 0; 11353 11354 /* other failures */ 11355 if (err != -EIO) 11356 return err; 11357 11358 /* MSI test failed, go back to INTx mode */ 11359 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching " 11360 "to INTx mode. Please report this failure to the PCI " 11361 "maintainer and include system chipset information\n"); 11362 11363 free_irq(tp->napi[0].irq_vec, &tp->napi[0]); 11364 11365 pci_disable_msi(tp->pdev); 11366 11367 tg3_flag_clear(tp, USING_MSI); 11368 tp->napi[0].irq_vec = tp->pdev->irq; 11369 11370 err = tg3_request_irq(tp, 0); 11371 if (err) 11372 return err; 11373 11374 /* Need to reset the chip because the MSI cycle may have terminated 11375 * with Master Abort. 11376 */ 11377 tg3_full_lock(tp, 1); 11378 11379 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11380 err = tg3_init_hw(tp, true); 11381 11382 tg3_full_unlock(tp); 11383 11384 if (err) 11385 free_irq(tp->napi[0].irq_vec, &tp->napi[0]); 11386 11387 return err; 11388 } 11389 11390 static int tg3_request_firmware(struct tg3 *tp) 11391 { 11392 const struct tg3_firmware_hdr *fw_hdr; 11393 11394 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) { 11395 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n", 11396 tp->fw_needed); 11397 return -ENOENT; 11398 } 11399 11400 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; 11401 11402 /* Firmware blob starts with version numbers, followed by 11403 * start address and _full_ length including BSS sections 11404 * (which must be longer than the actual data, of course). 11405 */ 11406 11407 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */ 11408 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) { 11409 netdev_err(tp->dev, "bogus length %d in \"%s\"\n", 11410 tp->fw_len, tp->fw_needed); 11411 release_firmware(tp->fw); 11412 tp->fw = NULL; 11413 return -EINVAL; 11414 } 11415 11416 /* We no longer need firmware; we have it. */ 11417 tp->fw_needed = NULL; 11418 return 0; 11419 } 11420 11421 static u32 tg3_irq_count(struct tg3 *tp) 11422 { 11423 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt); 11424 11425 if (irq_cnt > 1) { 11426 /* We want as many rx rings enabled as there are cpus. 11427 * In multiqueue MSI-X mode, the first MSI-X vector 11428 * only deals with link interrupts, etc., so we add 11429 * one to the number of vectors we are requesting.
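* (Example: with 4 RX queues this requests 5 vectors, one for link
* and other misc events plus one per ring, capped at tp->irq_max.)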
11430 */ 11431 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max); 11432 } 11433 11434 return irq_cnt; 11435 } 11436 11437 static bool tg3_enable_msix(struct tg3 *tp) 11438 { 11439 int i, rc; 11440 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS]; 11441 11442 tp->txq_cnt = tp->txq_req; 11443 tp->rxq_cnt = tp->rxq_req; 11444 if (!tp->rxq_cnt) 11445 tp->rxq_cnt = netif_get_num_default_rss_queues(); 11446 if (tp->rxq_cnt > tp->rxq_max) 11447 tp->rxq_cnt = tp->rxq_max; 11448 11449 /* Disable multiple TX rings by default. Simple round-robin hardware 11450 * scheduling of the TX rings can cause starvation of rings with 11451 * small packets when other rings have TSO or jumbo packets. 11452 */ 11453 if (!tp->txq_req) 11454 tp->txq_cnt = 1; 11455 11456 tp->irq_cnt = tg3_irq_count(tp); 11457 11458 for (i = 0; i < tp->irq_max; i++) { 11459 msix_ent[i].entry = i; 11460 msix_ent[i].vector = 0; 11461 } 11462 11463 rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt); 11464 if (rc < 0) { 11465 return false; 11466 } else if (rc < tp->irq_cnt) { 11467 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n", 11468 tp->irq_cnt, rc); 11469 tp->irq_cnt = rc; 11470 tp->rxq_cnt = max(rc - 1, 1); 11471 if (tp->txq_cnt) 11472 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max); 11473 } 11474 11475 for (i = 0; i < tp->irq_max; i++) 11476 tp->napi[i].irq_vec = msix_ent[i].vector; 11477 11478 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) { 11479 pci_disable_msix(tp->pdev); 11480 return false; 11481 } 11482 11483 if (tp->irq_cnt == 1) 11484 return true; 11485 11486 tg3_flag_set(tp, ENABLE_RSS); 11487 11488 if (tp->txq_cnt > 1) 11489 tg3_flag_set(tp, ENABLE_TSS); 11490 11491 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt); 11492 11493 return true; 11494 } 11495 11496 static void tg3_ints_init(struct tg3 *tp) 11497 { 11498 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) && 11499 !tg3_flag(tp, TAGGED_STATUS)) { 11500 /* All MSI supporting chips should support tagged 11501 * status. Assert that this is the case. 11502 */ 11503 netdev_warn(tp->dev, 11504 "MSI without TAGGED_STATUS? 
Not using MSI\n"); 11505 goto defcfg; 11506 } 11507 11508 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp)) 11509 tg3_flag_set(tp, USING_MSIX); 11510 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0) 11511 tg3_flag_set(tp, USING_MSI); 11512 11513 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) { 11514 u32 msi_mode = tr32(MSGINT_MODE); 11515 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) 11516 msi_mode |= MSGINT_MODE_MULTIVEC_EN; 11517 if (!tg3_flag(tp, 1SHOT_MSI)) 11518 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE; 11519 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE); 11520 } 11521 defcfg: 11522 if (!tg3_flag(tp, USING_MSIX)) { 11523 tp->irq_cnt = 1; 11524 tp->napi[0].irq_vec = tp->pdev->irq; 11525 } 11526 11527 if (tp->irq_cnt == 1) { 11528 tp->txq_cnt = 1; 11529 tp->rxq_cnt = 1; 11530 netif_set_real_num_tx_queues(tp->dev, 1); 11531 netif_set_real_num_rx_queues(tp->dev, 1); 11532 } 11533 } 11534 11535 static void tg3_ints_fini(struct tg3 *tp) 11536 { 11537 if (tg3_flag(tp, USING_MSIX)) 11538 pci_disable_msix(tp->pdev); 11539 else if (tg3_flag(tp, USING_MSI)) 11540 pci_disable_msi(tp->pdev); 11541 tg3_flag_clear(tp, USING_MSI); 11542 tg3_flag_clear(tp, USING_MSIX); 11543 tg3_flag_clear(tp, ENABLE_RSS); 11544 tg3_flag_clear(tp, ENABLE_TSS); 11545 } 11546 11547 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq, 11548 bool init) 11549 { 11550 struct net_device *dev = tp->dev; 11551 int i, err; 11552 11553 /* 11554 * Set up interrupts first so we know how 11555 * many NAPI resources to allocate 11556 */ 11557 tg3_ints_init(tp); 11558 11559 tg3_rss_check_indir_tbl(tp); 11560 11561 /* The placement of this call is tied 11562 * to the setup and use of Host TX descriptors. 11563 */ 11564 err = tg3_alloc_consistent(tp); 11565 if (err) 11566 goto out_ints_fini; 11567 11568 tg3_napi_init(tp); 11569 11570 tg3_napi_enable(tp); 11571 11572 for (i = 0; i < tp->irq_cnt; i++) { 11573 err = tg3_request_irq(tp, i); 11574 if (err) { 11575 for (i--; i >= 0; i--) { 11576 struct tg3_napi *tnapi = &tp->napi[i]; 11577 11578 free_irq(tnapi->irq_vec, tnapi); 11579 } 11580 goto out_napi_fini; 11581 } 11582 } 11583 11584 tg3_full_lock(tp, 0); 11585 11586 if (init) 11587 tg3_ape_driver_state_change(tp, RESET_KIND_INIT); 11588 11589 err = tg3_init_hw(tp, reset_phy); 11590 if (err) { 11591 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11592 tg3_free_rings(tp); 11593 } 11594 11595 tg3_full_unlock(tp); 11596 11597 if (err) 11598 goto out_free_irq; 11599 11600 if (test_irq && tg3_flag(tp, USING_MSI)) { 11601 err = tg3_test_msi(tp); 11602 11603 if (err) { 11604 tg3_full_lock(tp, 0); 11605 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11606 tg3_free_rings(tp); 11607 tg3_full_unlock(tp); 11608 11609 goto out_napi_fini; 11610 } 11611 11612 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) { 11613 u32 val = tr32(PCIE_TRANSACTION_CFG); 11614 11615 tw32(PCIE_TRANSACTION_CFG, 11616 val | PCIE_TRANS_CFG_1SHOT_MSI); 11617 } 11618 } 11619 11620 tg3_phy_start(tp); 11621 11622 tg3_hwmon_open(tp); 11623 11624 tg3_full_lock(tp, 0); 11625 11626 tg3_timer_start(tp); 11627 tg3_flag_set(tp, INIT_COMPLETE); 11628 tg3_enable_ints(tp); 11629 11630 tg3_ptp_resume(tp); 11631 11632 tg3_full_unlock(tp); 11633 11634 netif_tx_start_all_queues(dev); 11635 11636 /* 11637 * Reset loopback feature if it was turned on while the device was down; 11638 * make sure that it's installed properly now.
11639 */ 11640 if (dev->features & NETIF_F_LOOPBACK) 11641 tg3_set_loopback(dev, dev->features); 11642 11643 return 0; 11644 11645 out_free_irq: 11646 for (i = tp->irq_cnt - 1; i >= 0; i--) { 11647 struct tg3_napi *tnapi = &tp->napi[i]; 11648 free_irq(tnapi->irq_vec, tnapi); 11649 } 11650 11651 out_napi_fini: 11652 tg3_napi_disable(tp); 11653 tg3_napi_fini(tp); 11654 tg3_free_consistent(tp); 11655 11656 out_ints_fini: 11657 tg3_ints_fini(tp); 11658 11659 return err; 11660 } 11661 11662 static void tg3_stop(struct tg3 *tp) 11663 { 11664 int i; 11665 11666 tg3_reset_task_cancel(tp); 11667 tg3_netif_stop(tp); 11668 11669 tg3_timer_stop(tp); 11670 11671 tg3_hwmon_close(tp); 11672 11673 tg3_phy_stop(tp); 11674 11675 tg3_full_lock(tp, 1); 11676 11677 tg3_disable_ints(tp); 11678 11679 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11680 tg3_free_rings(tp); 11681 tg3_flag_clear(tp, INIT_COMPLETE); 11682 11683 tg3_full_unlock(tp); 11684 11685 for (i = tp->irq_cnt - 1; i >= 0; i--) { 11686 struct tg3_napi *tnapi = &tp->napi[i]; 11687 free_irq(tnapi->irq_vec, tnapi); 11688 } 11689 11690 tg3_ints_fini(tp); 11691 11692 tg3_napi_fini(tp); 11693 11694 tg3_free_consistent(tp); 11695 } 11696 11697 static int tg3_open(struct net_device *dev) 11698 { 11699 struct tg3 *tp = netdev_priv(dev); 11700 int err; 11701 11702 if (tp->pcierr_recovery) { 11703 netdev_err(dev, "Failed to open device. PCI error recovery " 11704 "in progress\n"); 11705 return -EAGAIN; 11706 } 11707 11708 if (tp->fw_needed) { 11709 err = tg3_request_firmware(tp); 11710 if (tg3_asic_rev(tp) == ASIC_REV_57766) { 11711 if (err) { 11712 netdev_warn(tp->dev, "EEE capability disabled\n"); 11713 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP; 11714 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { 11715 netdev_warn(tp->dev, "EEE capability restored\n"); 11716 tp->phy_flags |= TG3_PHYFLG_EEE_CAP; 11717 } 11718 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) { 11719 if (err) 11720 return err; 11721 } else if (err) { 11722 netdev_warn(tp->dev, "TSO capability disabled\n"); 11723 tg3_flag_clear(tp, TSO_CAPABLE); 11724 } else if (!tg3_flag(tp, TSO_CAPABLE)) { 11725 netdev_notice(tp->dev, "TSO capability restored\n"); 11726 tg3_flag_set(tp, TSO_CAPABLE); 11727 } 11728 } 11729 11730 tg3_carrier_off(tp); 11731 11732 err = tg3_power_up(tp); 11733 if (err) 11734 return err; 11735 11736 tg3_full_lock(tp, 0); 11737 11738 tg3_disable_ints(tp); 11739 tg3_flag_clear(tp, INIT_COMPLETE); 11740 11741 tg3_full_unlock(tp); 11742 11743 err = tg3_start(tp, 11744 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN), 11745 true, true); 11746 if (err) { 11747 tg3_frob_aux_power(tp, false); 11748 pci_set_power_state(tp->pdev, PCI_D3hot); 11749 } 11750 11751 return err; 11752 } 11753 11754 static int tg3_close(struct net_device *dev) 11755 { 11756 struct tg3 *tp = netdev_priv(dev); 11757 11758 if (tp->pcierr_recovery) { 11759 netdev_err(dev, "Failed to close device. 
PCI error recovery " 11760 "in progress\n"); 11761 return -EAGAIN; 11762 } 11763 11764 tg3_stop(tp); 11765 11766 if (pci_device_is_present(tp->pdev)) { 11767 tg3_power_down_prepare(tp); 11768 11769 tg3_carrier_off(tp); 11770 } 11771 return 0; 11772 } 11773 11774 static inline u64 get_stat64(tg3_stat64_t *val) 11775 { 11776 return ((u64)val->high << 32) | ((u64)val->low); 11777 } 11778 11779 static u64 tg3_calc_crc_errors(struct tg3 *tp) 11780 { 11781 struct tg3_hw_stats *hw_stats = tp->hw_stats; 11782 11783 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 11784 (tg3_asic_rev(tp) == ASIC_REV_5700 || 11785 tg3_asic_rev(tp) == ASIC_REV_5701)) { 11786 u32 val; 11787 11788 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) { 11789 tg3_writephy(tp, MII_TG3_TEST1, 11790 val | MII_TG3_TEST1_CRC_EN); 11791 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val); 11792 } else 11793 val = 0; 11794 11795 tp->phy_crc_errors += val; 11796 11797 return tp->phy_crc_errors; 11798 } 11799 11800 return get_stat64(&hw_stats->rx_fcs_errors); 11801 } 11802 11803 #define ESTAT_ADD(member) \ 11804 estats->member = old_estats->member + \ 11805 get_stat64(&hw_stats->member) 11806 11807 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats) 11808 { 11809 struct tg3_ethtool_stats *old_estats = &tp->estats_prev; 11810 struct tg3_hw_stats *hw_stats = tp->hw_stats; 11811 11812 ESTAT_ADD(rx_octets); 11813 ESTAT_ADD(rx_fragments); 11814 ESTAT_ADD(rx_ucast_packets); 11815 ESTAT_ADD(rx_mcast_packets); 11816 ESTAT_ADD(rx_bcast_packets); 11817 ESTAT_ADD(rx_fcs_errors); 11818 ESTAT_ADD(rx_align_errors); 11819 ESTAT_ADD(rx_xon_pause_rcvd); 11820 ESTAT_ADD(rx_xoff_pause_rcvd); 11821 ESTAT_ADD(rx_mac_ctrl_rcvd); 11822 ESTAT_ADD(rx_xoff_entered); 11823 ESTAT_ADD(rx_frame_too_long_errors); 11824 ESTAT_ADD(rx_jabbers); 11825 ESTAT_ADD(rx_undersize_packets); 11826 ESTAT_ADD(rx_in_length_errors); 11827 ESTAT_ADD(rx_out_length_errors); 11828 ESTAT_ADD(rx_64_or_less_octet_packets); 11829 ESTAT_ADD(rx_65_to_127_octet_packets); 11830 ESTAT_ADD(rx_128_to_255_octet_packets); 11831 ESTAT_ADD(rx_256_to_511_octet_packets); 11832 ESTAT_ADD(rx_512_to_1023_octet_packets); 11833 ESTAT_ADD(rx_1024_to_1522_octet_packets); 11834 ESTAT_ADD(rx_1523_to_2047_octet_packets); 11835 ESTAT_ADD(rx_2048_to_4095_octet_packets); 11836 ESTAT_ADD(rx_4096_to_8191_octet_packets); 11837 ESTAT_ADD(rx_8192_to_9022_octet_packets); 11838 11839 ESTAT_ADD(tx_octets); 11840 ESTAT_ADD(tx_collisions); 11841 ESTAT_ADD(tx_xon_sent); 11842 ESTAT_ADD(tx_xoff_sent); 11843 ESTAT_ADD(tx_flow_control); 11844 ESTAT_ADD(tx_mac_errors); 11845 ESTAT_ADD(tx_single_collisions); 11846 ESTAT_ADD(tx_mult_collisions); 11847 ESTAT_ADD(tx_deferred); 11848 ESTAT_ADD(tx_excessive_collisions); 11849 ESTAT_ADD(tx_late_collisions); 11850 ESTAT_ADD(tx_collide_2times); 11851 ESTAT_ADD(tx_collide_3times); 11852 ESTAT_ADD(tx_collide_4times); 11853 ESTAT_ADD(tx_collide_5times); 11854 ESTAT_ADD(tx_collide_6times); 11855 ESTAT_ADD(tx_collide_7times); 11856 ESTAT_ADD(tx_collide_8times); 11857 ESTAT_ADD(tx_collide_9times); 11858 ESTAT_ADD(tx_collide_10times); 11859 ESTAT_ADD(tx_collide_11times); 11860 ESTAT_ADD(tx_collide_12times); 11861 ESTAT_ADD(tx_collide_13times); 11862 ESTAT_ADD(tx_collide_14times); 11863 ESTAT_ADD(tx_collide_15times); 11864 ESTAT_ADD(tx_ucast_packets); 11865 ESTAT_ADD(tx_mcast_packets); 11866 ESTAT_ADD(tx_bcast_packets); 11867 ESTAT_ADD(tx_carrier_sense_errors); 11868 ESTAT_ADD(tx_discards); 11869 ESTAT_ADD(tx_errors); 11870 11871 ESTAT_ADD(dma_writeq_full); 11872 
ESTAT_ADD(dma_write_prioq_full); 11873 ESTAT_ADD(rxbds_empty); 11874 ESTAT_ADD(rx_discards); 11875 ESTAT_ADD(rx_errors); 11876 ESTAT_ADD(rx_threshold_hit); 11877 11878 ESTAT_ADD(dma_readq_full); 11879 ESTAT_ADD(dma_read_prioq_full); 11880 ESTAT_ADD(tx_comp_queue_full); 11881 11882 ESTAT_ADD(ring_set_send_prod_index); 11883 ESTAT_ADD(ring_status_update); 11884 ESTAT_ADD(nic_irqs); 11885 ESTAT_ADD(nic_avoided_irqs); 11886 ESTAT_ADD(nic_tx_threshold_hit); 11887 11888 ESTAT_ADD(mbuf_lwm_thresh_hit); 11889 } 11890 11891 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats) 11892 { 11893 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev; 11894 struct tg3_hw_stats *hw_stats = tp->hw_stats; 11895 11896 stats->rx_packets = old_stats->rx_packets + 11897 get_stat64(&hw_stats->rx_ucast_packets) + 11898 get_stat64(&hw_stats->rx_mcast_packets) + 11899 get_stat64(&hw_stats->rx_bcast_packets); 11900 11901 stats->tx_packets = old_stats->tx_packets + 11902 get_stat64(&hw_stats->tx_ucast_packets) + 11903 get_stat64(&hw_stats->tx_mcast_packets) + 11904 get_stat64(&hw_stats->tx_bcast_packets); 11905 11906 stats->rx_bytes = old_stats->rx_bytes + 11907 get_stat64(&hw_stats->rx_octets); 11908 stats->tx_bytes = old_stats->tx_bytes + 11909 get_stat64(&hw_stats->tx_octets); 11910 11911 stats->rx_errors = old_stats->rx_errors + 11912 get_stat64(&hw_stats->rx_errors); 11913 stats->tx_errors = old_stats->tx_errors + 11914 get_stat64(&hw_stats->tx_errors) + 11915 get_stat64(&hw_stats->tx_mac_errors) + 11916 get_stat64(&hw_stats->tx_carrier_sense_errors) + 11917 get_stat64(&hw_stats->tx_discards); 11918 11919 stats->multicast = old_stats->multicast + 11920 get_stat64(&hw_stats->rx_mcast_packets); 11921 stats->collisions = old_stats->collisions + 11922 get_stat64(&hw_stats->tx_collisions); 11923 11924 stats->rx_length_errors = old_stats->rx_length_errors + 11925 get_stat64(&hw_stats->rx_frame_too_long_errors) + 11926 get_stat64(&hw_stats->rx_undersize_packets); 11927 11928 stats->rx_frame_errors = old_stats->rx_frame_errors + 11929 get_stat64(&hw_stats->rx_align_errors); 11930 stats->tx_aborted_errors = old_stats->tx_aborted_errors + 11931 get_stat64(&hw_stats->tx_discards); 11932 stats->tx_carrier_errors = old_stats->tx_carrier_errors + 11933 get_stat64(&hw_stats->tx_carrier_sense_errors); 11934 11935 stats->rx_crc_errors = old_stats->rx_crc_errors + 11936 tg3_calc_crc_errors(tp); 11937 11938 stats->rx_missed_errors = old_stats->rx_missed_errors + 11939 get_stat64(&hw_stats->rx_discards); 11940 11941 stats->rx_dropped = tp->rx_dropped; 11942 stats->tx_dropped = tp->tx_dropped; 11943 } 11944 11945 static int tg3_get_regs_len(struct net_device *dev) 11946 { 11947 return TG3_REG_BLK_SIZE; 11948 } 11949 11950 static void tg3_get_regs(struct net_device *dev, 11951 struct ethtool_regs *regs, void *_p) 11952 { 11953 struct tg3 *tp = netdev_priv(dev); 11954 11955 regs->version = 0; 11956 11957 memset(_p, 0, TG3_REG_BLK_SIZE); 11958 11959 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 11960 return; 11961 11962 tg3_full_lock(tp, 0); 11963 11964 tg3_dump_legacy_regs(tp, (u32 *)_p); 11965 11966 tg3_full_unlock(tp); 11967 } 11968 11969 static int tg3_get_eeprom_len(struct net_device *dev) 11970 { 11971 struct tg3 *tp = netdev_priv(dev); 11972 11973 return tp->nvram_size; 11974 } 11975 11976 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) 11977 { 11978 struct tg3 *tp = netdev_priv(dev); 11979 int ret, cpmu_restore = 0; 11980 u8 *pd; 11981 u32 i, offset, len, 
b_offset, b_count, cpmu_val = 0; 11982 __be32 val; 11983 11984 if (tg3_flag(tp, NO_NVRAM)) 11985 return -EINVAL; 11986 11987 offset = eeprom->offset; 11988 len = eeprom->len; 11989 eeprom->len = 0; 11990 11991 eeprom->magic = TG3_EEPROM_MAGIC; 11992 11993 /* Override clock, link aware and link idle modes */ 11994 if (tg3_flag(tp, CPMU_PRESENT)) { 11995 cpmu_val = tr32(TG3_CPMU_CTRL); 11996 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE | 11997 CPMU_CTRL_LINK_IDLE_MODE)) { 11998 tw32(TG3_CPMU_CTRL, cpmu_val & 11999 ~(CPMU_CTRL_LINK_AWARE_MODE | 12000 CPMU_CTRL_LINK_IDLE_MODE)); 12001 cpmu_restore = 1; 12002 } 12003 } 12004 tg3_override_clk(tp); 12005 12006 if (offset & 3) { 12007 /* adjustments to start on required 4 byte boundary */ 12008 b_offset = offset & 3; 12009 b_count = 4 - b_offset; 12010 if (b_count > len) { 12011 /* i.e. offset=1 len=2 */ 12012 b_count = len; 12013 } 12014 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val); 12015 if (ret) 12016 goto eeprom_done; 12017 memcpy(data, ((char *)&val) + b_offset, b_count); 12018 len -= b_count; 12019 offset += b_count; 12020 eeprom->len += b_count; 12021 } 12022 12023 /* read bytes up to the last 4 byte boundary */ 12024 pd = &data[eeprom->len]; 12025 for (i = 0; i < (len - (len & 3)); i += 4) { 12026 ret = tg3_nvram_read_be32(tp, offset + i, &val); 12027 if (ret) { 12028 if (i) 12029 i -= 4; 12030 eeprom->len += i; 12031 goto eeprom_done; 12032 } 12033 memcpy(pd + i, &val, 4); 12034 if (need_resched()) { 12035 if (signal_pending(current)) { 12036 eeprom->len += i; 12037 ret = -EINTR; 12038 goto eeprom_done; 12039 } 12040 cond_resched(); 12041 } 12042 } 12043 eeprom->len += i; 12044 12045 if (len & 3) { 12046 /* read last bytes not ending on 4 byte boundary */ 12047 pd = &data[eeprom->len]; 12048 b_count = len & 3; 12049 b_offset = offset + len - b_count; 12050 ret = tg3_nvram_read_be32(tp, b_offset, &val); 12051 if (ret) 12052 goto eeprom_done; 12053 memcpy(pd, &val, b_count); 12054 eeprom->len += b_count; 12055 } 12056 ret = 0; 12057 12058 eeprom_done: 12059 /* Restore clock, link aware and link idle modes */ 12060 tg3_restore_clk(tp); 12061 if (cpmu_restore) 12062 tw32(TG3_CPMU_CTRL, cpmu_val); 12063 12064 return ret; 12065 } 12066 12067 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) 12068 { 12069 struct tg3 *tp = netdev_priv(dev); 12070 int ret; 12071 u32 offset, len, b_offset, odd_len; 12072 u8 *buf; 12073 __be32 start = 0, end; 12074 12075 if (tg3_flag(tp, NO_NVRAM) || 12076 eeprom->magic != TG3_EEPROM_MAGIC) 12077 return -EINVAL; 12078 12079 offset = eeprom->offset; 12080 len = eeprom->len; 12081 12082 if ((b_offset = (offset & 3))) { 12083 /* adjustments to start on required 4 byte boundary */ 12084 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start); 12085 if (ret) 12086 return ret; 12087 len += b_offset; 12088 offset &= ~3; 12089 if (len < 4) 12090 len = 4; 12091 } 12092 12093 odd_len = 0; 12094 if (len & 3) { 12095 /* adjustments to end on required 4 byte boundary */ 12096 odd_len = 1; 12097 len = (len + 3) & ~3; 12098 ret = tg3_nvram_read_be32(tp, offset+len-4, &end); 12099 if (ret) 12100 return ret; 12101 } 12102 12103 buf = data; 12104 if (b_offset || odd_len) { 12105 buf = kmalloc(len, GFP_KERNEL); 12106 if (!buf) 12107 return -ENOMEM; 12108 if (b_offset) 12109 memcpy(buf, &start, 4); 12110 if (odd_len) 12111 memcpy(buf+len-4, &end, 4); 12112 memcpy(buf + b_offset, data, eeprom->len); 12113 } 12114 12115 ret = tg3_nvram_write_block(tp, offset, len, buf); 12116 12117 if (buf != 
data) 12118 kfree(buf); 12119 12120 return ret; 12121 } 12122 12123 static int tg3_get_link_ksettings(struct net_device *dev, 12124 struct ethtool_link_ksettings *cmd) 12125 { 12126 struct tg3 *tp = netdev_priv(dev); 12127 u32 supported, advertising; 12128 12129 if (tg3_flag(tp, USE_PHYLIB)) { 12130 struct phy_device *phydev; 12131 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 12132 return -EAGAIN; 12133 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 12134 phy_ethtool_ksettings_get(phydev, cmd); 12135 12136 return 0; 12137 } 12138 12139 supported = (SUPPORTED_Autoneg); 12140 12141 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) 12142 supported |= (SUPPORTED_1000baseT_Half | 12143 SUPPORTED_1000baseT_Full); 12144 12145 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { 12146 supported |= (SUPPORTED_100baseT_Half | 12147 SUPPORTED_100baseT_Full | 12148 SUPPORTED_10baseT_Half | 12149 SUPPORTED_10baseT_Full | 12150 SUPPORTED_TP); 12151 cmd->base.port = PORT_TP; 12152 } else { 12153 supported |= SUPPORTED_FIBRE; 12154 cmd->base.port = PORT_FIBRE; 12155 } 12156 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, 12157 supported); 12158 12159 advertising = tp->link_config.advertising; 12160 if (tg3_flag(tp, PAUSE_AUTONEG)) { 12161 if (tp->link_config.flowctrl & FLOW_CTRL_RX) { 12162 if (tp->link_config.flowctrl & FLOW_CTRL_TX) { 12163 advertising |= ADVERTISED_Pause; 12164 } else { 12165 advertising |= ADVERTISED_Pause | 12166 ADVERTISED_Asym_Pause; 12167 } 12168 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) { 12169 advertising |= ADVERTISED_Asym_Pause; 12170 } 12171 } 12172 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, 12173 advertising); 12174 12175 if (netif_running(dev) && tp->link_up) { 12176 cmd->base.speed = tp->link_config.active_speed; 12177 cmd->base.duplex = tp->link_config.active_duplex; 12178 ethtool_convert_legacy_u32_to_link_mode( 12179 cmd->link_modes.lp_advertising, 12180 tp->link_config.rmt_adv); 12181 12182 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { 12183 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE) 12184 cmd->base.eth_tp_mdix = ETH_TP_MDI_X; 12185 else 12186 cmd->base.eth_tp_mdix = ETH_TP_MDI; 12187 } 12188 } else { 12189 cmd->base.speed = SPEED_UNKNOWN; 12190 cmd->base.duplex = DUPLEX_UNKNOWN; 12191 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; 12192 } 12193 cmd->base.phy_address = tp->phy_addr; 12194 cmd->base.autoneg = tp->link_config.autoneg; 12195 return 0; 12196 } 12197 12198 static int tg3_set_link_ksettings(struct net_device *dev, 12199 const struct ethtool_link_ksettings *cmd) 12200 { 12201 struct tg3 *tp = netdev_priv(dev); 12202 u32 speed = cmd->base.speed; 12203 u32 advertising; 12204 12205 if (tg3_flag(tp, USE_PHYLIB)) { 12206 struct phy_device *phydev; 12207 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 12208 return -EAGAIN; 12209 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 12210 return phy_ethtool_ksettings_set(phydev, cmd); 12211 } 12212 12213 if (cmd->base.autoneg != AUTONEG_ENABLE && 12214 cmd->base.autoneg != AUTONEG_DISABLE) 12215 return -EINVAL; 12216 12217 if (cmd->base.autoneg == AUTONEG_DISABLE && 12218 cmd->base.duplex != DUPLEX_FULL && 12219 cmd->base.duplex != DUPLEX_HALF) 12220 return -EINVAL; 12221 12222 ethtool_convert_link_mode_to_legacy_u32(&advertising, 12223 cmd->link_modes.advertising); 12224 12225 if (cmd->base.autoneg == AUTONEG_ENABLE) { 12226 u32 mask = ADVERTISED_Autoneg | 12227 ADVERTISED_Pause | 12228 ADVERTISED_Asym_Pause; 12229 12230 if (!(tp->phy_flags & 
TG3_PHYFLG_10_100_ONLY)) 12231 mask |= ADVERTISED_1000baseT_Half | 12232 ADVERTISED_1000baseT_Full; 12233 12234 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 12235 mask |= ADVERTISED_100baseT_Half | 12236 ADVERTISED_100baseT_Full | 12237 ADVERTISED_10baseT_Half | 12238 ADVERTISED_10baseT_Full | 12239 ADVERTISED_TP; 12240 else 12241 mask |= ADVERTISED_FIBRE; 12242 12243 if (advertising & ~mask) 12244 return -EINVAL; 12245 12246 mask &= (ADVERTISED_1000baseT_Half | 12247 ADVERTISED_1000baseT_Full | 12248 ADVERTISED_100baseT_Half | 12249 ADVERTISED_100baseT_Full | 12250 ADVERTISED_10baseT_Half | 12251 ADVERTISED_10baseT_Full); 12252 12253 advertising &= mask; 12254 } else { 12255 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) { 12256 if (speed != SPEED_1000) 12257 return -EINVAL; 12258 12259 if (cmd->base.duplex != DUPLEX_FULL) 12260 return -EINVAL; 12261 } else { 12262 if (speed != SPEED_100 && 12263 speed != SPEED_10) 12264 return -EINVAL; 12265 } 12266 } 12267 12268 tg3_full_lock(tp, 0); 12269 12270 tp->link_config.autoneg = cmd->base.autoneg; 12271 if (cmd->base.autoneg == AUTONEG_ENABLE) { 12272 tp->link_config.advertising = (advertising | 12273 ADVERTISED_Autoneg); 12274 tp->link_config.speed = SPEED_UNKNOWN; 12275 tp->link_config.duplex = DUPLEX_UNKNOWN; 12276 } else { 12277 tp->link_config.advertising = 0; 12278 tp->link_config.speed = speed; 12279 tp->link_config.duplex = cmd->base.duplex; 12280 } 12281 12282 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; 12283 12284 tg3_warn_mgmt_link_flap(tp); 12285 12286 if (netif_running(dev)) 12287 tg3_setup_phy(tp, true); 12288 12289 tg3_full_unlock(tp); 12290 12291 return 0; 12292 } 12293 12294 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 12295 { 12296 struct tg3 *tp = netdev_priv(dev); 12297 12298 strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); 12299 strscpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version)); 12300 strscpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info)); 12301 } 12302 12303 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 12304 { 12305 struct tg3 *tp = netdev_priv(dev); 12306 12307 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev)) 12308 wol->supported = WAKE_MAGIC; 12309 else 12310 wol->supported = 0; 12311 wol->wolopts = 0; 12312 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev)) 12313 wol->wolopts = WAKE_MAGIC; 12314 memset(&wol->sopass, 0, sizeof(wol->sopass)); 12315 } 12316 12317 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 12318 { 12319 struct tg3 *tp = netdev_priv(dev); 12320 struct device *dp = &tp->pdev->dev; 12321 12322 if (wol->wolopts & ~WAKE_MAGIC) 12323 return -EINVAL; 12324 if ((wol->wolopts & WAKE_MAGIC) && 12325 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp))) 12326 return -EINVAL; 12327 12328 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC); 12329 12330 if (device_may_wakeup(dp)) 12331 tg3_flag_set(tp, WOL_ENABLE); 12332 else 12333 tg3_flag_clear(tp, WOL_ENABLE); 12334 12335 return 0; 12336 } 12337 12338 static u32 tg3_get_msglevel(struct net_device *dev) 12339 { 12340 struct tg3 *tp = netdev_priv(dev); 12341 return tp->msg_enable; 12342 } 12343 12344 static void tg3_set_msglevel(struct net_device *dev, u32 value) 12345 { 12346 struct tg3 *tp = netdev_priv(dev); 12347 tp->msg_enable = value; 12348 } 12349 12350 static int tg3_nway_reset(struct net_device *dev) 12351 { 12352 struct tg3 *tp = netdev_priv(dev); 12353 int r; 12354 12355 if 
(!netif_running(dev)) 12356 return -EAGAIN; 12357 12358 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 12359 return -EINVAL; 12360 12361 tg3_warn_mgmt_link_flap(tp); 12362 12363 if (tg3_flag(tp, USE_PHYLIB)) { 12364 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 12365 return -EAGAIN; 12366 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); 12367 } else { 12368 u32 bmcr; 12369 12370 spin_lock_bh(&tp->lock); 12371 r = -EINVAL; 12372 tg3_readphy(tp, MII_BMCR, &bmcr); 12373 if (!tg3_readphy(tp, MII_BMCR, &bmcr) && 12374 ((bmcr & BMCR_ANENABLE) || 12375 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) { 12376 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART | 12377 BMCR_ANENABLE); 12378 r = 0; 12379 } 12380 spin_unlock_bh(&tp->lock); 12381 } 12382 12383 return r; 12384 } 12385 12386 static void tg3_get_ringparam(struct net_device *dev, 12387 struct ethtool_ringparam *ering, 12388 struct kernel_ethtool_ringparam *kernel_ering, 12389 struct netlink_ext_ack *extack) 12390 { 12391 struct tg3 *tp = netdev_priv(dev); 12392 12393 ering->rx_max_pending = tp->rx_std_ring_mask; 12394 if (tg3_flag(tp, JUMBO_RING_ENABLE)) 12395 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask; 12396 else 12397 ering->rx_jumbo_max_pending = 0; 12398 12399 ering->tx_max_pending = TG3_TX_RING_SIZE - 1; 12400 12401 ering->rx_pending = tp->rx_pending; 12402 if (tg3_flag(tp, JUMBO_RING_ENABLE)) 12403 ering->rx_jumbo_pending = tp->rx_jumbo_pending; 12404 else 12405 ering->rx_jumbo_pending = 0; 12406 12407 ering->tx_pending = tp->napi[0].tx_pending; 12408 } 12409 12410 static int tg3_set_ringparam(struct net_device *dev, 12411 struct ethtool_ringparam *ering, 12412 struct kernel_ethtool_ringparam *kernel_ering, 12413 struct netlink_ext_ack *extack) 12414 { 12415 struct tg3 *tp = netdev_priv(dev); 12416 int i, irq_sync = 0, err = 0; 12417 bool reset_phy = false; 12418 12419 if ((ering->rx_pending > tp->rx_std_ring_mask) || 12420 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) || 12421 (ering->tx_pending > TG3_TX_RING_SIZE - 1) || 12422 (ering->tx_pending <= MAX_SKB_FRAGS) || 12423 (tg3_flag(tp, TSO_BUG) && 12424 (ering->tx_pending <= (MAX_SKB_FRAGS * 3)))) 12425 return -EINVAL; 12426 12427 if (netif_running(dev)) { 12428 tg3_phy_stop(tp); 12429 tg3_netif_stop(tp); 12430 irq_sync = 1; 12431 } 12432 12433 tg3_full_lock(tp, irq_sync); 12434 12435 tp->rx_pending = ering->rx_pending; 12436 12437 if (tg3_flag(tp, MAX_RXPEND_64) && 12438 tp->rx_pending > 63) 12439 tp->rx_pending = 63; 12440 12441 if (tg3_flag(tp, JUMBO_RING_ENABLE)) 12442 tp->rx_jumbo_pending = ering->rx_jumbo_pending; 12443 12444 for (i = 0; i < tp->irq_max; i++) 12445 tp->napi[i].tx_pending = ering->tx_pending; 12446 12447 if (netif_running(dev)) { 12448 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 12449 /* Reset PHY to avoid PHY lock up */ 12450 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 12451 tg3_asic_rev(tp) == ASIC_REV_5719 || 12452 tg3_asic_rev(tp) == ASIC_REV_5720) 12453 reset_phy = true; 12454 12455 err = tg3_restart_hw(tp, reset_phy); 12456 if (!err) 12457 tg3_netif_start(tp); 12458 } 12459 12460 tg3_full_unlock(tp); 12461 12462 if (irq_sync && !err) 12463 tg3_phy_start(tp); 12464 12465 return err; 12466 } 12467 12468 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) 12469 { 12470 struct tg3 *tp = netdev_priv(dev); 12471 12472 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG); 12473 12474 if (tp->link_config.flowctrl & FLOW_CTRL_RX) 12475 epause->rx_pause = 1; 12476 else 12477 epause->rx_pause = 0; 12478 12479 if 
(tp->link_config.flowctrl & FLOW_CTRL_TX) 12480 epause->tx_pause = 1; 12481 else 12482 epause->tx_pause = 0; 12483 } 12484 12485 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) 12486 { 12487 struct tg3 *tp = netdev_priv(dev); 12488 int err = 0; 12489 bool reset_phy = false; 12490 12491 if (tp->link_config.autoneg == AUTONEG_ENABLE) 12492 tg3_warn_mgmt_link_flap(tp); 12493 12494 if (tg3_flag(tp, USE_PHYLIB)) { 12495 struct phy_device *phydev; 12496 12497 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 12498 12499 if (!phy_validate_pause(phydev, epause)) 12500 return -EINVAL; 12501 12502 tp->link_config.flowctrl = 0; 12503 phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause); 12504 if (epause->rx_pause) { 12505 tp->link_config.flowctrl |= FLOW_CTRL_RX; 12506 12507 if (epause->tx_pause) { 12508 tp->link_config.flowctrl |= FLOW_CTRL_TX; 12509 } 12510 } else if (epause->tx_pause) { 12511 tp->link_config.flowctrl |= FLOW_CTRL_TX; 12512 } 12513 12514 if (epause->autoneg) 12515 tg3_flag_set(tp, PAUSE_AUTONEG); 12516 else 12517 tg3_flag_clear(tp, PAUSE_AUTONEG); 12518 12519 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { 12520 if (phydev->autoneg) { 12521 /* phy_set_asym_pause() will 12522 * renegotiate the link to inform our 12523 * link partner of our flow control 12524 * settings, even if the flow control 12525 * is forced. Let tg3_adjust_link() 12526 * do the final flow control setup. 12527 */ 12528 return 0; 12529 } 12530 12531 if (!epause->autoneg) 12532 tg3_setup_flow_control(tp, 0, 0); 12533 } 12534 } else { 12535 int irq_sync = 0; 12536 12537 if (netif_running(dev)) { 12538 tg3_netif_stop(tp); 12539 irq_sync = 1; 12540 } 12541 12542 tg3_full_lock(tp, irq_sync); 12543 12544 if (epause->autoneg) 12545 tg3_flag_set(tp, PAUSE_AUTONEG); 12546 else 12547 tg3_flag_clear(tp, PAUSE_AUTONEG); 12548 if (epause->rx_pause) 12549 tp->link_config.flowctrl |= FLOW_CTRL_RX; 12550 else 12551 tp->link_config.flowctrl &= ~FLOW_CTRL_RX; 12552 if (epause->tx_pause) 12553 tp->link_config.flowctrl |= FLOW_CTRL_TX; 12554 else 12555 tp->link_config.flowctrl &= ~FLOW_CTRL_TX; 12556 12557 if (netif_running(dev)) { 12558 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 12559 /* Reset PHY to avoid PHY lock up */ 12560 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 12561 tg3_asic_rev(tp) == ASIC_REV_5719 || 12562 tg3_asic_rev(tp) == ASIC_REV_5720) 12563 reset_phy = true; 12564 12565 err = tg3_restart_hw(tp, reset_phy); 12566 if (!err) 12567 tg3_netif_start(tp); 12568 } 12569 12570 tg3_full_unlock(tp); 12571 } 12572 12573 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; 12574 12575 return err; 12576 } 12577 12578 static int tg3_get_sset_count(struct net_device *dev, int sset) 12579 { 12580 switch (sset) { 12581 case ETH_SS_TEST: 12582 return TG3_NUM_TEST; 12583 case ETH_SS_STATS: 12584 return TG3_NUM_STATS; 12585 default: 12586 return -EOPNOTSUPP; 12587 } 12588 } 12589 12590 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, 12591 u32 *rules __always_unused) 12592 { 12593 struct tg3 *tp = netdev_priv(dev); 12594 12595 if (!tg3_flag(tp, SUPPORT_MSIX)) 12596 return -EOPNOTSUPP; 12597 12598 switch (info->cmd) { 12599 case ETHTOOL_GRXRINGS: 12600 if (netif_running(tp->dev)) 12601 info->data = tp->rxq_cnt; 12602 else { 12603 info->data = num_online_cpus(); 12604 if (info->data > TG3_RSS_MAX_NUM_QS) 12605 info->data = TG3_RSS_MAX_NUM_QS; 12606 } 12607 12608 return 0; 12609 12610 default: 12611 return -EOPNOTSUPP; 12612 } 12613 } 12614 12615 static u32 
tg3_get_rxfh_indir_size(struct net_device *dev)
{
	u32 size = 0;
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, SUPPORT_MSIX))
		size = TG3_RSS_INDIR_TBL_SIZE;

	return size;
}

static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;
	if (!indir)
		return 0;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		indir[i] = tp->rss_ind_tbl[i];

	return 0;
}

static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
			const u8 hfunc)
{
	struct tg3 *tp = netdev_priv(dev);
	size_t i;

	/* We require at least one supported parameter to be changed and no
	 * change in any of the unsupported parameters
	 */
	if (key ||
	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
		return -EOPNOTSUPP;

	if (!indir)
		return 0;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = indir[i];

	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
		return 0;

	/* It is legal to write the indirection
	 * table while the device is running.
	 */
	tg3_full_lock(tp, 0);
	tg3_rss_write_indir_tbl(tp);
	tg3_full_unlock(tp);

	return 0;
}

static void tg3_get_channels(struct net_device *dev,
			     struct ethtool_channels *channel)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 deflt_qs = netif_get_num_default_rss_queues();

	channel->max_rx = tp->rxq_max;
	channel->max_tx = tp->txq_max;

	if (netif_running(dev)) {
		channel->rx_count = tp->rxq_cnt;
		channel->tx_count = tp->txq_cnt;
	} else {
		if (tp->rxq_req)
			channel->rx_count = tp->rxq_req;
		else
			channel->rx_count = min(deflt_qs, tp->rxq_max);

		if (tp->txq_req)
			channel->tx_count = tp->txq_req;
		else
			channel->tx_count = min(deflt_qs, tp->txq_max);
	}
}

static int tg3_set_channels(struct net_device *dev,
			    struct ethtool_channels *channel)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return -EOPNOTSUPP;

	if (channel->rx_count > tp->rxq_max ||
	    channel->tx_count > tp->txq_max)
		return -EINVAL;

	tp->rxq_req = channel->rx_count;
	tp->txq_req = channel->tx_count;

	if (!netif_running(dev))
		return 0;

	tg3_stop(tp);

	tg3_carrier_off(tp);

	tg3_start(tp, true, false, false);

	return 0;
}

static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	case ETH_SS_TEST:
		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
		break;
	default:
		WARN_ON(1);	/* we need a WARN() */
		break;
	}
}

static int tg3_set_phys_id(struct net_device *dev,
			   enum ethtool_phys_id_state state)
{
	struct tg3 *tp = netdev_priv(dev);

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_1000MBPS_ON |
		     LED_CTRL_100MBPS_ON |
		     LED_CTRL_10MBPS_ON |
		     LED_CTRL_TRAFFIC_OVERRIDE |
		     LED_CTRL_TRAFFIC_BLINK |
		     LED_CTRL_TRAFFIC_LED);
		break;

	case ETHTOOL_ID_OFF:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_TRAFFIC_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		tw32(MAC_LED_CTRL, tp->led_ctrl);
		break;
	}

	return 0;
}

static void tg3_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->hw_stats)
		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
	else
		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
}

static __be32 *tg3_vpd_readblock(struct tg3 *tp, unsigned int *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}

		if (!offset || !len) {
			offset = TG3_NVM_VPD_OFF;
			len = TG3_NVM_VPD_LEN;
		}

		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return NULL;

		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
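			 * Keeping that raw byte order is deliberate: the
			 * block is later handed to pci_vpd_check_csum() as a
			 * plain byte stream, so no CPU-endian conversion is
			 * wanted here.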
12827 */ 12828 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4])) 12829 goto error; 12830 } 12831 *vpdlen = len; 12832 } else { 12833 buf = pci_vpd_alloc(tp->pdev, vpdlen); 12834 if (IS_ERR(buf)) 12835 return NULL; 12836 } 12837 12838 return buf; 12839 12840 error: 12841 kfree(buf); 12842 return NULL; 12843 } 12844 12845 #define NVRAM_TEST_SIZE 0x100 12846 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14 12847 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18 12848 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c 12849 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20 12850 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24 12851 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50 12852 #define NVRAM_SELFBOOT_HW_SIZE 0x20 12853 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c 12854 12855 static int tg3_test_nvram(struct tg3 *tp) 12856 { 12857 u32 csum, magic; 12858 __be32 *buf; 12859 int i, j, k, err = 0, size; 12860 unsigned int len; 12861 12862 if (tg3_flag(tp, NO_NVRAM)) 12863 return 0; 12864 12865 if (tg3_nvram_read(tp, 0, &magic) != 0) 12866 return -EIO; 12867 12868 if (magic == TG3_EEPROM_MAGIC) 12869 size = NVRAM_TEST_SIZE; 12870 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) { 12871 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) == 12872 TG3_EEPROM_SB_FORMAT_1) { 12873 switch (magic & TG3_EEPROM_SB_REVISION_MASK) { 12874 case TG3_EEPROM_SB_REVISION_0: 12875 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE; 12876 break; 12877 case TG3_EEPROM_SB_REVISION_2: 12878 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE; 12879 break; 12880 case TG3_EEPROM_SB_REVISION_3: 12881 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE; 12882 break; 12883 case TG3_EEPROM_SB_REVISION_4: 12884 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE; 12885 break; 12886 case TG3_EEPROM_SB_REVISION_5: 12887 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE; 12888 break; 12889 case TG3_EEPROM_SB_REVISION_6: 12890 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE; 12891 break; 12892 default: 12893 return -EIO; 12894 } 12895 } else 12896 return 0; 12897 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW) 12898 size = NVRAM_SELFBOOT_HW_SIZE; 12899 else 12900 return -EIO; 12901 12902 buf = kmalloc(size, GFP_KERNEL); 12903 if (buf == NULL) 12904 return -ENOMEM; 12905 12906 err = -EIO; 12907 for (i = 0, j = 0; i < size; i += 4, j++) { 12908 err = tg3_nvram_read_be32(tp, i, &buf[j]); 12909 if (err) 12910 break; 12911 } 12912 if (i < size) 12913 goto out; 12914 12915 /* Selfboot format */ 12916 magic = be32_to_cpu(buf[0]); 12917 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == 12918 TG3_EEPROM_MAGIC_FW) { 12919 u8 *buf8 = (u8 *) buf, csum8 = 0; 12920 12921 if ((magic & TG3_EEPROM_SB_REVISION_MASK) == 12922 TG3_EEPROM_SB_REVISION_2) { 12923 /* For rev 2, the csum doesn't include the MBA. */ 12924 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++) 12925 csum8 += buf8[i]; 12926 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++) 12927 csum8 += buf8[i]; 12928 } else { 12929 for (i = 0; i < size; i++) 12930 csum8 += buf8[i]; 12931 } 12932 12933 if (csum8 == 0) { 12934 err = 0; 12935 goto out; 12936 } 12937 12938 err = -EIO; 12939 goto out; 12940 } 12941 12942 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == 12943 TG3_EEPROM_MAGIC_HW) { 12944 u8 data[NVRAM_SELFBOOT_DATA_SIZE]; 12945 u8 parity[NVRAM_SELFBOOT_DATA_SIZE]; 12946 u8 *buf8 = (u8 *) buf; 12947 12948 /* Separate the parity bits and the data bytes. 
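* Bytes 0, 8, 16 and 17 of the selfboot block hold the packed
* parity bits; unpack them so parity[i] lines up with data[i]
* for the odd-parity check further down.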
*/ 12949 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) { 12950 if ((i == 0) || (i == 8)) { 12951 int l; 12952 u8 msk; 12953 12954 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1) 12955 parity[k++] = buf8[i] & msk; 12956 i++; 12957 } else if (i == 16) { 12958 int l; 12959 u8 msk; 12960 12961 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1) 12962 parity[k++] = buf8[i] & msk; 12963 i++; 12964 12965 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1) 12966 parity[k++] = buf8[i] & msk; 12967 i++; 12968 } 12969 data[j++] = buf8[i]; 12970 } 12971 12972 err = -EIO; 12973 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) { 12974 u8 hw8 = hweight8(data[i]); 12975 12976 if ((hw8 & 0x1) && parity[i]) 12977 goto out; 12978 else if (!(hw8 & 0x1) && !parity[i]) 12979 goto out; 12980 } 12981 err = 0; 12982 goto out; 12983 } 12984 12985 err = -EIO; 12986 12987 /* Bootstrap checksum at offset 0x10 */ 12988 csum = calc_crc((unsigned char *) buf, 0x10); 12989 if (csum != le32_to_cpu(buf[0x10/4])) 12990 goto out; 12991 12992 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */ 12993 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88); 12994 if (csum != le32_to_cpu(buf[0xfc/4])) 12995 goto out; 12996 12997 kfree(buf); 12998 12999 buf = tg3_vpd_readblock(tp, &len); 13000 if (!buf) 13001 return -ENOMEM; 13002 13003 err = pci_vpd_check_csum(buf, len); 13004 /* go on if no checksum found */ 13005 if (err == 1) 13006 err = 0; 13007 out: 13008 kfree(buf); 13009 return err; 13010 } 13011 13012 #define TG3_SERDES_TIMEOUT_SEC 2 13013 #define TG3_COPPER_TIMEOUT_SEC 6 13014 13015 static int tg3_test_link(struct tg3 *tp) 13016 { 13017 int i, max; 13018 13019 if (!netif_running(tp->dev)) 13020 return -ENODEV; 13021 13022 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) 13023 max = TG3_SERDES_TIMEOUT_SEC; 13024 else 13025 max = TG3_COPPER_TIMEOUT_SEC; 13026 13027 for (i = 0; i < max; i++) { 13028 if (tp->link_up) 13029 return 0; 13030 13031 if (msleep_interruptible(1000)) 13032 break; 13033 } 13034 13035 return -EIO; 13036 } 13037 13038 /* Only test the commonly used registers */ 13039 static int tg3_test_registers(struct tg3 *tp) 13040 { 13041 int i, is_5705, is_5750; 13042 u32 offset, read_mask, write_mask, val, save_val, read_val; 13043 static struct { 13044 u16 offset; 13045 u16 flags; 13046 #define TG3_FL_5705 0x1 13047 #define TG3_FL_NOT_5705 0x2 13048 #define TG3_FL_NOT_5788 0x4 13049 #define TG3_FL_NOT_5750 0x8 13050 u32 read_mask; 13051 u32 write_mask; 13052 } reg_tbl[] = { 13053 /* MAC Control Registers */ 13054 { MAC_MODE, TG3_FL_NOT_5705, 13055 0x00000000, 0x00ef6f8c }, 13056 { MAC_MODE, TG3_FL_5705, 13057 0x00000000, 0x01ef6b8c }, 13058 { MAC_STATUS, TG3_FL_NOT_5705, 13059 0x03800107, 0x00000000 }, 13060 { MAC_STATUS, TG3_FL_5705, 13061 0x03800100, 0x00000000 }, 13062 { MAC_ADDR_0_HIGH, 0x0000, 13063 0x00000000, 0x0000ffff }, 13064 { MAC_ADDR_0_LOW, 0x0000, 13065 0x00000000, 0xffffffff }, 13066 { MAC_RX_MTU_SIZE, 0x0000, 13067 0x00000000, 0x0000ffff }, 13068 { MAC_TX_MODE, 0x0000, 13069 0x00000000, 0x00000070 }, 13070 { MAC_TX_LENGTHS, 0x0000, 13071 0x00000000, 0x00003fff }, 13072 { MAC_RX_MODE, TG3_FL_NOT_5705, 13073 0x00000000, 0x000007fc }, 13074 { MAC_RX_MODE, TG3_FL_5705, 13075 0x00000000, 0x000007dc }, 13076 { MAC_HASH_REG_0, 0x0000, 13077 0x00000000, 0xffffffff }, 13078 { MAC_HASH_REG_1, 0x0000, 13079 0x00000000, 0xffffffff }, 13080 { MAC_HASH_REG_2, 0x0000, 13081 0x00000000, 0xffffffff }, 13082 { MAC_HASH_REG_3, 0x0000, 13083 0x00000000, 0xffffffff }, 13084 13085 /* Receive Data and Receive BD 
Initiator Control Registers. */ 13086 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705, 13087 0x00000000, 0xffffffff }, 13088 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705, 13089 0x00000000, 0xffffffff }, 13090 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705, 13091 0x00000000, 0x00000003 }, 13092 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705, 13093 0x00000000, 0xffffffff }, 13094 { RCVDBDI_STD_BD+0, 0x0000, 13095 0x00000000, 0xffffffff }, 13096 { RCVDBDI_STD_BD+4, 0x0000, 13097 0x00000000, 0xffffffff }, 13098 { RCVDBDI_STD_BD+8, 0x0000, 13099 0x00000000, 0xffff0002 }, 13100 { RCVDBDI_STD_BD+0xc, 0x0000, 13101 0x00000000, 0xffffffff }, 13102 13103 /* Receive BD Initiator Control Registers. */ 13104 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705, 13105 0x00000000, 0xffffffff }, 13106 { RCVBDI_STD_THRESH, TG3_FL_5705, 13107 0x00000000, 0x000003ff }, 13108 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705, 13109 0x00000000, 0xffffffff }, 13110 13111 /* Host Coalescing Control Registers. */ 13112 { HOSTCC_MODE, TG3_FL_NOT_5705, 13113 0x00000000, 0x00000004 }, 13114 { HOSTCC_MODE, TG3_FL_5705, 13115 0x00000000, 0x000000f6 }, 13116 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705, 13117 0x00000000, 0xffffffff }, 13118 { HOSTCC_RXCOL_TICKS, TG3_FL_5705, 13119 0x00000000, 0x000003ff }, 13120 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705, 13121 0x00000000, 0xffffffff }, 13122 { HOSTCC_TXCOL_TICKS, TG3_FL_5705, 13123 0x00000000, 0x000003ff }, 13124 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705, 13125 0x00000000, 0xffffffff }, 13126 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, 13127 0x00000000, 0x000000ff }, 13128 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705, 13129 0x00000000, 0xffffffff }, 13130 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, 13131 0x00000000, 0x000000ff }, 13132 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705, 13133 0x00000000, 0xffffffff }, 13134 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705, 13135 0x00000000, 0xffffffff }, 13136 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705, 13137 0x00000000, 0xffffffff }, 13138 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, 13139 0x00000000, 0x000000ff }, 13140 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705, 13141 0x00000000, 0xffffffff }, 13142 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, 13143 0x00000000, 0x000000ff }, 13144 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705, 13145 0x00000000, 0xffffffff }, 13146 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705, 13147 0x00000000, 0xffffffff }, 13148 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705, 13149 0x00000000, 0xffffffff }, 13150 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000, 13151 0x00000000, 0xffffffff }, 13152 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000, 13153 0x00000000, 0xffffffff }, 13154 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000, 13155 0xffffffff, 0x00000000 }, 13156 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000, 13157 0xffffffff, 0x00000000 }, 13158 13159 /* Buffer Manager Control Registers. 
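* The mbuf pool address/size registers are only exercised on
* pre-5750 parts (TG3_FL_NOT_5750), and the DMA descriptor pool
* registers are treated as read-only, hence their inverted masks.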
*/ 13160 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750, 13161 0x00000000, 0x007fff80 }, 13162 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750, 13163 0x00000000, 0x007fffff }, 13164 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000, 13165 0x00000000, 0x0000003f }, 13166 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000, 13167 0x00000000, 0x000001ff }, 13168 { BUFMGR_MB_HIGH_WATER, 0x0000, 13169 0x00000000, 0x000001ff }, 13170 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705, 13171 0xffffffff, 0x00000000 }, 13172 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705, 13173 0xffffffff, 0x00000000 }, 13174 13175 /* Mailbox Registers */ 13176 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000, 13177 0x00000000, 0x000001ff }, 13178 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705, 13179 0x00000000, 0x000001ff }, 13180 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000, 13181 0x00000000, 0x000007ff }, 13182 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000, 13183 0x00000000, 0x000001ff }, 13184 13185 { 0xffff, 0x0000, 0x00000000, 0x00000000 }, 13186 }; 13187 13188 is_5705 = is_5750 = 0; 13189 if (tg3_flag(tp, 5705_PLUS)) { 13190 is_5705 = 1; 13191 if (tg3_flag(tp, 5750_PLUS)) 13192 is_5750 = 1; 13193 } 13194 13195 for (i = 0; reg_tbl[i].offset != 0xffff; i++) { 13196 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705)) 13197 continue; 13198 13199 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705)) 13200 continue; 13201 13202 if (tg3_flag(tp, IS_5788) && 13203 (reg_tbl[i].flags & TG3_FL_NOT_5788)) 13204 continue; 13205 13206 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750)) 13207 continue; 13208 13209 offset = (u32) reg_tbl[i].offset; 13210 read_mask = reg_tbl[i].read_mask; 13211 write_mask = reg_tbl[i].write_mask; 13212 13213 /* Save the original register content */ 13214 save_val = tr32(offset); 13215 13216 /* Determine the read-only value. */ 13217 read_val = save_val & read_mask; 13218 13219 /* Write zero to the register, then make sure the read-only bits 13220 * are not changed and the read/write bits are all zeros. 13221 */ 13222 tw32(offset, 0); 13223 13224 val = tr32(offset); 13225 13226 /* Test the read-only and read/write bits. */ 13227 if (((val & read_mask) != read_val) || (val & write_mask)) 13228 goto out; 13229 13230 /* Write ones to all the bits defined by RdMask and WrMask, then 13231 * make sure the read-only bits are not changed and the 13232 * read/write bits are all ones. 13233 */ 13234 tw32(offset, read_mask | write_mask); 13235 13236 val = tr32(offset); 13237 13238 /* Test the read-only bits. */ 13239 if ((val & read_mask) != read_val) 13240 goto out; 13241 13242 /* Test the read/write bits. 
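* (every writable bit must have latched the ones written above)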
*/ 13243 if ((val & write_mask) != write_mask) 13244 goto out; 13245 13246 tw32(offset, save_val); 13247 } 13248 13249 return 0; 13250 13251 out: 13252 if (netif_msg_hw(tp)) 13253 netdev_err(tp->dev, 13254 "Register test failed at offset %x\n", offset); 13255 tw32(offset, save_val); 13256 return -EIO; 13257 } 13258 13259 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len) 13260 { 13261 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a }; 13262 int i; 13263 u32 j; 13264 13265 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) { 13266 for (j = 0; j < len; j += 4) { 13267 u32 val; 13268 13269 tg3_write_mem(tp, offset + j, test_pattern[i]); 13270 tg3_read_mem(tp, offset + j, &val); 13271 if (val != test_pattern[i]) 13272 return -EIO; 13273 } 13274 } 13275 return 0; 13276 } 13277 13278 static int tg3_test_memory(struct tg3 *tp) 13279 { 13280 static struct mem_entry { 13281 u32 offset; 13282 u32 len; 13283 } mem_tbl_570x[] = { 13284 { 0x00000000, 0x00b50}, 13285 { 0x00002000, 0x1c000}, 13286 { 0xffffffff, 0x00000} 13287 }, mem_tbl_5705[] = { 13288 { 0x00000100, 0x0000c}, 13289 { 0x00000200, 0x00008}, 13290 { 0x00004000, 0x00800}, 13291 { 0x00006000, 0x01000}, 13292 { 0x00008000, 0x02000}, 13293 { 0x00010000, 0x0e000}, 13294 { 0xffffffff, 0x00000} 13295 }, mem_tbl_5755[] = { 13296 { 0x00000200, 0x00008}, 13297 { 0x00004000, 0x00800}, 13298 { 0x00006000, 0x00800}, 13299 { 0x00008000, 0x02000}, 13300 { 0x00010000, 0x0c000}, 13301 { 0xffffffff, 0x00000} 13302 }, mem_tbl_5906[] = { 13303 { 0x00000200, 0x00008}, 13304 { 0x00004000, 0x00400}, 13305 { 0x00006000, 0x00400}, 13306 { 0x00008000, 0x01000}, 13307 { 0x00010000, 0x01000}, 13308 { 0xffffffff, 0x00000} 13309 }, mem_tbl_5717[] = { 13310 { 0x00000200, 0x00008}, 13311 { 0x00010000, 0x0a000}, 13312 { 0x00020000, 0x13c00}, 13313 { 0xffffffff, 0x00000} 13314 }, mem_tbl_57765[] = { 13315 { 0x00000200, 0x00008}, 13316 { 0x00004000, 0x00800}, 13317 { 0x00006000, 0x09800}, 13318 { 0x00010000, 0x0a000}, 13319 { 0xffffffff, 0x00000} 13320 }; 13321 struct mem_entry *mem_tbl; 13322 int err = 0; 13323 int i; 13324 13325 if (tg3_flag(tp, 5717_PLUS)) 13326 mem_tbl = mem_tbl_5717; 13327 else if (tg3_flag(tp, 57765_CLASS) || 13328 tg3_asic_rev(tp) == ASIC_REV_5762) 13329 mem_tbl = mem_tbl_57765; 13330 else if (tg3_flag(tp, 5755_PLUS)) 13331 mem_tbl = mem_tbl_5755; 13332 else if (tg3_asic_rev(tp) == ASIC_REV_5906) 13333 mem_tbl = mem_tbl_5906; 13334 else if (tg3_flag(tp, 5705_PLUS)) 13335 mem_tbl = mem_tbl_5705; 13336 else 13337 mem_tbl = mem_tbl_570x; 13338 13339 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) { 13340 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len); 13341 if (err) 13342 break; 13343 } 13344 13345 return err; 13346 } 13347 13348 #define TG3_TSO_MSS 500 13349 13350 #define TG3_TSO_IP_HDR_LEN 20 13351 #define TG3_TSO_TCP_HDR_LEN 20 13352 #define TG3_TSO_TCP_OPT_LEN 12 13353 13354 static const u8 tg3_tso_header[] = { 13355 0x08, 0x00, 13356 0x45, 0x00, 0x00, 0x00, 13357 0x00, 0x00, 0x40, 0x00, 13358 0x40, 0x06, 0x00, 0x00, 13359 0x0a, 0x00, 0x00, 0x01, 13360 0x0a, 0x00, 0x00, 0x02, 13361 0x0d, 0x00, 0xe0, 0x00, 13362 0x00, 0x00, 0x01, 0x00, 13363 0x00, 0x00, 0x02, 0x00, 13364 0x80, 0x10, 0x10, 0x00, 13365 0x14, 0x09, 0x00, 0x00, 13366 0x01, 0x01, 0x08, 0x0a, 13367 0x11, 0x11, 0x11, 0x11, 13368 0x11, 0x11, 0x11, 0x11, 13369 }; 13370 13371 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback) 13372 { 13373 u32 rx_start_idx, rx_idx, tx_idx, opaque_key; 13374 u32 base_flags = 0, mss = 0, 
desc_idx, coal_now, data_off, val; 13375 u32 budget; 13376 struct sk_buff *skb; 13377 u8 *tx_data, *rx_data; 13378 dma_addr_t map; 13379 int num_pkts, tx_len, rx_len, i, err; 13380 struct tg3_rx_buffer_desc *desc; 13381 struct tg3_napi *tnapi, *rnapi; 13382 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring; 13383 13384 tnapi = &tp->napi[0]; 13385 rnapi = &tp->napi[0]; 13386 if (tp->irq_cnt > 1) { 13387 if (tg3_flag(tp, ENABLE_RSS)) 13388 rnapi = &tp->napi[1]; 13389 if (tg3_flag(tp, ENABLE_TSS)) 13390 tnapi = &tp->napi[1]; 13391 } 13392 coal_now = tnapi->coal_now | rnapi->coal_now; 13393 13394 err = -EIO; 13395 13396 tx_len = pktsz; 13397 skb = netdev_alloc_skb(tp->dev, tx_len); 13398 if (!skb) 13399 return -ENOMEM; 13400 13401 tx_data = skb_put(skb, tx_len); 13402 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN); 13403 memset(tx_data + ETH_ALEN, 0x0, 8); 13404 13405 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN); 13406 13407 if (tso_loopback) { 13408 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN]; 13409 13410 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN + 13411 TG3_TSO_TCP_OPT_LEN; 13412 13413 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header, 13414 sizeof(tg3_tso_header)); 13415 mss = TG3_TSO_MSS; 13416 13417 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header); 13418 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS); 13419 13420 /* Set the total length field in the IP header */ 13421 iph->tot_len = htons((u16)(mss + hdr_len)); 13422 13423 base_flags = (TXD_FLAG_CPU_PRE_DMA | 13424 TXD_FLAG_CPU_POST_DMA); 13425 13426 if (tg3_flag(tp, HW_TSO_1) || 13427 tg3_flag(tp, HW_TSO_2) || 13428 tg3_flag(tp, HW_TSO_3)) { 13429 struct tcphdr *th; 13430 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN; 13431 th = (struct tcphdr *)&tx_data[val]; 13432 th->check = 0; 13433 } else 13434 base_flags |= TXD_FLAG_TCPUDP_CSUM; 13435 13436 if (tg3_flag(tp, HW_TSO_3)) { 13437 mss |= (hdr_len & 0xc) << 12; 13438 if (hdr_len & 0x10) 13439 base_flags |= 0x00000010; 13440 base_flags |= (hdr_len & 0x3e0) << 5; 13441 } else if (tg3_flag(tp, HW_TSO_2)) 13442 mss |= hdr_len << 9; 13443 else if (tg3_flag(tp, HW_TSO_1) || 13444 tg3_asic_rev(tp) == ASIC_REV_5705) { 13445 mss |= (TG3_TSO_TCP_OPT_LEN << 9); 13446 } else { 13447 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10); 13448 } 13449 13450 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header); 13451 } else { 13452 num_pkts = 1; 13453 data_off = ETH_HLEN; 13454 13455 if (tg3_flag(tp, USE_JUMBO_BDFLAG) && 13456 tx_len > VLAN_ETH_FRAME_LEN) 13457 base_flags |= TXD_FLAG_JMB_PKT; 13458 } 13459 13460 for (i = data_off; i < tx_len; i++) 13461 tx_data[i] = (u8) (i & 0xff); 13462 13463 map = dma_map_single(&tp->pdev->dev, skb->data, tx_len, DMA_TO_DEVICE); 13464 if (dma_mapping_error(&tp->pdev->dev, map)) { 13465 dev_kfree_skb(skb); 13466 return -EIO; 13467 } 13468 13469 val = tnapi->tx_prod; 13470 tnapi->tx_buffers[val].skb = skb; 13471 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map); 13472 13473 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 13474 rnapi->coal_now); 13475 13476 udelay(10); 13477 13478 rx_start_idx = rnapi->hw_status->idx[0].rx_producer; 13479 13480 budget = tg3_tx_avail(tnapi); 13481 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len, 13482 base_flags | TXD_FLAG_END, mss, 0)) { 13483 tnapi->tx_buffers[val].skb = NULL; 13484 dev_kfree_skb(skb); 13485 return -EIO; 13486 } 13487 13488 tnapi->tx_prod++; 13489 13490 /* Sync BD data before updating mailbox */ 13491 wmb(); 13492 13493 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod); 13494 
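	/* Read the mailbox back; on posted-write buses this flushes the
	 * producer index update out to the chip before we start polling.
	 */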
tr32_mailbox(tnapi->prodmbox); 13495 13496 udelay(10); 13497 13498 /* 350 usec to allow enough time on some 10/100 Mbps devices. */ 13499 for (i = 0; i < 35; i++) { 13500 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 13501 coal_now); 13502 13503 udelay(10); 13504 13505 tx_idx = tnapi->hw_status->idx[0].tx_consumer; 13506 rx_idx = rnapi->hw_status->idx[0].rx_producer; 13507 if ((tx_idx == tnapi->tx_prod) && 13508 (rx_idx == (rx_start_idx + num_pkts))) 13509 break; 13510 } 13511 13512 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1); 13513 dev_kfree_skb(skb); 13514 13515 if (tx_idx != tnapi->tx_prod) 13516 goto out; 13517 13518 if (rx_idx != rx_start_idx + num_pkts) 13519 goto out; 13520 13521 val = data_off; 13522 while (rx_idx != rx_start_idx) { 13523 desc = &rnapi->rx_rcb[rx_start_idx++]; 13524 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; 13525 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; 13526 13527 if ((desc->err_vlan & RXD_ERR_MASK) != 0 && 13528 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) 13529 goto out; 13530 13531 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) 13532 - ETH_FCS_LEN; 13533 13534 if (!tso_loopback) { 13535 if (rx_len != tx_len) 13536 goto out; 13537 13538 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) { 13539 if (opaque_key != RXD_OPAQUE_RING_STD) 13540 goto out; 13541 } else { 13542 if (opaque_key != RXD_OPAQUE_RING_JUMBO) 13543 goto out; 13544 } 13545 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && 13546 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK) 13547 >> RXD_TCPCSUM_SHIFT != 0xffff) { 13548 goto out; 13549 } 13550 13551 if (opaque_key == RXD_OPAQUE_RING_STD) { 13552 rx_data = tpr->rx_std_buffers[desc_idx].data; 13553 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], 13554 mapping); 13555 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { 13556 rx_data = tpr->rx_jmb_buffers[desc_idx].data; 13557 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx], 13558 mapping); 13559 } else 13560 goto out; 13561 13562 dma_sync_single_for_cpu(&tp->pdev->dev, map, rx_len, 13563 DMA_FROM_DEVICE); 13564 13565 rx_data += TG3_RX_OFFSET(tp); 13566 for (i = data_off; i < rx_len; i++, val++) { 13567 if (*(rx_data + i) != (u8) (val & 0xff)) 13568 goto out; 13569 } 13570 } 13571 13572 err = 0; 13573 13574 /* tg3_free_rings will unmap and free the rx_data */ 13575 out: 13576 return err; 13577 } 13578 13579 #define TG3_STD_LOOPBACK_FAILED 1 13580 #define TG3_JMB_LOOPBACK_FAILED 2 13581 #define TG3_TSO_LOOPBACK_FAILED 4 13582 #define TG3_LOOPBACK_FAILED \ 13583 (TG3_STD_LOOPBACK_FAILED | \ 13584 TG3_JMB_LOOPBACK_FAILED | \ 13585 TG3_TSO_LOOPBACK_FAILED) 13586 13587 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk) 13588 { 13589 int err = -EIO; 13590 u32 eee_cap; 13591 u32 jmb_pkt_sz = 9000; 13592 13593 if (tp->dma_limit) 13594 jmb_pkt_sz = tp->dma_limit - ETH_HLEN; 13595 13596 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP; 13597 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP; 13598 13599 if (!netif_running(tp->dev)) { 13600 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13601 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13602 if (do_extlpbk) 13603 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13604 goto done; 13605 } 13606 13607 err = tg3_reset_hw(tp, true); 13608 if (err) { 13609 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13610 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13611 if (do_extlpbk) 13612 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13613 goto done; 13614 } 13615 13616 if (tg3_flag(tp, ENABLE_RSS)) { 13617 int 
i; 13618 13619 /* Reroute all rx packets to the 1st queue */ 13620 for (i = MAC_RSS_INDIR_TBL_0; 13621 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4) 13622 tw32(i, 0x0); 13623 } 13624 13625 /* HW errata - mac loopback fails in some cases on 5780. 13626 * Normal traffic and PHY loopback are not affected by 13627 * errata. Also, the MAC loopback test is deprecated for 13628 * all newer ASIC revisions. 13629 */ 13630 if (tg3_asic_rev(tp) != ASIC_REV_5780 && 13631 !tg3_flag(tp, CPMU_PRESENT)) { 13632 tg3_mac_loopback(tp, true); 13633 13634 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) 13635 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED; 13636 13637 if (tg3_flag(tp, JUMBO_RING_ENABLE) && 13638 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false)) 13639 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED; 13640 13641 tg3_mac_loopback(tp, false); 13642 } 13643 13644 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 13645 !tg3_flag(tp, USE_PHYLIB)) { 13646 int i; 13647 13648 tg3_phy_lpbk_set(tp, 0, false); 13649 13650 /* Wait for link */ 13651 for (i = 0; i < 100; i++) { 13652 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) 13653 break; 13654 mdelay(1); 13655 } 13656 13657 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) 13658 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED; 13659 if (tg3_flag(tp, TSO_CAPABLE) && 13660 tg3_run_loopback(tp, ETH_FRAME_LEN, true)) 13661 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED; 13662 if (tg3_flag(tp, JUMBO_RING_ENABLE) && 13663 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false)) 13664 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED; 13665 13666 if (do_extlpbk) { 13667 tg3_phy_lpbk_set(tp, 0, true); 13668 13669 /* All link indications report up, but the hardware 13670 * isn't really ready for about 20 msec. Double it 13671 * to be sure. 13672 */ 13673 mdelay(40); 13674 13675 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) 13676 data[TG3_EXT_LOOPB_TEST] |= 13677 TG3_STD_LOOPBACK_FAILED; 13678 if (tg3_flag(tp, TSO_CAPABLE) && 13679 tg3_run_loopback(tp, ETH_FRAME_LEN, true)) 13680 data[TG3_EXT_LOOPB_TEST] |= 13681 TG3_TSO_LOOPBACK_FAILED; 13682 if (tg3_flag(tp, JUMBO_RING_ENABLE) && 13683 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false)) 13684 data[TG3_EXT_LOOPB_TEST] |= 13685 TG3_JMB_LOOPBACK_FAILED; 13686 } 13687 13688 /* Re-enable gphy autopowerdown. */ 13689 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) 13690 tg3_phy_toggle_apd(tp, true); 13691 } 13692 13693 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] | 13694 data[TG3_EXT_LOOPB_TEST]) ? 
-EIO : 0; 13695 13696 done: 13697 tp->phy_flags |= eee_cap; 13698 13699 return err; 13700 } 13701 13702 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, 13703 u64 *data) 13704 { 13705 struct tg3 *tp = netdev_priv(dev); 13706 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB; 13707 13708 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { 13709 if (tg3_power_up(tp)) { 13710 etest->flags |= ETH_TEST_FL_FAILED; 13711 memset(data, 1, sizeof(u64) * TG3_NUM_TEST); 13712 return; 13713 } 13714 tg3_ape_driver_state_change(tp, RESET_KIND_INIT); 13715 } 13716 13717 memset(data, 0, sizeof(u64) * TG3_NUM_TEST); 13718 13719 if (tg3_test_nvram(tp) != 0) { 13720 etest->flags |= ETH_TEST_FL_FAILED; 13721 data[TG3_NVRAM_TEST] = 1; 13722 } 13723 if (!doextlpbk && tg3_test_link(tp)) { 13724 etest->flags |= ETH_TEST_FL_FAILED; 13725 data[TG3_LINK_TEST] = 1; 13726 } 13727 if (etest->flags & ETH_TEST_FL_OFFLINE) { 13728 int err, err2 = 0, irq_sync = 0; 13729 13730 if (netif_running(dev)) { 13731 tg3_phy_stop(tp); 13732 tg3_netif_stop(tp); 13733 irq_sync = 1; 13734 } 13735 13736 tg3_full_lock(tp, irq_sync); 13737 tg3_halt(tp, RESET_KIND_SUSPEND, 1); 13738 err = tg3_nvram_lock(tp); 13739 tg3_halt_cpu(tp, RX_CPU_BASE); 13740 if (!tg3_flag(tp, 5705_PLUS)) 13741 tg3_halt_cpu(tp, TX_CPU_BASE); 13742 if (!err) 13743 tg3_nvram_unlock(tp); 13744 13745 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 13746 tg3_phy_reset(tp); 13747 13748 if (tg3_test_registers(tp) != 0) { 13749 etest->flags |= ETH_TEST_FL_FAILED; 13750 data[TG3_REGISTER_TEST] = 1; 13751 } 13752 13753 if (tg3_test_memory(tp) != 0) { 13754 etest->flags |= ETH_TEST_FL_FAILED; 13755 data[TG3_MEMORY_TEST] = 1; 13756 } 13757 13758 if (doextlpbk) 13759 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; 13760 13761 if (tg3_test_loopback(tp, data, doextlpbk)) 13762 etest->flags |= ETH_TEST_FL_FAILED; 13763 13764 tg3_full_unlock(tp); 13765 13766 if (tg3_test_interrupt(tp) != 0) { 13767 etest->flags |= ETH_TEST_FL_FAILED; 13768 data[TG3_INTERRUPT_TEST] = 1; 13769 } 13770 13771 tg3_full_lock(tp, 0); 13772 13773 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 13774 if (netif_running(dev)) { 13775 tg3_flag_set(tp, INIT_COMPLETE); 13776 err2 = tg3_restart_hw(tp, true); 13777 if (!err2) 13778 tg3_netif_start(tp); 13779 } 13780 13781 tg3_full_unlock(tp); 13782 13783 if (irq_sync && !err2) 13784 tg3_phy_start(tp); 13785 } 13786 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 13787 tg3_power_down_prepare(tp); 13788 13789 } 13790 13791 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) 13792 { 13793 struct tg3 *tp = netdev_priv(dev); 13794 struct hwtstamp_config stmpconf; 13795 13796 if (!tg3_flag(tp, PTP_CAPABLE)) 13797 return -EOPNOTSUPP; 13798 13799 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf))) 13800 return -EFAULT; 13801 13802 if (stmpconf.tx_type != HWTSTAMP_TX_ON && 13803 stmpconf.tx_type != HWTSTAMP_TX_OFF) 13804 return -ERANGE; 13805 13806 switch (stmpconf.rx_filter) { 13807 case HWTSTAMP_FILTER_NONE: 13808 tp->rxptpctl = 0; 13809 break; 13810 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 13811 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | 13812 TG3_RX_PTP_CTL_ALL_V1_EVENTS; 13813 break; 13814 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 13815 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | 13816 TG3_RX_PTP_CTL_SYNC_EVNT; 13817 break; 13818 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 13819 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | 13820 TG3_RX_PTP_CTL_DELAY_REQ; 13821 break; 13822 case HWTSTAMP_FILTER_PTP_V2_EVENT: 13823 tp->rxptpctl = 
TG3_RX_PTP_CTL_RX_PTP_V2_EN | 13824 TG3_RX_PTP_CTL_ALL_V2_EVENTS; 13825 break; 13826 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 13827 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | 13828 TG3_RX_PTP_CTL_ALL_V2_EVENTS; 13829 break; 13830 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 13831 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | 13832 TG3_RX_PTP_CTL_ALL_V2_EVENTS; 13833 break; 13834 case HWTSTAMP_FILTER_PTP_V2_SYNC: 13835 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN | 13836 TG3_RX_PTP_CTL_SYNC_EVNT; 13837 break; 13838 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 13839 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | 13840 TG3_RX_PTP_CTL_SYNC_EVNT; 13841 break; 13842 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 13843 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | 13844 TG3_RX_PTP_CTL_SYNC_EVNT; 13845 break; 13846 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 13847 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN | 13848 TG3_RX_PTP_CTL_DELAY_REQ; 13849 break; 13850 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 13851 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | 13852 TG3_RX_PTP_CTL_DELAY_REQ; 13853 break; 13854 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 13855 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | 13856 TG3_RX_PTP_CTL_DELAY_REQ; 13857 break; 13858 default: 13859 return -ERANGE; 13860 } 13861 13862 if (netif_running(dev) && tp->rxptpctl) 13863 tw32(TG3_RX_PTP_CTL, 13864 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK); 13865 13866 if (stmpconf.tx_type == HWTSTAMP_TX_ON) 13867 tg3_flag_set(tp, TX_TSTAMP_EN); 13868 else 13869 tg3_flag_clear(tp, TX_TSTAMP_EN); 13870 13871 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ? 13872 -EFAULT : 0; 13873 } 13874 13875 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) 13876 { 13877 struct tg3 *tp = netdev_priv(dev); 13878 struct hwtstamp_config stmpconf; 13879 13880 if (!tg3_flag(tp, PTP_CAPABLE)) 13881 return -EOPNOTSUPP; 13882 13883 stmpconf.flags = 0; 13884 stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ? 
13885 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF); 13886 13887 switch (tp->rxptpctl) { 13888 case 0: 13889 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE; 13890 break; 13891 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS: 13892 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; 13893 break; 13894 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT: 13895 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC; 13896 break; 13897 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ: 13898 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ; 13899 break; 13900 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS: 13901 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; 13902 break; 13903 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS: 13904 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; 13905 break; 13906 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS: 13907 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; 13908 break; 13909 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT: 13910 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC; 13911 break; 13912 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT: 13913 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC; 13914 break; 13915 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT: 13916 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC; 13917 break; 13918 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ: 13919 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ; 13920 break; 13921 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ: 13922 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ; 13923 break; 13924 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ: 13925 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ; 13926 break; 13927 default: 13928 WARN_ON_ONCE(1); 13929 return -ERANGE; 13930 } 13931 13932 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ? 
13933 -EFAULT : 0; 13934 } 13935 13936 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 13937 { 13938 struct mii_ioctl_data *data = if_mii(ifr); 13939 struct tg3 *tp = netdev_priv(dev); 13940 int err; 13941 13942 if (tg3_flag(tp, USE_PHYLIB)) { 13943 struct phy_device *phydev; 13944 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 13945 return -EAGAIN; 13946 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 13947 return phy_mii_ioctl(phydev, ifr, cmd); 13948 } 13949 13950 switch (cmd) { 13951 case SIOCGMIIPHY: 13952 data->phy_id = tp->phy_addr; 13953 13954 fallthrough; 13955 case SIOCGMIIREG: { 13956 u32 mii_regval; 13957 13958 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 13959 break; /* We have no PHY */ 13960 13961 if (!netif_running(dev)) 13962 return -EAGAIN; 13963 13964 spin_lock_bh(&tp->lock); 13965 err = __tg3_readphy(tp, data->phy_id & 0x1f, 13966 data->reg_num & 0x1f, &mii_regval); 13967 spin_unlock_bh(&tp->lock); 13968 13969 data->val_out = mii_regval; 13970 13971 return err; 13972 } 13973 13974 case SIOCSMIIREG: 13975 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 13976 break; /* We have no PHY */ 13977 13978 if (!netif_running(dev)) 13979 return -EAGAIN; 13980 13981 spin_lock_bh(&tp->lock); 13982 err = __tg3_writephy(tp, data->phy_id & 0x1f, 13983 data->reg_num & 0x1f, data->val_in); 13984 spin_unlock_bh(&tp->lock); 13985 13986 return err; 13987 13988 case SIOCSHWTSTAMP: 13989 return tg3_hwtstamp_set(dev, ifr); 13990 13991 case SIOCGHWTSTAMP: 13992 return tg3_hwtstamp_get(dev, ifr); 13993 13994 default: 13995 /* do nothing */ 13996 break; 13997 } 13998 return -EOPNOTSUPP; 13999 } 14000 14001 static int tg3_get_coalesce(struct net_device *dev, 14002 struct ethtool_coalesce *ec, 14003 struct kernel_ethtool_coalesce *kernel_coal, 14004 struct netlink_ext_ack *extack) 14005 { 14006 struct tg3 *tp = netdev_priv(dev); 14007 14008 memcpy(ec, &tp->coal, sizeof(*ec)); 14009 return 0; 14010 } 14011 14012 static int tg3_set_coalesce(struct net_device *dev, 14013 struct ethtool_coalesce *ec, 14014 struct kernel_ethtool_coalesce *kernel_coal, 14015 struct netlink_ext_ack *extack) 14016 { 14017 struct tg3 *tp = netdev_priv(dev); 14018 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0; 14019 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0; 14020 14021 if (!tg3_flag(tp, 5705_PLUS)) { 14022 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT; 14023 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT; 14024 max_stat_coal_ticks = MAX_STAT_COAL_TICKS; 14025 min_stat_coal_ticks = MIN_STAT_COAL_TICKS; 14026 } 14027 14028 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) || 14029 (!ec->rx_coalesce_usecs) || 14030 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) || 14031 (!ec->tx_coalesce_usecs) || 14032 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) || 14033 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) || 14034 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) || 14035 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) || 14036 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) || 14037 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) || 14038 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) || 14039 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks)) 14040 return -EINVAL; 14041 14042 /* Only copy relevant parameters, ignore all others. 
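* Anything outside supported_coalesce_params never reaches this
* handler; the ethtool core rejects it first.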
*/ 14043 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs; 14044 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs; 14045 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames; 14046 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames; 14047 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq; 14048 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq; 14049 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq; 14050 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq; 14051 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs; 14052 14053 if (netif_running(dev)) { 14054 tg3_full_lock(tp, 0); 14055 __tg3_set_coalesce(tp, &tp->coal); 14056 tg3_full_unlock(tp); 14057 } 14058 return 0; 14059 } 14060 14061 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata) 14062 { 14063 struct tg3 *tp = netdev_priv(dev); 14064 14065 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { 14066 netdev_warn(tp->dev, "Board does not support EEE!\n"); 14067 return -EOPNOTSUPP; 14068 } 14069 14070 if (edata->advertised != tp->eee.advertised) { 14071 netdev_warn(tp->dev, 14072 "Direct manipulation of EEE advertisement is not supported\n"); 14073 return -EINVAL; 14074 } 14075 14076 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) { 14077 netdev_warn(tp->dev, 14078 "Maximal Tx Lpi timer supported is %#x(u)\n", 14079 TG3_CPMU_DBTMR1_LNKIDLE_MAX); 14080 return -EINVAL; 14081 } 14082 14083 tp->eee = *edata; 14084 14085 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; 14086 tg3_warn_mgmt_link_flap(tp); 14087 14088 if (netif_running(tp->dev)) { 14089 tg3_full_lock(tp, 0); 14090 tg3_setup_eee(tp); 14091 tg3_phy_reset(tp); 14092 tg3_full_unlock(tp); 14093 } 14094 14095 return 0; 14096 } 14097 14098 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata) 14099 { 14100 struct tg3 *tp = netdev_priv(dev); 14101 14102 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { 14103 netdev_warn(tp->dev, 14104 "Board does not support EEE!\n"); 14105 return -EOPNOTSUPP; 14106 } 14107 14108 *edata = tp->eee; 14109 return 0; 14110 } 14111 14112 static const struct ethtool_ops tg3_ethtool_ops = { 14113 .supported_coalesce_params = ETHTOOL_COALESCE_USECS | 14114 ETHTOOL_COALESCE_MAX_FRAMES | 14115 ETHTOOL_COALESCE_USECS_IRQ | 14116 ETHTOOL_COALESCE_MAX_FRAMES_IRQ | 14117 ETHTOOL_COALESCE_STATS_BLOCK_USECS, 14118 .get_drvinfo = tg3_get_drvinfo, 14119 .get_regs_len = tg3_get_regs_len, 14120 .get_regs = tg3_get_regs, 14121 .get_wol = tg3_get_wol, 14122 .set_wol = tg3_set_wol, 14123 .get_msglevel = tg3_get_msglevel, 14124 .set_msglevel = tg3_set_msglevel, 14125 .nway_reset = tg3_nway_reset, 14126 .get_link = ethtool_op_get_link, 14127 .get_eeprom_len = tg3_get_eeprom_len, 14128 .get_eeprom = tg3_get_eeprom, 14129 .set_eeprom = tg3_set_eeprom, 14130 .get_ringparam = tg3_get_ringparam, 14131 .set_ringparam = tg3_set_ringparam, 14132 .get_pauseparam = tg3_get_pauseparam, 14133 .set_pauseparam = tg3_set_pauseparam, 14134 .self_test = tg3_self_test, 14135 .get_strings = tg3_get_strings, 14136 .set_phys_id = tg3_set_phys_id, 14137 .get_ethtool_stats = tg3_get_ethtool_stats, 14138 .get_coalesce = tg3_get_coalesce, 14139 .set_coalesce = tg3_set_coalesce, 14140 .get_sset_count = tg3_get_sset_count, 14141 .get_rxnfc = tg3_get_rxnfc, 14142 .get_rxfh_indir_size = tg3_get_rxfh_indir_size, 14143 .get_rxfh = tg3_get_rxfh, 14144 .set_rxfh = tg3_set_rxfh, 14145 .get_channels = tg3_get_channels, 14146 .set_channels = tg3_set_channels, 14147 .get_ts_info 

static void tg3_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);

	spin_lock_bh(&tp->lock);
	if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
		*stats = tp->net_stats_prev;
		spin_unlock_bh(&tp->lock);
		return;
	}

	tg3_get_nstats(tp, stats);
	spin_unlock_bh(&tp->lock);
}

static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}

static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tg3_flag(tp, 5780_CLASS)) {
			netdev_update_features(dev);
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else {
			tg3_flag_set(tp, JUMBO_RING_ENABLE);
		}
	} else {
		if (tg3_flag(tp, 5780_CLASS)) {
			tg3_flag_set(tp, TSO_CAPABLE);
			netdev_update_features(dev);
		}
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
	}
}
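
/* Editorial note: tg3_set_mtu() above only toggles the jumbo/TSO feature
 * flags; the RX rings themselves are torn down and re-sized by the
 * tg3_halt()/tg3_restart_hw() sequence in tg3_change_mtu() below.
 */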

static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;
	bool reset_phy = false;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is brought up.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_set_mtu(dev, tp, new_mtu);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	/* Reset PHY, otherwise the read DMA engine will be in a mode that
	 * breaks all requests to 256 bytes.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
	    tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		reset_phy = true;

	err = tg3_restart_hw(tp, reset_phy);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}

static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_eth_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};

static void tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return;

	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}

static void tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* This is confusing. We want to operate on the
			 * 16-bit value at offset 0xf2. The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses. This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU. The 16-bit
			 * byteswap then brings the data to CPU endianness.
			 */
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
			return;
		}
	}
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}

static void tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}

static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
{
	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5752PAGE_SIZE_512:
		tp->nvram_pagesize = 512;
		break;
	case FLASH_5752PAGE_SIZE_1K:
		tp->nvram_pagesize = 1024;
		break;
	case FLASH_5752PAGE_SIZE_2K:
		tp->nvram_pagesize = 2048;
		break;
	case FLASH_5752PAGE_SIZE_4K:
		tp->nvram_pagesize = 4096;
		break;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
		break;
	}
}

static void tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
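
/* Editorial note: on the 5755/5761 families below, bit 27 of NVRAM_CFG1
 * is the TPM "protected NVRAM" strap. When it is set, part of the flash
 * is reserved for the TPM, and the usable size reported to the rest of
 * the driver is reduced accordingly.
 */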

static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}

static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
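
/* Editorial note: unlike the 5755 above, the 5761 below does not
 * hard-code its protected-mode sizes; when the TPM strap is set it reads
 * the usable limit straight from the NVRAM_ADDR_LOCKOUT register.
 */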

static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}

static void tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}

static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
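
/* Editorial note: in the pagesize checks above and below, only the
 * 264- and 528-byte Atmel AT45DB "DataFlash" page sizes leave the
 * controller's NVRAM address translation enabled; every power-of-two
 * page size is addressed linearly and sets NO_NVRAM_ADDR_TRANS.
 */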

static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_get_nvram_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_get_nvram_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
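
/* Editorial note: tg3_get_5720_nvram_info() below also serves the 5762,
 * which first remaps its pin straps onto the matching 5720 values;
 * Macronix parts additionally report their size through
 * NVRAM_AUTOSENSE_STATUS rather than through the strap encoding.
 */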

static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp, nv_status;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
			tg3_flag_set(tp, NO_NVRAM);
			return;
		}

		switch (nvmpinstrp) {
		case FLASH_5762_MX25L_100:
		case FLASH_5762_MX25L_200:
		case FLASH_5762_MX25L_400:
		case FLASH_5762_MX25L_800:
		case FLASH_5762_MX25L_160_320:
			tp->nvram_pagesize = 4096;
			tp->nvram_jedecnum = JEDEC_MACRONIX;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
			tg3_flag_set(tp, FLASH);
			nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
			tp->nvram_size =
				(1 << (nv_status >> AUTOSENSE_DEVID &
						AUTOSENSE_DEVID_MASK)
					<< AUTOSENSE_SIZE_IN_MB);
			return;

		case FLASH_5762_EEPROM_HD:
			nvmpinstrp = FLASH_5720_EEPROM_HD;
			break;
		case FLASH_5762_EEPROM_LD:
			nvmpinstrp = FLASH_5720_EEPROM_LD;
			break;
		case FLASH_5720VENDOR_M_ST_M45PE20:
			/* This pinstrap supports multiple sizes, so force it
			 * to read the actual size from location 0xf0.
			 */
			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
			break;
		}
	}

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 val;

		if (tg3_nvram_read(tp, 0, &val))
			return;

		if (val != TG3_EEPROM_MAGIC &&
		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
			tg3_flag_set(tp, NO_NVRAM);
	}
}

/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void tg3_nvram_init(struct tg3 *tp)
{
	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tw32_f(GRC_EEPROM_ADDR,
	       (EEPROM_ADDR_FSM_RESET |
		(EEPROM_DEFAULT_CLOCK_PERIOD <<
		 EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	       tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
			 tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			 tg3_asic_rev(tp) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
			 tg3_asic_rev(tp) == ASIC_REV_5762)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}

struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3Com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};

static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}
	return NULL;
}
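
/* Editorial note: the table above is only a fallback for boards whose
 * NVRAM carries no usable PHY ID; tg3_phy_probe() consults it after both
 * the MII ID registers and the EEPROM signature have come up empty.
 */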

static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default. */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
		u32 nic_phy_id, ver, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;
		} else
			eeprom_phy_id = 0;
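
		/* Editorial note: the shift/mask sequence above deliberately
		 * mirrors the one in tg3_phy_probe(), which builds the same
		 * driver-internal TG3_PHY_ID_* format from MII_PHYSID1 and
		 * MII_PHYSID2, so IDs from NVRAM and from the MII registers
		 * compare equal.
		 */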
		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
					  SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);

			if (tg3_flag(tp, 5717_PLUS) ||
			    tg3_asic_rev(tp) == ASIC_REV_5762)
				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
						LED_CTRL_BLINK_RATE_MASK;

			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by
		 * bootcode if bit 18 is set
		 */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
			    !tg3_flag(tp, 57765_PLUS) &&
			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
				tg3_flag_set(tp, ASPM_WORKAROUND);
			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);

		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
	}
done:
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}

static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;

	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
	udelay(10);

	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
		udelay(10);
	}

	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}

static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}
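
/* Editorial worked example of the merge in tg3_read_otp_phycfg() below:
 * if the 32-bit word at OTP_ADDRESS_MAGIC1 reads 0xAAAABBBB and the word
 * at OTP_ADDRESS_MAGIC2 reads 0xCCCCDDDD, the function returns
 * (0xBBBB << 16) | 0xCCCC = 0xBBBBCCCC, i.e. the low half of the first
 * read joined to the high half of the second.
 */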

/* Read the gphy configuration from the OTP region of the chip. The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}

static void tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
			adv |= ADVERTISED_1000baseT_Half;
		adv |= ADVERTISED_1000baseT_Full;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_UNKNOWN;
	tp->link_config.duplex = DUPLEX_UNKNOWN;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_UNKNOWN;
	tp->link_config.active_duplex = DUPLEX_UNKNOWN;

	tp->old_link = -1;
}

static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	if (!tg3_flag(tp, ENABLE_ASF) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane. If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID or, failing
		 * that, the value found in the eeprom area.
		 */
15477 */ 15478 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1); 15479 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2); 15480 15481 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10; 15482 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16; 15483 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0; 15484 15485 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK; 15486 } 15487 15488 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) { 15489 tp->phy_id = hw_phy_id; 15490 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002) 15491 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; 15492 else 15493 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES; 15494 } else { 15495 if (tp->phy_id != TG3_PHY_ID_INVALID) { 15496 /* Do nothing, phy ID already set up in 15497 * tg3_get_eeprom_hw_cfg(). 15498 */ 15499 } else { 15500 struct subsys_tbl_ent *p; 15501 15502 /* No eeprom signature? Try the hardcoded 15503 * subsys device table. 15504 */ 15505 p = tg3_lookup_by_subsys(tp); 15506 if (p) { 15507 tp->phy_id = p->phy_id; 15508 } else if (!tg3_flag(tp, IS_SSB_CORE)) { 15509 /* For now we saw the IDs 0xbc050cd0, 15510 * 0xbc050f80 and 0xbc050c30 on devices 15511 * connected to an BCM4785 and there are 15512 * probably more. Just assume that the phy is 15513 * supported when it is connected to a SSB core 15514 * for now. 15515 */ 15516 return -ENODEV; 15517 } 15518 15519 if (!tp->phy_id || 15520 tp->phy_id == TG3_PHY_ID_BCM8002) 15521 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; 15522 } 15523 } 15524 15525 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && 15526 (tg3_asic_rev(tp) == ASIC_REV_5719 || 15527 tg3_asic_rev(tp) == ASIC_REV_5720 || 15528 tg3_asic_rev(tp) == ASIC_REV_57766 || 15529 tg3_asic_rev(tp) == ASIC_REV_5762 || 15530 (tg3_asic_rev(tp) == ASIC_REV_5717 && 15531 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) || 15532 (tg3_asic_rev(tp) == ASIC_REV_57765 && 15533 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) { 15534 tp->phy_flags |= TG3_PHYFLG_EEE_CAP; 15535 15536 tp->eee.supported = SUPPORTED_100baseT_Full | 15537 SUPPORTED_1000baseT_Full; 15538 tp->eee.advertised = ADVERTISED_100baseT_Full | 15539 ADVERTISED_1000baseT_Full; 15540 tp->eee.eee_enabled = 1; 15541 tp->eee.tx_lpi_enabled = 1; 15542 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US; 15543 } 15544 15545 tg3_phy_init_link_config(tp); 15546 15547 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && 15548 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && 15549 !tg3_flag(tp, ENABLE_APE) && 15550 !tg3_flag(tp, ENABLE_ASF)) { 15551 u32 bmsr, dummy; 15552 15553 tg3_readphy(tp, MII_BMSR, &bmsr); 15554 if (!tg3_readphy(tp, MII_BMSR, &bmsr) && 15555 (bmsr & BMSR_LSTATUS)) 15556 goto skip_phy_reset; 15557 15558 err = tg3_phy_reset(tp); 15559 if (err) 15560 return err; 15561 15562 tg3_phy_set_wirespeed(tp); 15563 15564 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) { 15565 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising, 15566 tp->link_config.flowctrl); 15567 15568 tg3_writephy(tp, MII_BMCR, 15569 BMCR_ANENABLE | BMCR_ANRESTART); 15570 } 15571 } 15572 15573 skip_phy_reset: 15574 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { 15575 err = tg3_init_5401phy_dsp(tp); 15576 if (err) 15577 return err; 15578 15579 err = tg3_init_5401phy_dsp(tp); 15580 } 15581 15582 return err; 15583 } 15584 15585 static void tg3_read_vpd(struct tg3 *tp) 15586 { 15587 u8 *vpd_data; 15588 unsigned int len, vpdlen; 15589 int i; 15590 15591 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen); 15592 if (!vpd_data) 15593 goto out_no_vpd; 15594 15595 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen, 15596 PCI_VPD_RO_KEYWORD_MFR_ID, &len); 
15597 if (i < 0) 15598 goto partno; 15599 15600 if (len != 4 || memcmp(vpd_data + i, "1028", 4)) 15601 goto partno; 15602 15603 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen, 15604 PCI_VPD_RO_KEYWORD_VENDOR0, &len); 15605 if (i < 0) 15606 goto partno; 15607 15608 memset(tp->fw_ver, 0, sizeof(tp->fw_ver)); 15609 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, vpd_data + i); 15610 15611 partno: 15612 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen, 15613 PCI_VPD_RO_KEYWORD_PARTNO, &len); 15614 if (i < 0) 15615 goto out_not_found; 15616 15617 if (len > TG3_BPN_SIZE) 15618 goto out_not_found; 15619 15620 memcpy(tp->board_part_number, &vpd_data[i], len); 15621 15622 out_not_found: 15623 kfree(vpd_data); 15624 if (tp->board_part_number[0]) 15625 return; 15626 15627 out_no_vpd: 15628 if (tg3_asic_rev(tp) == ASIC_REV_5717) { 15629 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || 15630 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C) 15631 strcpy(tp->board_part_number, "BCM5717"); 15632 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718) 15633 strcpy(tp->board_part_number, "BCM5718"); 15634 else 15635 goto nomatch; 15636 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) { 15637 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780) 15638 strcpy(tp->board_part_number, "BCM57780"); 15639 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760) 15640 strcpy(tp->board_part_number, "BCM57760"); 15641 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790) 15642 strcpy(tp->board_part_number, "BCM57790"); 15643 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788) 15644 strcpy(tp->board_part_number, "BCM57788"); 15645 else 15646 goto nomatch; 15647 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) { 15648 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761) 15649 strcpy(tp->board_part_number, "BCM57761"); 15650 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765) 15651 strcpy(tp->board_part_number, "BCM57765"); 15652 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781) 15653 strcpy(tp->board_part_number, "BCM57781"); 15654 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785) 15655 strcpy(tp->board_part_number, "BCM57785"); 15656 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791) 15657 strcpy(tp->board_part_number, "BCM57791"); 15658 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795) 15659 strcpy(tp->board_part_number, "BCM57795"); 15660 else 15661 goto nomatch; 15662 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) { 15663 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762) 15664 strcpy(tp->board_part_number, "BCM57762"); 15665 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766) 15666 strcpy(tp->board_part_number, "BCM57766"); 15667 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782) 15668 strcpy(tp->board_part_number, "BCM57782"); 15669 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786) 15670 strcpy(tp->board_part_number, "BCM57786"); 15671 else 15672 goto nomatch; 15673 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) { 15674 strcpy(tp->board_part_number, "BCM95906"); 15675 } else { 15676 nomatch: 15677 strcpy(tp->board_part_number, "none"); 15678 } 15679 } 15680 15681 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset) 15682 { 15683 u32 val; 15684 15685 if (tg3_nvram_read(tp, offset, &val) || 15686 (val & 0xfc000000) != 0x0c000000 || 15687 tg3_nvram_read(tp, offset + 4, &val) || 15688 val != 0) 15689 return 0; 15690 15691 return 1; 15692 } 15693 15694 static void tg3_read_bc_ver(struct tg3 *tp) 15695 { 15696 u32 val, offset, start, ver_offset; 
15697 int i, dst_off; 15698 bool newver = false; 15699 15700 if (tg3_nvram_read(tp, 0xc, &offset) || 15701 tg3_nvram_read(tp, 0x4, &start)) 15702 return; 15703 15704 offset = tg3_nvram_logical_addr(tp, offset); 15705 15706 if (tg3_nvram_read(tp, offset, &val)) 15707 return; 15708 15709 if ((val & 0xfc000000) == 0x0c000000) { 15710 if (tg3_nvram_read(tp, offset + 4, &val)) 15711 return; 15712 15713 if (val == 0) 15714 newver = true; 15715 } 15716 15717 dst_off = strlen(tp->fw_ver); 15718 15719 if (newver) { 15720 if (TG3_VER_SIZE - dst_off < 16 || 15721 tg3_nvram_read(tp, offset + 8, &ver_offset)) 15722 return; 15723 15724 offset = offset + ver_offset - start; 15725 for (i = 0; i < 16; i += 4) { 15726 __be32 v; 15727 if (tg3_nvram_read_be32(tp, offset + i, &v)) 15728 return; 15729 15730 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v)); 15731 } 15732 } else { 15733 u32 major, minor; 15734 15735 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset)) 15736 return; 15737 15738 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >> 15739 TG3_NVM_BCVER_MAJSFT; 15740 minor = ver_offset & TG3_NVM_BCVER_MINMSK; 15741 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off, 15742 "v%d.%02d", major, minor); 15743 } 15744 } 15745 15746 static void tg3_read_hwsb_ver(struct tg3 *tp) 15747 { 15748 u32 val, major, minor; 15749 15750 /* Use native endian representation */ 15751 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val)) 15752 return; 15753 15754 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >> 15755 TG3_NVM_HWSB_CFG1_MAJSFT; 15756 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >> 15757 TG3_NVM_HWSB_CFG1_MINSFT; 15758 15759 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor); 15760 } 15761 15762 static void tg3_read_sb_ver(struct tg3 *tp, u32 val) 15763 { 15764 u32 offset, major, minor, build; 15765 15766 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1); 15767 15768 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1) 15769 return; 15770 15771 switch (val & TG3_EEPROM_SB_REVISION_MASK) { 15772 case TG3_EEPROM_SB_REVISION_0: 15773 offset = TG3_EEPROM_SB_F1R0_EDH_OFF; 15774 break; 15775 case TG3_EEPROM_SB_REVISION_2: 15776 offset = TG3_EEPROM_SB_F1R2_EDH_OFF; 15777 break; 15778 case TG3_EEPROM_SB_REVISION_3: 15779 offset = TG3_EEPROM_SB_F1R3_EDH_OFF; 15780 break; 15781 case TG3_EEPROM_SB_REVISION_4: 15782 offset = TG3_EEPROM_SB_F1R4_EDH_OFF; 15783 break; 15784 case TG3_EEPROM_SB_REVISION_5: 15785 offset = TG3_EEPROM_SB_F1R5_EDH_OFF; 15786 break; 15787 case TG3_EEPROM_SB_REVISION_6: 15788 offset = TG3_EEPROM_SB_F1R6_EDH_OFF; 15789 break; 15790 default: 15791 return; 15792 } 15793 15794 if (tg3_nvram_read(tp, offset, &val)) 15795 return; 15796 15797 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >> 15798 TG3_EEPROM_SB_EDH_BLD_SHFT; 15799 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >> 15800 TG3_EEPROM_SB_EDH_MAJ_SHFT; 15801 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK; 15802 15803 if (minor > 99 || build > 26) 15804 return; 15805 15806 offset = strlen(tp->fw_ver); 15807 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset, 15808 " v%d.%02d", major, minor); 15809 15810 if (build > 0) { 15811 offset = strlen(tp->fw_ver); 15812 if (offset < TG3_VER_SIZE - 1) 15813 tp->fw_ver[offset] = 'a' + build - 1; 15814 } 15815 } 15816 15817 static void tg3_read_mgmtfw_ver(struct tg3 *tp) 15818 { 15819 u32 val, offset, start; 15820 int i, vlen; 15821 15822 for (offset = TG3_NVM_DIR_START; 15823 offset < TG3_NVM_DIR_END; 15824 offset += TG3_NVM_DIRENT_SIZE) { 15825 if (tg3_nvram_read(tp, offset, &val)) 15826 return; 
15827 15828 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI) 15829 break; 15830 } 15831 15832 if (offset == TG3_NVM_DIR_END) 15833 return; 15834 15835 if (!tg3_flag(tp, 5705_PLUS)) 15836 start = 0x08000000; 15837 else if (tg3_nvram_read(tp, offset - 4, &start)) 15838 return; 15839 15840 if (tg3_nvram_read(tp, offset + 4, &offset) || 15841 !tg3_fw_img_is_valid(tp, offset) || 15842 tg3_nvram_read(tp, offset + 8, &val)) 15843 return; 15844 15845 offset += val - start; 15846 15847 vlen = strlen(tp->fw_ver); 15848 15849 tp->fw_ver[vlen++] = ','; 15850 tp->fw_ver[vlen++] = ' '; 15851 15852 for (i = 0; i < 4; i++) { 15853 __be32 v; 15854 if (tg3_nvram_read_be32(tp, offset, &v)) 15855 return; 15856 15857 offset += sizeof(v); 15858 15859 if (vlen > TG3_VER_SIZE - sizeof(v)) { 15860 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen); 15861 break; 15862 } 15863 15864 memcpy(&tp->fw_ver[vlen], &v, sizeof(v)); 15865 vlen += sizeof(v); 15866 } 15867 } 15868 15869 static void tg3_probe_ncsi(struct tg3 *tp) 15870 { 15871 u32 apedata; 15872 15873 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); 15874 if (apedata != APE_SEG_SIG_MAGIC) 15875 return; 15876 15877 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS); 15878 if (!(apedata & APE_FW_STATUS_READY)) 15879 return; 15880 15881 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) 15882 tg3_flag_set(tp, APE_HAS_NCSI); 15883 } 15884 15885 static void tg3_read_dash_ver(struct tg3 *tp) 15886 { 15887 int vlen; 15888 u32 apedata; 15889 char *fwtype; 15890 15891 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION); 15892 15893 if (tg3_flag(tp, APE_HAS_NCSI)) 15894 fwtype = "NCSI"; 15895 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725) 15896 fwtype = "SMASH"; 15897 else 15898 fwtype = "DASH"; 15899 15900 vlen = strlen(tp->fw_ver); 15901 15902 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d", 15903 fwtype, 15904 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT, 15905 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT, 15906 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT, 15907 (apedata & APE_FW_VERSION_BLDMSK)); 15908 } 15909 15910 static void tg3_read_otp_ver(struct tg3 *tp) 15911 { 15912 u32 val, val2; 15913 15914 if (tg3_asic_rev(tp) != ASIC_REV_5762) 15915 return; 15916 15917 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) && 15918 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) && 15919 TG3_OTP_MAGIC0_VALID(val)) { 15920 u64 val64 = (u64) val << 32 | val2; 15921 u32 ver = 0; 15922 int i, vlen; 15923 15924 for (i = 0; i < 7; i++) { 15925 if ((val64 & 0xff) == 0) 15926 break; 15927 ver = val64 & 0xff; 15928 val64 >>= 8; 15929 } 15930 vlen = strlen(tp->fw_ver); 15931 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver); 15932 } 15933 } 15934 15935 static void tg3_read_fw_ver(struct tg3 *tp) 15936 { 15937 u32 val; 15938 bool vpd_vers = false; 15939 15940 if (tp->fw_ver[0] != 0) 15941 vpd_vers = true; 15942 15943 if (tg3_flag(tp, NO_NVRAM)) { 15944 strcat(tp->fw_ver, "sb"); 15945 tg3_read_otp_ver(tp); 15946 return; 15947 } 15948 15949 if (tg3_nvram_read(tp, 0, &val)) 15950 return; 15951 15952 if (val == TG3_EEPROM_MAGIC) 15953 tg3_read_bc_ver(tp); 15954 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) 15955 tg3_read_sb_ver(tp, val); 15956 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW) 15957 tg3_read_hwsb_ver(tp); 15958 15959 if (tg3_flag(tp, ENABLE_ASF)) { 15960 if (tg3_flag(tp, ENABLE_APE)) { 15961 tg3_probe_ncsi(tp); 15962 if 
(!vpd_vers) 15963 tg3_read_dash_ver(tp); 15964 } else if (!vpd_vers) { 15965 tg3_read_mgmtfw_ver(tp); 15966 } 15967 } 15968 15969 tp->fw_ver[TG3_VER_SIZE - 1] = 0; 15970 } 15971 15972 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp) 15973 { 15974 if (tg3_flag(tp, LRG_PROD_RING_CAP)) 15975 return TG3_RX_RET_MAX_SIZE_5717; 15976 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) 15977 return TG3_RX_RET_MAX_SIZE_5700; 15978 else 15979 return TG3_RX_RET_MAX_SIZE_5705; 15980 } 15981 15982 static const struct pci_device_id tg3_write_reorder_chipsets[] = { 15983 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) }, 15984 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) }, 15985 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) }, 15986 { }, 15987 }; 15988 15989 static struct pci_dev *tg3_find_peer(struct tg3 *tp) 15990 { 15991 struct pci_dev *peer; 15992 unsigned int func, devnr = tp->pdev->devfn & ~7; 15993 15994 for (func = 0; func < 8; func++) { 15995 peer = pci_get_slot(tp->pdev->bus, devnr | func); 15996 if (peer && peer != tp->pdev) 15997 break; 15998 pci_dev_put(peer); 15999 } 16000 /* 5704 can be configured in single-port mode, set peer to 16001 * tp->pdev in that case. 16002 */ 16003 if (!peer) { 16004 peer = tp->pdev; 16005 return peer; 16006 } 16007 16008 /* 16009 * We don't need to keep the refcount elevated; there's no way 16010 * to remove one half of this device without removing the other 16011 */ 16012 pci_dev_put(peer); 16013 16014 return peer; 16015 } 16016 16017 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg) 16018 { 16019 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT; 16020 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) { 16021 u32 reg; 16022 16023 /* All devices that use the alternate 16024 * ASIC REV location have a CPMU. 16025 */ 16026 tg3_flag_set(tp, CPMU_PRESENT); 16027 16028 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || 16029 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C || 16030 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || 16031 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || 16032 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 || 16033 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 || 16034 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 || 16035 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 || 16036 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 || 16037 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 || 16038 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) 16039 reg = TG3PCI_GEN2_PRODID_ASICREV; 16040 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 || 16041 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 || 16042 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 || 16043 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 || 16044 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 || 16045 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 || 16046 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 || 16047 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 || 16048 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 || 16049 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786) 16050 reg = TG3PCI_GEN15_PRODID_ASICREV; 16051 else 16052 reg = TG3PCI_PRODID_ASICREV; 16053 16054 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id); 16055 } 16056 16057 /* Wrong chip ID in 5752 A0. This code can be removed later 16058 * as A0 is not in production. 
 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
	    tg3_asic_rev(tp) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}

static bool tg3_10_100_only_device(struct tg3 *tp,
				   const struct pci_device_id *ent)
{
	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;

	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
		return true;

	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
				return true;
		} else {
			return true;
		}
	}

	return false;
}

static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
{
	u32 misc_ctrl_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err;

	/* Force memory write invalidate off. If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size. The Broadcom driver has this
	 * workaround but turns MWI off all the time, so it is never
	 * used. This seems to suggest that the workaround is
	 * insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* Important! -- Make sure register accesses are byteswapped
	 * correctly. Also, for those chips that require it, make
	 * sure that indirect register accesses are enabled before
	 * the first operation.
16150 */ 16151 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, 16152 &misc_ctrl_reg); 16153 tp->misc_host_ctrl |= (misc_ctrl_reg & 16154 MISC_HOST_CTRL_CHIPREV); 16155 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, 16156 tp->misc_host_ctrl); 16157 16158 tg3_detect_asic_rev(tp, misc_ctrl_reg); 16159 16160 /* If we have 5702/03 A1 or A2 on certain ICH chipsets, 16161 * we need to disable memory and use config. cycles 16162 * only to access all registers. The 5702/03 chips 16163 * can mistakenly decode the special cycles from the 16164 * ICH chipsets as memory write cycles, causing corruption 16165 * of register and memory space. Only certain ICH bridges 16166 * will drive special cycles with non-zero data during the 16167 * address phase which can fall within the 5703's address 16168 * range. This is not an ICH bug as the PCI spec allows 16169 * non-zero address during special cycles. However, only 16170 * these ICH bridges are known to drive non-zero addresses 16171 * during special cycles. 16172 * 16173 * Since special cycles do not cross PCI bridges, we only 16174 * enable this workaround if the 5703 is on the secondary 16175 * bus of these ICH bridges. 16176 */ 16177 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) || 16178 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) { 16179 static struct tg3_dev_id { 16180 u32 vendor; 16181 u32 device; 16182 u32 rev; 16183 } ich_chipsets[] = { 16184 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8, 16185 PCI_ANY_ID }, 16186 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8, 16187 PCI_ANY_ID }, 16188 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11, 16189 0xa }, 16190 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6, 16191 PCI_ANY_ID }, 16192 { }, 16193 }; 16194 struct tg3_dev_id *pci_id = &ich_chipsets[0]; 16195 struct pci_dev *bridge = NULL; 16196 16197 while (pci_id->vendor != 0) { 16198 bridge = pci_get_device(pci_id->vendor, pci_id->device, 16199 bridge); 16200 if (!bridge) { 16201 pci_id++; 16202 continue; 16203 } 16204 if (pci_id->rev != PCI_ANY_ID) { 16205 if (bridge->revision > pci_id->rev) 16206 continue; 16207 } 16208 if (bridge->subordinate && 16209 (bridge->subordinate->number == 16210 tp->pdev->bus->number)) { 16211 tg3_flag_set(tp, ICH_WORKAROUND); 16212 pci_dev_put(bridge); 16213 break; 16214 } 16215 } 16216 } 16217 16218 if (tg3_asic_rev(tp) == ASIC_REV_5701) { 16219 static struct tg3_dev_id { 16220 u32 vendor; 16221 u32 device; 16222 } bridge_chipsets[] = { 16223 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 }, 16224 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 }, 16225 { }, 16226 }; 16227 struct tg3_dev_id *pci_id = &bridge_chipsets[0]; 16228 struct pci_dev *bridge = NULL; 16229 16230 while (pci_id->vendor != 0) { 16231 bridge = pci_get_device(pci_id->vendor, 16232 pci_id->device, 16233 bridge); 16234 if (!bridge) { 16235 pci_id++; 16236 continue; 16237 } 16238 if (bridge->subordinate && 16239 (bridge->subordinate->number <= 16240 tp->pdev->bus->number) && 16241 (bridge->subordinate->busn_res.end >= 16242 tp->pdev->bus->number)) { 16243 tg3_flag_set(tp, 5701_DMA_BUG); 16244 pci_dev_put(bridge); 16245 break; 16246 } 16247 } 16248 } 16249 16250 /* The EPB bridge inside 5714, 5715, and 5780 cannot support 16251 * DMA addresses > 40-bit. This bridge may have other additional 16252 * 57xx devices behind it in some 4-port NIC designs for example. 16253 * Any tg3 device found behind the bridge will also need the 40-bit 16254 * DMA workaround. 
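	 * The else branch below walks every ServerWorks EPB bridge in the
	 * system and applies the same workaround when our bus number falls
	 * inside a bridge's subordinate bus range.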
16255 */ 16256 if (tg3_flag(tp, 5780_CLASS)) { 16257 tg3_flag_set(tp, 40BIT_DMA_BUG); 16258 tp->msi_cap = tp->pdev->msi_cap; 16259 } else { 16260 struct pci_dev *bridge = NULL; 16261 16262 do { 16263 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS, 16264 PCI_DEVICE_ID_SERVERWORKS_EPB, 16265 bridge); 16266 if (bridge && bridge->subordinate && 16267 (bridge->subordinate->number <= 16268 tp->pdev->bus->number) && 16269 (bridge->subordinate->busn_res.end >= 16270 tp->pdev->bus->number)) { 16271 tg3_flag_set(tp, 40BIT_DMA_BUG); 16272 pci_dev_put(bridge); 16273 break; 16274 } 16275 } while (bridge); 16276 } 16277 16278 if (tg3_asic_rev(tp) == ASIC_REV_5704 || 16279 tg3_asic_rev(tp) == ASIC_REV_5714) 16280 tp->pdev_peer = tg3_find_peer(tp); 16281 16282 /* Determine TSO capabilities */ 16283 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0) 16284 ; /* Do nothing. HW bug. */ 16285 else if (tg3_flag(tp, 57765_PLUS)) 16286 tg3_flag_set(tp, HW_TSO_3); 16287 else if (tg3_flag(tp, 5755_PLUS) || 16288 tg3_asic_rev(tp) == ASIC_REV_5906) 16289 tg3_flag_set(tp, HW_TSO_2); 16290 else if (tg3_flag(tp, 5750_PLUS)) { 16291 tg3_flag_set(tp, HW_TSO_1); 16292 tg3_flag_set(tp, TSO_BUG); 16293 if (tg3_asic_rev(tp) == ASIC_REV_5750 && 16294 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2) 16295 tg3_flag_clear(tp, TSO_BUG); 16296 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 && 16297 tg3_asic_rev(tp) != ASIC_REV_5701 && 16298 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) { 16299 tg3_flag_set(tp, FW_TSO); 16300 tg3_flag_set(tp, TSO_BUG); 16301 if (tg3_asic_rev(tp) == ASIC_REV_5705) 16302 tp->fw_needed = FIRMWARE_TG3TSO5; 16303 else 16304 tp->fw_needed = FIRMWARE_TG3TSO; 16305 } 16306 16307 /* Selectively allow TSO based on operating conditions */ 16308 if (tg3_flag(tp, HW_TSO_1) || 16309 tg3_flag(tp, HW_TSO_2) || 16310 tg3_flag(tp, HW_TSO_3) || 16311 tg3_flag(tp, FW_TSO)) { 16312 /* For firmware TSO, assume ASF is disabled. 16313 * We'll disable TSO later if we discover ASF 16314 * is enabled in tg3_get_eeprom_hw_cfg(). 
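		 * (See the FW_TSO && ENABLE_ASF check that runs after
		 * tg3_get_eeprom_hw_cfg() below.)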
16315 */ 16316 tg3_flag_set(tp, TSO_CAPABLE); 16317 } else { 16318 tg3_flag_clear(tp, TSO_CAPABLE); 16319 tg3_flag_clear(tp, TSO_BUG); 16320 tp->fw_needed = NULL; 16321 } 16322 16323 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) 16324 tp->fw_needed = FIRMWARE_TG3; 16325 16326 if (tg3_asic_rev(tp) == ASIC_REV_57766) 16327 tp->fw_needed = FIRMWARE_TG357766; 16328 16329 tp->irq_max = 1; 16330 16331 if (tg3_flag(tp, 5750_PLUS)) { 16332 tg3_flag_set(tp, SUPPORT_MSI); 16333 if (tg3_chip_rev(tp) == CHIPREV_5750_AX || 16334 tg3_chip_rev(tp) == CHIPREV_5750_BX || 16335 (tg3_asic_rev(tp) == ASIC_REV_5714 && 16336 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 && 16337 tp->pdev_peer == tp->pdev)) 16338 tg3_flag_clear(tp, SUPPORT_MSI); 16339 16340 if (tg3_flag(tp, 5755_PLUS) || 16341 tg3_asic_rev(tp) == ASIC_REV_5906) { 16342 tg3_flag_set(tp, 1SHOT_MSI); 16343 } 16344 16345 if (tg3_flag(tp, 57765_PLUS)) { 16346 tg3_flag_set(tp, SUPPORT_MSIX); 16347 tp->irq_max = TG3_IRQ_MAX_VECS; 16348 } 16349 } 16350 16351 tp->txq_max = 1; 16352 tp->rxq_max = 1; 16353 if (tp->irq_max > 1) { 16354 tp->rxq_max = TG3_RSS_MAX_NUM_QS; 16355 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS); 16356 16357 if (tg3_asic_rev(tp) == ASIC_REV_5719 || 16358 tg3_asic_rev(tp) == ASIC_REV_5720) 16359 tp->txq_max = tp->irq_max - 1; 16360 } 16361 16362 if (tg3_flag(tp, 5755_PLUS) || 16363 tg3_asic_rev(tp) == ASIC_REV_5906) 16364 tg3_flag_set(tp, SHORT_DMA_BUG); 16365 16366 if (tg3_asic_rev(tp) == ASIC_REV_5719) 16367 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K; 16368 16369 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 16370 tg3_asic_rev(tp) == ASIC_REV_5719 || 16371 tg3_asic_rev(tp) == ASIC_REV_5720 || 16372 tg3_asic_rev(tp) == ASIC_REV_5762) 16373 tg3_flag_set(tp, LRG_PROD_RING_CAP); 16374 16375 if (tg3_flag(tp, 57765_PLUS) && 16376 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0) 16377 tg3_flag_set(tp, USE_JUMBO_BDFLAG); 16378 16379 if (!tg3_flag(tp, 5705_PLUS) || 16380 tg3_flag(tp, 5780_CLASS) || 16381 tg3_flag(tp, USE_JUMBO_BDFLAG)) 16382 tg3_flag_set(tp, JUMBO_CAPABLE); 16383 16384 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, 16385 &pci_state_reg); 16386 16387 if (pci_is_pcie(tp->pdev)) { 16388 u16 lnkctl; 16389 16390 tg3_flag_set(tp, PCI_EXPRESS); 16391 16392 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl); 16393 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) { 16394 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 16395 tg3_flag_clear(tp, HW_TSO_2); 16396 tg3_flag_clear(tp, TSO_CAPABLE); 16397 } 16398 if (tg3_asic_rev(tp) == ASIC_REV_5784 || 16399 tg3_asic_rev(tp) == ASIC_REV_5761 || 16400 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 || 16401 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1) 16402 tg3_flag_set(tp, CLKREQ_BUG); 16403 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) { 16404 tg3_flag_set(tp, L1PLLPD_EN); 16405 } 16406 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) { 16407 /* BCM5785 devices are effectively PCIe devices, and should 16408 * follow PCIe codepaths, but do not have a PCIe capabilities 16409 * section. 
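		 * pci_is_pcie() therefore reports false for them, so the
		 * PCI_EXPRESS flag has to be set by hand here.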
 */
		tg3_flag_set(tp, PCI_EXPRESS);
	} else if (!tg3_flag(tp, 5705_PLUS) ||
		   tg3_flag(tp, 5780_CLASS)) {
		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
		if (!tp->pcix_cap) {
			dev_err(&tp->pdev->dev,
				"Cannot find PCI-X capability, aborting\n");
			return -EIO;
		}

		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
			tg3_flag_set(tp, PCIX_MODE);
	}

	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles. We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
	if (pci_dev_present(tg3_write_reorder_chipsets) &&
	    !tg3_flag(tp, PCI_EXPRESS))
		tg3_flag_set(tp, MBOX_WRITE_REORDER);

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
			     &tp->pci_cacheline_sz);
	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
			     &tp->pci_lat_timer);
	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Important! -- It is critical that the PCI-X hw workaround
	 * situation is decided before the first MMIO register access.
	 */
	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to workaround a bug.
		 */
		tg3_flag_set(tp, TXD_MBOX_HWBUG);

		/* If we are in PCI-X mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (tg3_flag(tp, PCIX_MODE)) {
			u32 pm_reg;

			tg3_flag_set(tp, PCIX_TARGET_HWBUG);

			/* The chip can have its power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev,
					      tp->pdev->pm_cap + PCI_PM_CTRL,
					      &pm_reg);
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev,
					       tp->pdev->pm_cap + PCI_PM_CTRL,
					       pm_reg);

			/* Also, force SERR#/PERR# in PCI command.
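			 * so that bus parity errors are reported via
			 * PERR#/SERR# instead of being silently ignored.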
*/ 16479 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); 16480 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR; 16481 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); 16482 } 16483 } 16484 16485 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0) 16486 tg3_flag_set(tp, PCI_HIGH_SPEED); 16487 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0) 16488 tg3_flag_set(tp, PCI_32BIT); 16489 16490 /* Chip-specific fixup from Broadcom driver */ 16491 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) && 16492 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) { 16493 pci_state_reg |= PCISTATE_RETRY_SAME_DMA; 16494 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg); 16495 } 16496 16497 /* Default fast path register access methods */ 16498 tp->read32 = tg3_read32; 16499 tp->write32 = tg3_write32; 16500 tp->read32_mbox = tg3_read32; 16501 tp->write32_mbox = tg3_write32; 16502 tp->write32_tx_mbox = tg3_write32; 16503 tp->write32_rx_mbox = tg3_write32; 16504 16505 /* Various workaround register access methods */ 16506 if (tg3_flag(tp, PCIX_TARGET_HWBUG)) 16507 tp->write32 = tg3_write_indirect_reg32; 16508 else if (tg3_asic_rev(tp) == ASIC_REV_5701 || 16509 (tg3_flag(tp, PCI_EXPRESS) && 16510 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) { 16511 /* 16512 * Back to back register writes can cause problems on these 16513 * chips, the workaround is to read back all reg writes 16514 * except those to mailbox regs. 16515 * 16516 * See tg3_write_indirect_reg32(). 16517 */ 16518 tp->write32 = tg3_write_flush_reg32; 16519 } 16520 16521 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) { 16522 tp->write32_tx_mbox = tg3_write32_tx_mbox; 16523 if (tg3_flag(tp, MBOX_WRITE_REORDER)) 16524 tp->write32_rx_mbox = tg3_write_flush_reg32; 16525 } 16526 16527 if (tg3_flag(tp, ICH_WORKAROUND)) { 16528 tp->read32 = tg3_read_indirect_reg32; 16529 tp->write32 = tg3_write_indirect_reg32; 16530 tp->read32_mbox = tg3_read_indirect_mbox; 16531 tp->write32_mbox = tg3_write_indirect_mbox; 16532 tp->write32_tx_mbox = tg3_write_indirect_mbox; 16533 tp->write32_rx_mbox = tg3_write_indirect_mbox; 16534 16535 iounmap(tp->regs); 16536 tp->regs = NULL; 16537 16538 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); 16539 pci_cmd &= ~PCI_COMMAND_MEMORY; 16540 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); 16541 } 16542 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 16543 tp->read32_mbox = tg3_read32_mbox_5906; 16544 tp->write32_mbox = tg3_write32_mbox_5906; 16545 tp->write32_tx_mbox = tg3_write32_mbox_5906; 16546 tp->write32_rx_mbox = tg3_write32_mbox_5906; 16547 } 16548 16549 if (tp->write32 == tg3_write_indirect_reg32 || 16550 (tg3_flag(tp, PCIX_MODE) && 16551 (tg3_asic_rev(tp) == ASIC_REV_5700 || 16552 tg3_asic_rev(tp) == ASIC_REV_5701))) 16553 tg3_flag_set(tp, SRAM_USE_CONFIG); 16554 16555 /* The memory arbiter has to be enabled in order for SRAM accesses 16556 * to succeed. Normally on powerup the tg3 chip firmware will make 16557 * sure it is enabled, but other entities such as system netboot 16558 * code might disable it. 
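	 * Re-enabling it below is harmless when it is already on; the
	 * enable bit is simply ORed back in.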
16559 */ 16560 val = tr32(MEMARB_MODE); 16561 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); 16562 16563 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3; 16564 if (tg3_asic_rev(tp) == ASIC_REV_5704 || 16565 tg3_flag(tp, 5780_CLASS)) { 16566 if (tg3_flag(tp, PCIX_MODE)) { 16567 pci_read_config_dword(tp->pdev, 16568 tp->pcix_cap + PCI_X_STATUS, 16569 &val); 16570 tp->pci_fn = val & 0x7; 16571 } 16572 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 || 16573 tg3_asic_rev(tp) == ASIC_REV_5719 || 16574 tg3_asic_rev(tp) == ASIC_REV_5720) { 16575 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val); 16576 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG) 16577 val = tr32(TG3_CPMU_STATUS); 16578 16579 if (tg3_asic_rev(tp) == ASIC_REV_5717) 16580 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0; 16581 else 16582 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >> 16583 TG3_CPMU_STATUS_FSHFT_5719; 16584 } 16585 16586 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) { 16587 tp->write32_tx_mbox = tg3_write_flush_reg32; 16588 tp->write32_rx_mbox = tg3_write_flush_reg32; 16589 } 16590 16591 /* Get eeprom hw config before calling tg3_set_power_state(). 16592 * In particular, the TG3_FLAG_IS_NIC flag must be 16593 * determined before calling tg3_set_power_state() so that 16594 * we know whether or not to switch out of Vaux power. 16595 * When the flag is set, it means that GPIO1 is used for eeprom 16596 * write protect and also implies that it is a LOM where GPIOs 16597 * are not used to switch power. 16598 */ 16599 tg3_get_eeprom_hw_cfg(tp); 16600 16601 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) { 16602 tg3_flag_clear(tp, TSO_CAPABLE); 16603 tg3_flag_clear(tp, TSO_BUG); 16604 tp->fw_needed = NULL; 16605 } 16606 16607 if (tg3_flag(tp, ENABLE_APE)) { 16608 /* Allow reads and writes to the 16609 * APE register and memory space. 16610 */ 16611 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR | 16612 PCISTATE_ALLOW_APE_SHMEM_WR | 16613 PCISTATE_ALLOW_APE_PSPACE_WR; 16614 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, 16615 pci_state_reg); 16616 16617 tg3_ape_lock_init(tp); 16618 tp->ape_hb_interval = 16619 msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC); 16620 } 16621 16622 /* Set up tp->grc_local_ctrl before calling 16623 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high 16624 * will bring 5700's external PHY out of reset. 16625 * It is also used as eeprom write protect on LOMs. 16626 */ 16627 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM; 16628 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 16629 tg3_flag(tp, EEPROM_WRITE_PROT)) 16630 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | 16631 GRC_LCLCTRL_GPIO_OUTPUT1); 16632 /* Unused GPIO3 must be driven as output on 5752 because there 16633 * are no pull-up resistors on unused GPIO pins. 16634 */ 16635 else if (tg3_asic_rev(tp) == ASIC_REV_5752) 16636 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; 16637 16638 if (tg3_asic_rev(tp) == ASIC_REV_5755 || 16639 tg3_asic_rev(tp) == ASIC_REV_57780 || 16640 tg3_flag(tp, 57765_CLASS)) 16641 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; 16642 16643 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || 16644 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { 16645 /* Turn off the debug UART. */ 16646 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; 16647 if (tg3_flag(tp, IS_NIC)) 16648 /* Keep VMain power. 
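		 * (GPIO0 is driven high below, which on these boards keeps
		 * power on VMain rather than falling back to Vaux.)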
*/ 16649 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | 16650 GRC_LCLCTRL_GPIO_OUTPUT0; 16651 } 16652 16653 if (tg3_asic_rev(tp) == ASIC_REV_5762) 16654 tp->grc_local_ctrl |= 16655 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL; 16656 16657 /* Switch out of Vaux if it is a NIC */ 16658 tg3_pwrsrc_switch_to_vmain(tp); 16659 16660 /* Derive initial jumbo mode from MTU assigned in 16661 * ether_setup() via the alloc_etherdev() call 16662 */ 16663 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS)) 16664 tg3_flag_set(tp, JUMBO_RING_ENABLE); 16665 16666 /* Determine WakeOnLan speed to use. */ 16667 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 16668 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || 16669 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 || 16670 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) { 16671 tg3_flag_clear(tp, WOL_SPEED_100MB); 16672 } else { 16673 tg3_flag_set(tp, WOL_SPEED_100MB); 16674 } 16675 16676 if (tg3_asic_rev(tp) == ASIC_REV_5906) 16677 tp->phy_flags |= TG3_PHYFLG_IS_FET; 16678 16679 /* A few boards don't want Ethernet@WireSpeed phy feature */ 16680 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 16681 (tg3_asic_rev(tp) == ASIC_REV_5705 && 16682 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) && 16683 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) || 16684 (tp->phy_flags & TG3_PHYFLG_IS_FET) || 16685 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 16686 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED; 16687 16688 if (tg3_chip_rev(tp) == CHIPREV_5703_AX || 16689 tg3_chip_rev(tp) == CHIPREV_5704_AX) 16690 tp->phy_flags |= TG3_PHYFLG_ADC_BUG; 16691 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) 16692 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG; 16693 16694 if (tg3_flag(tp, 5705_PLUS) && 16695 !(tp->phy_flags & TG3_PHYFLG_IS_FET) && 16696 tg3_asic_rev(tp) != ASIC_REV_5785 && 16697 tg3_asic_rev(tp) != ASIC_REV_57780 && 16698 !tg3_flag(tp, 57765_PLUS)) { 16699 if (tg3_asic_rev(tp) == ASIC_REV_5755 || 16700 tg3_asic_rev(tp) == ASIC_REV_5787 || 16701 tg3_asic_rev(tp) == ASIC_REV_5784 || 16702 tg3_asic_rev(tp) == ASIC_REV_5761) { 16703 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 && 16704 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722) 16705 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG; 16706 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M) 16707 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM; 16708 } else 16709 tp->phy_flags |= TG3_PHYFLG_BER_BUG; 16710 } 16711 16712 if (tg3_asic_rev(tp) == ASIC_REV_5784 && 16713 tg3_chip_rev(tp) != CHIPREV_5784_AX) { 16714 tp->phy_otp = tg3_read_otp_phycfg(tp); 16715 if (tp->phy_otp == 0) 16716 tp->phy_otp = TG3_OTP_DEFAULT; 16717 } 16718 16719 if (tg3_flag(tp, CPMU_PRESENT)) 16720 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST; 16721 else 16722 tp->mi_mode = MAC_MI_MODE_BASE; 16723 16724 tp->coalesce_mode = 0; 16725 if (tg3_chip_rev(tp) != CHIPREV_5700_AX && 16726 tg3_chip_rev(tp) != CHIPREV_5700_BX) 16727 tp->coalesce_mode |= HOSTCC_MODE_32BYTE; 16728 16729 /* Set these bits to enable statistics workaround. */ 16730 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 16731 tg3_asic_rev(tp) == ASIC_REV_5762 || 16732 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || 16733 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) { 16734 tp->coalesce_mode |= HOSTCC_MODE_ATTN; 16735 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN; 16736 } 16737 16738 if (tg3_asic_rev(tp) == ASIC_REV_5785 || 16739 tg3_asic_rev(tp) == ASIC_REV_57780) 16740 tg3_flag_set(tp, USE_PHYLIB); 16741 16742 err = tg3_mdio_init(tp); 16743 if (err) 16744 return err; 16745 16746 /* Initialize data/descriptor byte/word swapping. 
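	 * On the 5720 and 5762 a few extra B2HRX/HTX2B bits are preserved
	 * from the current GRC_MODE value; all other chips keep only
	 * GRC_MODE_HOST_STACKUP.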
*/ 16747 val = tr32(GRC_MODE); 16748 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 16749 tg3_asic_rev(tp) == ASIC_REV_5762) 16750 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA | 16751 GRC_MODE_WORD_SWAP_B2HRX_DATA | 16752 GRC_MODE_B2HRX_ENABLE | 16753 GRC_MODE_HTX2B_ENABLE | 16754 GRC_MODE_HOST_STACKUP); 16755 else 16756 val &= GRC_MODE_HOST_STACKUP; 16757 16758 tw32(GRC_MODE, val | tp->grc_mode); 16759 16760 tg3_switch_clocks(tp); 16761 16762 /* Clear this out for sanity. */ 16763 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); 16764 16765 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */ 16766 tw32(TG3PCI_REG_BASE_ADDR, 0); 16767 16768 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, 16769 &pci_state_reg); 16770 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 && 16771 !tg3_flag(tp, PCIX_TARGET_HWBUG)) { 16772 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || 16773 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 || 16774 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 || 16775 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) { 16776 void __iomem *sram_base; 16777 16778 /* Write some dummy words into the SRAM status block 16779 * area, see if it reads back correctly. If the return 16780 * value is bad, force enable the PCIX workaround. 16781 */ 16782 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK; 16783 16784 writel(0x00000000, sram_base); 16785 writel(0x00000000, sram_base + 4); 16786 writel(0xffffffff, sram_base + 4); 16787 if (readl(sram_base) != 0x00000000) 16788 tg3_flag_set(tp, PCIX_TARGET_HWBUG); 16789 } 16790 } 16791 16792 udelay(50); 16793 tg3_nvram_init(tp); 16794 16795 /* If the device has an NVRAM, no need to load patch firmware */ 16796 if (tg3_asic_rev(tp) == ASIC_REV_57766 && 16797 !tg3_flag(tp, NO_NVRAM)) 16798 tp->fw_needed = NULL; 16799 16800 grc_misc_cfg = tr32(GRC_MISC_CFG); 16801 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK; 16802 16803 if (tg3_asic_rev(tp) == ASIC_REV_5705 && 16804 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 || 16805 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M)) 16806 tg3_flag_set(tp, IS_5788); 16807 16808 if (!tg3_flag(tp, IS_5788) && 16809 tg3_asic_rev(tp) != ASIC_REV_5700) 16810 tg3_flag_set(tp, TAGGED_STATUS); 16811 if (tg3_flag(tp, TAGGED_STATUS)) { 16812 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD | 16813 HOSTCC_MODE_CLRTICK_TXBD); 16814 16815 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS; 16816 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, 16817 tp->misc_host_ctrl); 16818 } 16819 16820 /* Preserve the APE MAC_MODE bits */ 16821 if (tg3_flag(tp, ENABLE_APE)) 16822 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; 16823 else 16824 tp->mac_mode = 0; 16825 16826 if (tg3_10_100_only_device(tp, ent)) 16827 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY; 16828 16829 err = tg3_phy_probe(tp); 16830 if (err) { 16831 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err); 16832 /* ... but do not return immediately ... */ 16833 tg3_mdio_fini(tp); 16834 } 16835 16836 tg3_read_vpd(tp); 16837 tg3_read_fw_ver(tp); 16838 16839 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 16840 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT; 16841 } else { 16842 if (tg3_asic_rev(tp) == ASIC_REV_5700) 16843 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT; 16844 else 16845 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT; 16846 } 16847 16848 /* 5700 {AX,BX} chips have a broken status block link 16849 * change bit implementation, so we must use the 16850 * status register in those cases. 
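	 * (USE_LINKCHG_REG is set for every 5700 just below.)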
16851 */ 16852 if (tg3_asic_rev(tp) == ASIC_REV_5700) 16853 tg3_flag_set(tp, USE_LINKCHG_REG); 16854 else 16855 tg3_flag_clear(tp, USE_LINKCHG_REG); 16856 16857 /* The led_ctrl is set during tg3_phy_probe, here we might 16858 * have to force the link status polling mechanism based 16859 * upon subsystem IDs. 16860 */ 16861 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL && 16862 tg3_asic_rev(tp) == ASIC_REV_5701 && 16863 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { 16864 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT; 16865 tg3_flag_set(tp, USE_LINKCHG_REG); 16866 } 16867 16868 /* For all SERDES we poll the MAC status register. */ 16869 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 16870 tg3_flag_set(tp, POLL_SERDES); 16871 else 16872 tg3_flag_clear(tp, POLL_SERDES); 16873 16874 if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF)) 16875 tg3_flag_set(tp, POLL_CPMU_LINK); 16876 16877 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN; 16878 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD; 16879 if (tg3_asic_rev(tp) == ASIC_REV_5701 && 16880 tg3_flag(tp, PCIX_MODE)) { 16881 tp->rx_offset = NET_SKB_PAD; 16882 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 16883 tp->rx_copy_thresh = ~(u16)0; 16884 #endif 16885 } 16886 16887 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1; 16888 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1; 16889 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1; 16890 16891 tp->rx_std_max_post = tp->rx_std_ring_mask + 1; 16892 16893 /* Increment the rx prod index on the rx std ring by at most 16894 * 8 for these chips to workaround hw errata. 16895 */ 16896 if (tg3_asic_rev(tp) == ASIC_REV_5750 || 16897 tg3_asic_rev(tp) == ASIC_REV_5752 || 16898 tg3_asic_rev(tp) == ASIC_REV_5755) 16899 tp->rx_std_max_post = 8; 16900 16901 if (tg3_flag(tp, ASPM_WORKAROUND)) 16902 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) & 16903 PCIE_PWR_MGMT_L1_THRESH_MSK; 16904 16905 return err; 16906 } 16907 16908 static int tg3_get_device_address(struct tg3 *tp, u8 *addr) 16909 { 16910 u32 hi, lo, mac_offset; 16911 int addr_ok = 0; 16912 int err; 16913 16914 if (!eth_platform_get_mac_address(&tp->pdev->dev, addr)) 16915 return 0; 16916 16917 if (tg3_flag(tp, IS_SSB_CORE)) { 16918 err = ssb_gige_get_macaddr(tp->pdev, addr); 16919 if (!err && is_valid_ether_addr(addr)) 16920 return 0; 16921 } 16922 16923 mac_offset = 0x7c; 16924 if (tg3_asic_rev(tp) == ASIC_REV_5704 || 16925 tg3_flag(tp, 5780_CLASS)) { 16926 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) 16927 mac_offset = 0xcc; 16928 if (tg3_nvram_lock(tp)) 16929 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET); 16930 else 16931 tg3_nvram_unlock(tp); 16932 } else if (tg3_flag(tp, 5717_PLUS)) { 16933 if (tp->pci_fn & 1) 16934 mac_offset = 0xcc; 16935 if (tp->pci_fn > 1) 16936 mac_offset += 0x18c; 16937 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) 16938 mac_offset = 0x10; 16939 16940 /* First try to get it from MAC address mailbox. */ 16941 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi); 16942 if ((hi >> 16) == 0x484b) { 16943 addr[0] = (hi >> 8) & 0xff; 16944 addr[1] = (hi >> 0) & 0xff; 16945 16946 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo); 16947 addr[2] = (lo >> 24) & 0xff; 16948 addr[3] = (lo >> 16) & 0xff; 16949 addr[4] = (lo >> 8) & 0xff; 16950 addr[5] = (lo >> 0) & 0xff; 16951 16952 /* Some old bootcode may report a 0 MAC address in SRAM */ 16953 addr_ok = is_valid_ether_addr(addr); 16954 } 16955 if (!addr_ok) { 16956 /* Next, try NVRAM. 
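		 * The address sits at mac_offset as a big-endian word pair:
		 * the last two bytes of the first word followed by all four
		 * bytes of the second.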
*/ 16957 if (!tg3_flag(tp, NO_NVRAM) && 16958 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) && 16959 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) { 16960 memcpy(&addr[0], ((char *)&hi) + 2, 2); 16961 memcpy(&addr[2], (char *)&lo, sizeof(lo)); 16962 } 16963 /* Finally just fetch it out of the MAC control regs. */ 16964 else { 16965 hi = tr32(MAC_ADDR_0_HIGH); 16966 lo = tr32(MAC_ADDR_0_LOW); 16967 16968 addr[5] = lo & 0xff; 16969 addr[4] = (lo >> 8) & 0xff; 16970 addr[3] = (lo >> 16) & 0xff; 16971 addr[2] = (lo >> 24) & 0xff; 16972 addr[1] = hi & 0xff; 16973 addr[0] = (hi >> 8) & 0xff; 16974 } 16975 } 16976 16977 if (!is_valid_ether_addr(addr)) 16978 return -EINVAL; 16979 return 0; 16980 } 16981 16982 #define BOUNDARY_SINGLE_CACHELINE 1 16983 #define BOUNDARY_MULTI_CACHELINE 2 16984 16985 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val) 16986 { 16987 int cacheline_size; 16988 u8 byte; 16989 int goal; 16990 16991 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte); 16992 if (byte == 0) 16993 cacheline_size = 1024; 16994 else 16995 cacheline_size = (int) byte * 4; 16996 16997 /* On 5703 and later chips, the boundary bits have no 16998 * effect. 16999 */ 17000 if (tg3_asic_rev(tp) != ASIC_REV_5700 && 17001 tg3_asic_rev(tp) != ASIC_REV_5701 && 17002 !tg3_flag(tp, PCI_EXPRESS)) 17003 goto out; 17004 17005 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC) 17006 goal = BOUNDARY_MULTI_CACHELINE; 17007 #else 17008 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA) 17009 goal = BOUNDARY_SINGLE_CACHELINE; 17010 #else 17011 goal = 0; 17012 #endif 17013 #endif 17014 17015 if (tg3_flag(tp, 57765_PLUS)) { 17016 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT; 17017 goto out; 17018 } 17019 17020 if (!goal) 17021 goto out; 17022 17023 /* PCI controllers on most RISC systems tend to disconnect 17024 * when a device tries to burst across a cache-line boundary. 17025 * Therefore, letting tg3 do so just wastes PCI bandwidth. 17026 * 17027 * Unfortunately, for PCI-E there are only limited 17028 * write-side controls for this, and thus for reads 17029 * we will still get the disconnects. We'll also waste 17030 * these PCI cycles for both read and write for chips 17031 * other than 5700 and 5701 which do not implement the 17032 * boundary bits. 
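	 *
	 * For example, with a 64-byte cache line the conventional-PCI
	 * switch below selects the 64-byte read/write boundary for
	 * BOUNDARY_SINGLE_CACHELINE, while BOUNDARY_MULTI_CACHELINE
	 * falls through to the 256-byte setting.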
 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			fallthrough;
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			fallthrough;
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			fallthrough;
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			fallthrough;
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			fallthrough;
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}

static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
			   int size, bool to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
	 * the *second* time the tg3 driver was loaded after an initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
17159 */ 17160 if (to_device) { 17161 test_desc.cqid_sqid = (13 << 8) | 2; 17162 17163 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE); 17164 udelay(40); 17165 } else { 17166 test_desc.cqid_sqid = (16 << 8) | 7; 17167 17168 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE); 17169 udelay(40); 17170 } 17171 test_desc.flags = 0x00000005; 17172 17173 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) { 17174 u32 val; 17175 17176 val = *(((u32 *)&test_desc) + i); 17177 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 17178 sram_dma_descs + (i * sizeof(u32))); 17179 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); 17180 } 17181 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); 17182 17183 if (to_device) 17184 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs); 17185 else 17186 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs); 17187 17188 ret = -ENODEV; 17189 for (i = 0; i < 40; i++) { 17190 u32 val; 17191 17192 if (to_device) 17193 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ); 17194 else 17195 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ); 17196 if ((val & 0xffff) == sram_dma_descs) { 17197 ret = 0; 17198 break; 17199 } 17200 17201 udelay(100); 17202 } 17203 17204 return ret; 17205 } 17206 17207 #define TEST_BUFFER_SIZE 0x2000 17208 17209 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = { 17210 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) }, 17211 { }, 17212 }; 17213 17214 static int tg3_test_dma(struct tg3 *tp) 17215 { 17216 dma_addr_t buf_dma; 17217 u32 *buf, saved_dma_rwctrl; 17218 int ret = 0; 17219 17220 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, 17221 &buf_dma, GFP_KERNEL); 17222 if (!buf) { 17223 ret = -ENOMEM; 17224 goto out_nofree; 17225 } 17226 17227 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) | 17228 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT)); 17229 17230 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl); 17231 17232 if (tg3_flag(tp, 57765_PLUS)) 17233 goto out; 17234 17235 if (tg3_flag(tp, PCI_EXPRESS)) { 17236 /* DMA read watermark not used on PCIE */ 17237 tp->dma_rwctrl |= 0x00180000; 17238 } else if (!tg3_flag(tp, PCIX_MODE)) { 17239 if (tg3_asic_rev(tp) == ASIC_REV_5705 || 17240 tg3_asic_rev(tp) == ASIC_REV_5750) 17241 tp->dma_rwctrl |= 0x003f0000; 17242 else 17243 tp->dma_rwctrl |= 0x003f000f; 17244 } else { 17245 if (tg3_asic_rev(tp) == ASIC_REV_5703 || 17246 tg3_asic_rev(tp) == ASIC_REV_5704) { 17247 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f); 17248 u32 read_water = 0x7; 17249 17250 /* If the 5704 is behind the EPB bridge, we can 17251 * do the less restrictive ONE_DMA workaround for 17252 * better performance. 
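			 * (That configuration is identified below by
			 * 40BIT_DMA_BUG being set together with
			 * ASIC_REV_5704.)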
17253 */ 17254 if (tg3_flag(tp, 40BIT_DMA_BUG) && 17255 tg3_asic_rev(tp) == ASIC_REV_5704) 17256 tp->dma_rwctrl |= 0x8000; 17257 else if (ccval == 0x6 || ccval == 0x7) 17258 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA; 17259 17260 if (tg3_asic_rev(tp) == ASIC_REV_5703) 17261 read_water = 4; 17262 /* Set bit 23 to enable PCIX hw bug fix */ 17263 tp->dma_rwctrl |= 17264 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) | 17265 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) | 17266 (1 << 23); 17267 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) { 17268 /* 5780 always in PCIX mode */ 17269 tp->dma_rwctrl |= 0x00144000; 17270 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) { 17271 /* 5714 always in PCIX mode */ 17272 tp->dma_rwctrl |= 0x00148000; 17273 } else { 17274 tp->dma_rwctrl |= 0x001b000f; 17275 } 17276 } 17277 if (tg3_flag(tp, ONE_DMA_AT_ONCE)) 17278 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA; 17279 17280 if (tg3_asic_rev(tp) == ASIC_REV_5703 || 17281 tg3_asic_rev(tp) == ASIC_REV_5704) 17282 tp->dma_rwctrl &= 0xfffffff0; 17283 17284 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 17285 tg3_asic_rev(tp) == ASIC_REV_5701) { 17286 /* Remove this if it causes problems for some boards. */ 17287 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT; 17288 17289 /* On 5700/5701 chips, we need to set this bit. 17290 * Otherwise the chip will issue cacheline transactions 17291 * to streamable DMA memory with not all the byte 17292 * enables turned on. This is an error on several 17293 * RISC PCI controllers, in particular sparc64. 17294 * 17295 * On 5703/5704 chips, this bit has been reassigned 17296 * a different meaning. In particular, it is used 17297 * on those chips to enable a PCI-X workaround. 17298 */ 17299 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE; 17300 } 17301 17302 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 17303 17304 17305 if (tg3_asic_rev(tp) != ASIC_REV_5700 && 17306 tg3_asic_rev(tp) != ASIC_REV_5701) 17307 goto out; 17308 17309 /* It is best to perform DMA test with maximum write burst size 17310 * to expose the 5700/5701 write DMA bug. 17311 */ 17312 saved_dma_rwctrl = tp->dma_rwctrl; 17313 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; 17314 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 17315 17316 while (1) { 17317 u32 *p = buf, i; 17318 17319 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) 17320 p[i] = i; 17321 17322 /* Send the buffer to the chip. */ 17323 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true); 17324 if (ret) { 17325 dev_err(&tp->pdev->dev, 17326 "%s: Buffer write failed. err = %d\n", 17327 __func__, ret); 17328 break; 17329 } 17330 17331 /* Now read it back. */ 17332 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false); 17333 if (ret) { 17334 dev_err(&tp->pdev->dev, "%s: Buffer read failed. " 17335 "err = %d\n", __func__, ret); 17336 break; 17337 } 17338 17339 /* Verify it. */ 17340 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) { 17341 if (p[i] == i) 17342 continue; 17343 17344 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != 17345 DMA_RWCTRL_WRITE_BNDRY_16) { 17346 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; 17347 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; 17348 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 17349 break; 17350 } else { 17351 dev_err(&tp->pdev->dev, 17352 "%s: Buffer corrupted on read back! " 17353 "(%d != %d)\n", __func__, p[i], i); 17354 ret = -ENODEV; 17355 goto out; 17356 } 17357 } 17358 17359 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) { 17360 /* Success. 
*/ 17361 ret = 0; 17362 break; 17363 } 17364 } 17365 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != 17366 DMA_RWCTRL_WRITE_BNDRY_16) { 17367 /* DMA test passed without adjusting DMA boundary, 17368 * now look for chipsets that are known to expose the 17369 * DMA bug without failing the test. 17370 */ 17371 if (pci_dev_present(tg3_dma_wait_state_chipsets)) { 17372 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; 17373 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; 17374 } else { 17375 /* Safe to use the calculated DMA boundary. */ 17376 tp->dma_rwctrl = saved_dma_rwctrl; 17377 } 17378 17379 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 17380 } 17381 17382 out: 17383 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma); 17384 out_nofree: 17385 return ret; 17386 } 17387 17388 static void tg3_init_bufmgr_config(struct tg3 *tp) 17389 { 17390 if (tg3_flag(tp, 57765_PLUS)) { 17391 tp->bufmgr_config.mbuf_read_dma_low_water = 17392 DEFAULT_MB_RDMA_LOW_WATER_5705; 17393 tp->bufmgr_config.mbuf_mac_rx_low_water = 17394 DEFAULT_MB_MACRX_LOW_WATER_57765; 17395 tp->bufmgr_config.mbuf_high_water = 17396 DEFAULT_MB_HIGH_WATER_57765; 17397 17398 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = 17399 DEFAULT_MB_RDMA_LOW_WATER_5705; 17400 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = 17401 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765; 17402 tp->bufmgr_config.mbuf_high_water_jumbo = 17403 DEFAULT_MB_HIGH_WATER_JUMBO_57765; 17404 } else if (tg3_flag(tp, 5705_PLUS)) { 17405 tp->bufmgr_config.mbuf_read_dma_low_water = 17406 DEFAULT_MB_RDMA_LOW_WATER_5705; 17407 tp->bufmgr_config.mbuf_mac_rx_low_water = 17408 DEFAULT_MB_MACRX_LOW_WATER_5705; 17409 tp->bufmgr_config.mbuf_high_water = 17410 DEFAULT_MB_HIGH_WATER_5705; 17411 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 17412 tp->bufmgr_config.mbuf_mac_rx_low_water = 17413 DEFAULT_MB_MACRX_LOW_WATER_5906; 17414 tp->bufmgr_config.mbuf_high_water = 17415 DEFAULT_MB_HIGH_WATER_5906; 17416 } 17417 17418 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = 17419 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780; 17420 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = 17421 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780; 17422 tp->bufmgr_config.mbuf_high_water_jumbo = 17423 DEFAULT_MB_HIGH_WATER_JUMBO_5780; 17424 } else { 17425 tp->bufmgr_config.mbuf_read_dma_low_water = 17426 DEFAULT_MB_RDMA_LOW_WATER; 17427 tp->bufmgr_config.mbuf_mac_rx_low_water = 17428 DEFAULT_MB_MACRX_LOW_WATER; 17429 tp->bufmgr_config.mbuf_high_water = 17430 DEFAULT_MB_HIGH_WATER; 17431 17432 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = 17433 DEFAULT_MB_RDMA_LOW_WATER_JUMBO; 17434 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = 17435 DEFAULT_MB_MACRX_LOW_WATER_JUMBO; 17436 tp->bufmgr_config.mbuf_high_water_jumbo = 17437 DEFAULT_MB_HIGH_WATER_JUMBO; 17438 } 17439 17440 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER; 17441 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER; 17442 } 17443 17444 static char *tg3_phy_string(struct tg3 *tp) 17445 { 17446 switch (tp->phy_id & TG3_PHY_ID_MASK) { 17447 case TG3_PHY_ID_BCM5400: return "5400"; 17448 case TG3_PHY_ID_BCM5401: return "5401"; 17449 case TG3_PHY_ID_BCM5411: return "5411"; 17450 case TG3_PHY_ID_BCM5701: return "5701"; 17451 case TG3_PHY_ID_BCM5703: return "5703"; 17452 case TG3_PHY_ID_BCM5704: return "5704"; 17453 case TG3_PHY_ID_BCM5705: return "5705"; 17454 case TG3_PHY_ID_BCM5750: return "5750"; 17455 case TG3_PHY_ID_BCM5752: return "5752"; 17456 case TG3_PHY_ID_BCM5714: return "5714"; 17457 case TG3_PHY_ID_BCM5780: return "5780"; 17458 case 
TG3_PHY_ID_BCM5755: return "5755"; 17459 case TG3_PHY_ID_BCM5787: return "5787"; 17460 case TG3_PHY_ID_BCM5784: return "5784"; 17461 case TG3_PHY_ID_BCM5756: return "5722/5756"; 17462 case TG3_PHY_ID_BCM5906: return "5906"; 17463 case TG3_PHY_ID_BCM5761: return "5761"; 17464 case TG3_PHY_ID_BCM5718C: return "5718C"; 17465 case TG3_PHY_ID_BCM5718S: return "5718S"; 17466 case TG3_PHY_ID_BCM57765: return "57765"; 17467 case TG3_PHY_ID_BCM5719C: return "5719C"; 17468 case TG3_PHY_ID_BCM5720C: return "5720C"; 17469 case TG3_PHY_ID_BCM5762: return "5762C"; 17470 case TG3_PHY_ID_BCM8002: return "8002/serdes"; 17471 case 0: return "serdes"; 17472 default: return "unknown"; 17473 } 17474 } 17475 17476 static char *tg3_bus_string(struct tg3 *tp, char *str) 17477 { 17478 if (tg3_flag(tp, PCI_EXPRESS)) { 17479 strcpy(str, "PCI Express"); 17480 return str; 17481 } else if (tg3_flag(tp, PCIX_MODE)) { 17482 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f; 17483 17484 strcpy(str, "PCIX:"); 17485 17486 if ((clock_ctrl == 7) || 17487 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) == 17488 GRC_MISC_CFG_BOARD_ID_5704CIOBE)) 17489 strcat(str, "133MHz"); 17490 else if (clock_ctrl == 0) 17491 strcat(str, "33MHz"); 17492 else if (clock_ctrl == 2) 17493 strcat(str, "50MHz"); 17494 else if (clock_ctrl == 4) 17495 strcat(str, "66MHz"); 17496 else if (clock_ctrl == 6) 17497 strcat(str, "100MHz"); 17498 } else { 17499 strcpy(str, "PCI:"); 17500 if (tg3_flag(tp, PCI_HIGH_SPEED)) 17501 strcat(str, "66MHz"); 17502 else 17503 strcat(str, "33MHz"); 17504 } 17505 if (tg3_flag(tp, PCI_32BIT)) 17506 strcat(str, ":32-bit"); 17507 else 17508 strcat(str, ":64-bit"); 17509 return str; 17510 } 17511 17512 static void tg3_init_coal(struct tg3 *tp) 17513 { 17514 struct ethtool_coalesce *ec = &tp->coal; 17515 17516 memset(ec, 0, sizeof(*ec)); 17517 ec->cmd = ETHTOOL_GCOALESCE; 17518 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS; 17519 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS; 17520 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES; 17521 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES; 17522 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT; 17523 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT; 17524 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT; 17525 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT; 17526 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS; 17527 17528 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD | 17529 HOSTCC_MODE_CLRTICK_TXBD)) { 17530 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS; 17531 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS; 17532 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS; 17533 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS; 17534 } 17535 17536 if (tg3_flag(tp, 5705_PLUS)) { 17537 ec->rx_coalesce_usecs_irq = 0; 17538 ec->tx_coalesce_usecs_irq = 0; 17539 ec->stats_block_coalesce_usecs = 0; 17540 } 17541 } 17542 17543 static int tg3_init_one(struct pci_dev *pdev, 17544 const struct pci_device_id *ent) 17545 { 17546 struct net_device *dev; 17547 struct tg3 *tp; 17548 int i, err; 17549 u32 sndmbx, rcvmbx, intmbx; 17550 char str[40]; 17551 u64 dma_mask, persist_dma_mask; 17552 netdev_features_t features = 0; 17553 u8 addr[ETH_ALEN] __aligned(2); 17554 17555 err = pci_enable_device(pdev); 17556 if (err) { 17557 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 17558 return err; 17559 } 17560 17561 err = pci_request_regions(pdev, DRV_MODULE_NAME); 17562 if (err) { 17563 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); 
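		/* The common error path below disables the PCI device for us. */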
17564 goto err_out_disable_pdev; 17565 } 17566 17567 pci_set_master(pdev); 17568 17569 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS); 17570 if (!dev) { 17571 err = -ENOMEM; 17572 goto err_out_free_res; 17573 } 17574 17575 SET_NETDEV_DEV(dev, &pdev->dev); 17576 17577 tp = netdev_priv(dev); 17578 tp->pdev = pdev; 17579 tp->dev = dev; 17580 tp->rx_mode = TG3_DEF_RX_MODE; 17581 tp->tx_mode = TG3_DEF_TX_MODE; 17582 tp->irq_sync = 1; 17583 tp->pcierr_recovery = false; 17584 17585 if (tg3_debug > 0) 17586 tp->msg_enable = tg3_debug; 17587 else 17588 tp->msg_enable = TG3_DEF_MSG_ENABLE; 17589 17590 if (pdev_is_ssb_gige_core(pdev)) { 17591 tg3_flag_set(tp, IS_SSB_CORE); 17592 if (ssb_gige_must_flush_posted_writes(pdev)) 17593 tg3_flag_set(tp, FLUSH_POSTED_WRITES); 17594 if (ssb_gige_one_dma_at_once(pdev)) 17595 tg3_flag_set(tp, ONE_DMA_AT_ONCE); 17596 if (ssb_gige_have_roboswitch(pdev)) { 17597 tg3_flag_set(tp, USE_PHYLIB); 17598 tg3_flag_set(tp, ROBOSWITCH); 17599 } 17600 if (ssb_gige_is_rgmii(pdev)) 17601 tg3_flag_set(tp, RGMII_MODE); 17602 } 17603 17604 /* The word/byte swap controls here control register access byte 17605 * swapping. DMA data byte swapping is controlled in the GRC_MODE 17606 * setting below. 17607 */ 17608 tp->misc_host_ctrl = 17609 MISC_HOST_CTRL_MASK_PCI_INT | 17610 MISC_HOST_CTRL_WORD_SWAP | 17611 MISC_HOST_CTRL_INDIR_ACCESS | 17612 MISC_HOST_CTRL_PCISTATE_RW; 17613 17614 /* The NONFRM (non-frame) byte/word swap controls take effect 17615 * on descriptor entries, anything which isn't packet data. 17616 * 17617 * The StrongARM chips on the board (one for tx, one for rx) 17618 * are running in big-endian mode. 17619 */ 17620 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA | 17621 GRC_MODE_WSWAP_NONFRM_DATA); 17622 #ifdef __BIG_ENDIAN 17623 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA; 17624 #endif 17625 spin_lock_init(&tp->lock); 17626 spin_lock_init(&tp->indirect_lock); 17627 INIT_WORK(&tp->reset_task, tg3_reset_task); 17628 17629 tp->regs = pci_ioremap_bar(pdev, BAR_0); 17630 if (!tp->regs) { 17631 dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); 17632 err = -ENOMEM; 17633 goto err_out_free_dev; 17634 } 17635 17636 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || 17637 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E || 17638 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S || 17639 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE || 17640 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || 17641 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C || 17642 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || 17643 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || 17644 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 || 17645 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 || 17646 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 || 17647 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 || 17648 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 || 17649 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 || 17650 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) { 17651 tg3_flag_set(tp, ENABLE_APE); 17652 tp->aperegs = pci_ioremap_bar(pdev, BAR_2); 17653 if (!tp->aperegs) { 17654 dev_err(&pdev->dev, 17655 "Cannot map APE registers, aborting\n"); 17656 err = -ENOMEM; 17657 goto err_out_iounmap; 17658 } 17659 } 17660 17661 tp->rx_pending = TG3_DEF_RX_RING_PENDING; 17662 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING; 17663 17664 dev->ethtool_ops = &tg3_ethtool_ops; 17665 dev->watchdog_timeo = TG3_TX_TIMEOUT; 17666 dev->netdev_ops = &tg3_netdev_ops; 17667 dev->irq = 
pdev->irq;

	err = tg3_get_invariants(tp, ent);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = dma_set_mask(&pdev->dev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = dma_set_coherent_mask(&pdev->dev,
						    persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
		    tg3_asic_rev(tp) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
			 NETIF_F_HW_VLAN_CTAG_RX;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
	 * loopback for the remaining devices.
17756 */ 17757 if (tg3_asic_rev(tp) != ASIC_REV_5780 && 17758 !tg3_flag(tp, CPMU_PRESENT)) 17759 /* Add the loopback capability */ 17760 features |= NETIF_F_LOOPBACK; 17761 17762 dev->hw_features |= features; 17763 dev->priv_flags |= IFF_UNICAST_FLT; 17764 17765 /* MTU range: 60 - 9000 or 1500, depending on hardware */ 17766 dev->min_mtu = TG3_MIN_MTU; 17767 dev->max_mtu = TG3_MAX_MTU(tp); 17768 17769 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 && 17770 !tg3_flag(tp, TSO_CAPABLE) && 17771 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) { 17772 tg3_flag_set(tp, MAX_RXPEND_64); 17773 tp->rx_pending = 63; 17774 } 17775 17776 err = tg3_get_device_address(tp, addr); 17777 if (err) { 17778 dev_err(&pdev->dev, 17779 "Could not obtain valid ethernet address, aborting\n"); 17780 goto err_out_apeunmap; 17781 } 17782 eth_hw_addr_set(dev, addr); 17783 17784 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW; 17785 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW; 17786 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW; 17787 for (i = 0; i < tp->irq_max; i++) { 17788 struct tg3_napi *tnapi = &tp->napi[i]; 17789 17790 tnapi->tp = tp; 17791 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING; 17792 17793 tnapi->int_mbox = intmbx; 17794 if (i <= 4) 17795 intmbx += 0x8; 17796 else 17797 intmbx += 0x4; 17798 17799 tnapi->consmbox = rcvmbx; 17800 tnapi->prodmbox = sndmbx; 17801 17802 if (i) 17803 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1); 17804 else 17805 tnapi->coal_now = HOSTCC_MODE_NOW; 17806 17807 if (!tg3_flag(tp, SUPPORT_MSIX)) 17808 break; 17809 17810 /* 17811 * If we support MSIX, we'll be using RSS. If we're using 17812 * RSS, the first vector only handles link interrupts and the 17813 * remaining vectors handle rx and tx interrupts. Reuse the 17814 * mailbox values for the next iteration. The values we setup 17815 * above are still useful for the single vectored mode. 17816 */ 17817 if (!i) 17818 continue; 17819 17820 rcvmbx += 0x8; 17821 17822 if (sndmbx & 0x4) 17823 sndmbx -= 0x4; 17824 else 17825 sndmbx += 0xc; 17826 } 17827 17828 /* 17829 * Reset chip in case UNDI or EFI driver did not shutdown 17830 * DMA self test will enable WDMAC and we'll see (spurious) 17831 * pending DMA on the PCI bus at that point. 
	/*
	 * Reset chip in case UNDI or EFI driver did not shut down cleanly;
	 * otherwise the DMA self test will enable WDMAC and we'll see
	 * (spurious) pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tg3_full_lock(tp, 0);
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, PTP_CAPABLE);

	tg3_timer_init(tp);

	tg3_carrier_off(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	if (tg3_flag(tp, PTP_CAPABLE)) {
		tg3_ptp_init(tp);
		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
						   &tp->pdev->dev);
		if (IS_ERR(tp->ptp_clock))
			tp->ptp_clock = NULL;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tg3_chip_rev_id(tp),
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
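	/* Editorial note: the %d-bit figure above works because
	 * DMA_BIT_MASK(n) is ((1ULL << n) - 1) (with n == 64 special-cased
	 * to ~0ULL), so an equivalent sketch could recover the width
	 * directly with fls64(), which returns the highest set bit:
	 *
	 *	netdev_info(dev, "dma_mask[%d-bit]\n",
	 *		    fls64(pdev->dma_mask));
	 */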
	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	return err;
}

static void tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		tg3_ptp_fini(tp);

		release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
	}
}

#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, true);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

unlock:
	rtnl_unlock();
	return err;
}

static int tg3_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp,
			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

unlock:
	rtnl_unlock();
	return err;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
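/* For reference, a hand-rolled (editorial, unused) equivalent of the
 * SIMPLE_DEV_PM_OPS() line above: SET_SYSTEM_SLEEP_PM_OPS() points all
 * six system-sleep callbacks (suspend/resume, freeze/thaw,
 * poweroff/restore) at the same pair of handlers, and compiles away
 * when CONFIG_PM_SLEEP is not set.
 */
static const struct dev_pm_ops tg3_pm_ops_sketch __maybe_unused = {
	SET_SYSTEM_SLEEP_PM_OPS(tg3_suspend, tg3_resume)
};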
static void tg3_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);

	tg3_reset_task_cancel(tp);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		dev_close(dev);

	tg3_power_down(tp);

	rtnl_unlock();

	pci_disable_device(pdev);
}

/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	rtnl_lock();

	/* Could be second call or maybe we don't have netdev yet */
	if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
		goto done;

	/* We needn't recover from permanent error */
	if (state == pci_channel_io_frozen)
		tp->pcierr_recovery = true;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure) {
		if (netdev) {
			tg3_napi_enable(tp);
			dev_close(netdev);
		}
		err = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_disable_device(pdev);
	}

	rtnl_unlock();

	return err;
}

/**
 * tg3_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netdev || !netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
		tg3_napi_enable(tp);
		dev_close(netdev);
	}
	rtnl_unlock();

	return rc;
}
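/* Editorial sketch (a hypothetical helper, not wired into the error
 * handlers): the verdict tg3_io_error_detected() reaches for each
 * pci_channel_state_t value above reduces to this mapping --
 * pci_channel_io_normal and pci_channel_io_frozen request a slot
 * reset, while pci_channel_io_perm_failure detaches the device.
 */
static pci_ers_result_t __maybe_unused
tg3_ers_verdict_sketch(pci_channel_state_t state)
{
	return state == pci_channel_io_perm_failure ?
	       PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}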
/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netdev || !netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	tg3_phy_start(tp);

done:
	tp->pcierr_recovery = false;
	rtnl_unlock();
}

static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= &tg3_pm_ops,
	.shutdown	= tg3_shutdown,
};

module_pci_driver(tg3_driver);
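/*
 * Editorial note: module_pci_driver() above generates the module
 * init/exit boilerplate; it is roughly equivalent to writing
 *
 *	static int __init tg3_driver_init(void)
 *	{
 *		return pci_register_driver(&tg3_driver);
 *	}
 *	module_init(tg3_driver_init);
 *
 *	static void __exit tg3_driver_exit(void)
 *	{
 *		pci_unregister_driver(&tg3_driver);
 *	}
 *	module_exit(tg3_driver_exit);
 */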