/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2016 Broadcom Corporation.
 * Copyright (C) 2016-2017 Broadcom Limited.
 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 * refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2016 Broadcom Corporation.
 *	Copyright (C) 2016-2017 Broadcom Ltd.
 *	Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 *	refers to Broadcom Inc. and/or its subsidiaries.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/crc32poly.h>

#include <net/checksum.h>
#include <net/gso.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
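
/* Illustration (added note, not functional): tg3_flag(tp, ENABLE_APE)
 * expands to _tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags), i.e. a
 * test_bit() on the tg3_flags bitmap.  The indirection through the
 * enum-typed helpers means a misspelled flag name fails to compile
 * instead of silently testing the wrong bit.
 */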

#define DRV_MODULE_NAME		"tg3"
/* DO NOT UPDATE TG3_*_NUM defines */
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			137

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			ETH_ZLEN
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
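
/* Worked example (added note): TG3_TX_RING_SIZE is the power of two
 * 512, so '& (TG3_TX_RING_SIZE - 1)' is exactly '% TG3_TX_RING_SIZE':
 * NEXT_TX(510) == 511 and NEXT_TX(511) wraps to 0, with no divide or
 * modulo instruction emitted.
 */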

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif
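
/* Added note (illustration): on architectures with efficient unaligned
 * access, TG3_RX_COPY_THRESH(tp) collapses to the constant 256, so the
 * rx path's copy-vs-remap decision is a compare against an immediate.
 * Otherwise it reads tp->rx_copy_thresh, which the 5701 PCIX
 * workaround described above can raise at runtime to force the
 * double copy.
 */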

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG357766);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
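
/* Usage sketch (added note, assuming the standard NETIF_MSG_* values
 * from the netdevice headers): TG3_DEF_MSG_ENABLE above works out to
 * 0x00ff, and message categories can be chosen at load time, e.g.
 * "modprobe tg3 tg3_debug=0x0007" for DRV, PROBE and LINK only.
 */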

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
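
/* Added note (based on the usual PCI probe convention): the
 * TG3_DRV_DATA_FLAG_* values attached above travel with the match
 * entry, so the probe routine can read them back from the matched
 * pci_device_id's driver_data field to identify 10/100-only parts.
 */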
"tx_deferred" }, 400 { "tx_excessive_collisions" }, 401 { "tx_late_collisions" }, 402 { "tx_collide_2times" }, 403 { "tx_collide_3times" }, 404 { "tx_collide_4times" }, 405 { "tx_collide_5times" }, 406 { "tx_collide_6times" }, 407 { "tx_collide_7times" }, 408 { "tx_collide_8times" }, 409 { "tx_collide_9times" }, 410 { "tx_collide_10times" }, 411 { "tx_collide_11times" }, 412 { "tx_collide_12times" }, 413 { "tx_collide_13times" }, 414 { "tx_collide_14times" }, 415 { "tx_collide_15times" }, 416 { "tx_ucast_packets" }, 417 { "tx_mcast_packets" }, 418 { "tx_bcast_packets" }, 419 { "tx_carrier_sense_errors" }, 420 { "tx_discards" }, 421 { "tx_errors" }, 422 423 { "dma_writeq_full" }, 424 { "dma_write_prioq_full" }, 425 { "rxbds_empty" }, 426 { "rx_discards" }, 427 { "rx_errors" }, 428 { "rx_threshold_hit" }, 429 430 { "dma_readq_full" }, 431 { "dma_read_prioq_full" }, 432 { "tx_comp_queue_full" }, 433 434 { "ring_set_send_prod_index" }, 435 { "ring_status_update" }, 436 { "nic_irqs" }, 437 { "nic_avoided_irqs" }, 438 { "nic_tx_threshold_hit" }, 439 440 { "mbuf_lwm_thresh_hit" }, 441 }; 442 443 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys) 444 #define TG3_NVRAM_TEST 0 445 #define TG3_LINK_TEST 1 446 #define TG3_REGISTER_TEST 2 447 #define TG3_MEMORY_TEST 3 448 #define TG3_MAC_LOOPB_TEST 4 449 #define TG3_PHY_LOOPB_TEST 5 450 #define TG3_EXT_LOOPB_TEST 6 451 #define TG3_INTERRUPT_TEST 7 452 453 454 static const struct { 455 const char string[ETH_GSTRING_LEN]; 456 } ethtool_test_keys[] = { 457 [TG3_NVRAM_TEST] = { "nvram test (online) " }, 458 [TG3_LINK_TEST] = { "link test (online) " }, 459 [TG3_REGISTER_TEST] = { "register test (offline)" }, 460 [TG3_MEMORY_TEST] = { "memory test (offline)" }, 461 [TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" }, 462 [TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" }, 463 [TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" }, 464 [TG3_INTERRUPT_TEST] = { "interrupt test (offline)" }, 465 }; 466 467 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys) 468 469 470 static void tg3_write32(struct tg3 *tp, u32 off, u32 val) 471 { 472 writel(val, tp->regs + off); 473 } 474 475 static u32 tg3_read32(struct tg3 *tp, u32 off) 476 { 477 return readl(tp->regs + off); 478 } 479 480 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val) 481 { 482 writel(val, tp->aperegs + off); 483 } 484 485 static u32 tg3_ape_read32(struct tg3 *tp, u32 off) 486 { 487 return readl(tp->aperegs + off); 488 } 489 490 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val) 491 { 492 unsigned long flags; 493 494 spin_lock_irqsave(&tp->indirect_lock, flags); 495 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); 496 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val); 497 spin_unlock_irqrestore(&tp->indirect_lock, flags); 498 } 499 500 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val) 501 { 502 writel(val, tp->regs + off); 503 readl(tp->regs + off); 504 } 505 506 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off) 507 { 508 unsigned long flags; 509 u32 val; 510 511 spin_lock_irqsave(&tp->indirect_lock, flags); 512 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); 513 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val); 514 spin_unlock_irqrestore(&tp->indirect_lock, flags); 515 return val; 516 } 517 518 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val) 519 { 520 unsigned long flags; 521 522 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) { 523 

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
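
/* Added note: the indirect accessors above tunnel MMIO through PCI
 * config space: TG3PCI_REG_BASE_ADDR selects the register window and
 * TG3PCI_REG_DATA moves the data, with tp->indirect_lock serializing
 * the two-step sequence.  Mailbox offsets are additionally rebased by
 * 0x5600 into that window.
 */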

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
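
/* Usage sketch (added note): power and clock transitions use the
 * flushing forms so the posted write is forced out and given time to
 * settle; GPIO power switching elsewhere in the driver, for example,
 * is done as tw32_wait_f(GRC_LOCAL_CTRL, ..., TG3_GRC_LCLCTL_PWRSW_DELAY).
 */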

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver doesn't hold any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}

}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
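
/* Added note: each APE lock occupies one 32-bit word, hence the
 * "reg + 4 * locknum" addressing above.  PHY locks always use the
 * DRIVER bit, while the GPIO/GRC/MEM locks use a per-PCI-function bit
 * (function 0 maps to the DRIVER bit as well).
 */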

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}
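
/* Pattern note (added): a successful tg3_ape_event_lock() returns with
 * TG3_APE_LOCK_MEM held and no event pending; the caller then writes
 * TG3_APE_EVENT_STATUS, drops the lock, and rings the APE_EVENT_1
 * doorbell, as tg3_ape_scratchpad_read() and tg3_ape_send_event()
 * below demonstrate.
 */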

#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
#endif

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 20 milliseconds for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 20000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
				APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_send_ape_heartbeat(struct tg3 *tp,
				   unsigned long interval)
{
	/* Check if the heartbeat interval has been exceeded */
	if (!tg3_flag(tp, ENABLE_APE) ||
	    time_before(jiffies, tp->ape_hb_jiffies + interval))
		return;

	tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
	tp->ape_hb_jiffies = jiffies;
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
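
/* Added note: the interrupt mailboxes encode state in the value
 * written: tg3_disable_ints() writes 0x00000001 to mask each vector,
 * while tg3_enable_ints() writes (last_tag << 24) to unmask the vector
 * and acknowledge status-block work up to that tag (see
 * tg3_int_reenable() below for the tagged-status fast path).
 */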

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS		5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}
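
/* Usage sketch (added note): callers poll PHY state through the MI_COM
 * shuttle above, e.g.
 *
 *	u32 bmsr;
 *
 *	if (!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS))
 *		... link is up ...
 *
 * The helpers return 0 on success and -EBUSY if MI_COM_BUSY fails to
 * clear within PHY_BUSY_LOOPS polls.
 */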

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
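
/* Added note: tg3_phy_cl45_{read,write}() above follow the standard
 * Clause 22 indirection for MMD registers: select the devad in the MMD
 * control register, write the register address, switch the control
 * register to no-increment data mode, then move data through the
 * address/data register.
 */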

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
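
/* Timing note (added): BMCR_RESET is self-clearing, so tg3_bmcr_reset()
 * polls it for up to 5000 * 10 usec = 50 msec before giving up with
 * -EBUSY.
 */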
1553 */ 1554 if (tg3_readphy(tp, MII_BMCR, ®) || (reg & BMCR_PDOWN)) 1555 tg3_bmcr_reset(tp); 1556 1557 i = mdiobus_register(tp->mdio_bus); 1558 if (i) { 1559 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i); 1560 mdiobus_free(tp->mdio_bus); 1561 return i; 1562 } 1563 1564 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 1565 1566 if (!phydev || !phydev->drv) { 1567 dev_warn(&tp->pdev->dev, "No PHY devices\n"); 1568 mdiobus_unregister(tp->mdio_bus); 1569 mdiobus_free(tp->mdio_bus); 1570 return -ENODEV; 1571 } 1572 1573 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { 1574 case PHY_ID_BCM57780: 1575 phydev->interface = PHY_INTERFACE_MODE_GMII; 1576 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE; 1577 break; 1578 case PHY_ID_BCM50610: 1579 case PHY_ID_BCM50610M: 1580 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE | 1581 PHY_BRCM_RX_REFCLK_UNUSED | 1582 PHY_BRCM_DIS_TXCRXC_NOENRGY | 1583 PHY_BRCM_AUTO_PWRDWN_ENABLE; 1584 fallthrough; 1585 case PHY_ID_RTL8211C: 1586 phydev->interface = PHY_INTERFACE_MODE_RGMII; 1587 break; 1588 case PHY_ID_RTL8201E: 1589 case PHY_ID_BCMAC131: 1590 phydev->interface = PHY_INTERFACE_MODE_MII; 1591 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE; 1592 tp->phy_flags |= TG3_PHYFLG_IS_FET; 1593 break; 1594 } 1595 1596 tg3_flag_set(tp, MDIOBUS_INITED); 1597 1598 if (tg3_asic_rev(tp) == ASIC_REV_5785) 1599 tg3_mdio_config_5785(tp); 1600 1601 return 0; 1602 } 1603 1604 static void tg3_mdio_fini(struct tg3 *tp) 1605 { 1606 if (tg3_flag(tp, MDIOBUS_INITED)) { 1607 tg3_flag_clear(tp, MDIOBUS_INITED); 1608 mdiobus_unregister(tp->mdio_bus); 1609 mdiobus_free(tp->mdio_bus); 1610 } 1611 } 1612 1613 /* tp->lock is held. */ 1614 static inline void tg3_generate_fw_event(struct tg3 *tp) 1615 { 1616 u32 val; 1617 1618 val = tr32(GRC_RX_CPU_EVENT); 1619 val |= GRC_RX_CPU_DRIVER_EVENT; 1620 tw32_f(GRC_RX_CPU_EVENT, val); 1621 1622 tp->last_event_jiffies = jiffies; 1623 } 1624 1625 #define TG3_FW_EVENT_TIMEOUT_USEC 2500 1626 1627 /* tp->lock is held. */ 1628 static void tg3_wait_for_event_ack(struct tg3 *tp) 1629 { 1630 int i; 1631 unsigned int delay_cnt; 1632 long time_remain; 1633 1634 /* If enough time has passed, no wait is necessary. */ 1635 time_remain = (long)(tp->last_event_jiffies + 1 + 1636 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) - 1637 (long)jiffies; 1638 if (time_remain < 0) 1639 return; 1640 1641 /* Check if we can shorten the wait time. */ 1642 delay_cnt = jiffies_to_usecs(time_remain); 1643 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC) 1644 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC; 1645 delay_cnt = (delay_cnt >> 3) + 1; 1646 1647 for (i = 0; i < delay_cnt; i++) { 1648 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT)) 1649 break; 1650 if (pci_channel_offline(tp->pdev)) 1651 break; 1652 1653 udelay(8); 1654 } 1655 } 1656 1657 /* tp->lock is held. 

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x", pci_dev_id(tp->pdev));
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		fallthrough;
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}
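
/* Added note: the driver-to-firmware handshake is a doorbell protocol:
 * wait for the previous GRC_RX_CPU_DRIVER_EVENT to be acked, deposit a
 * command in the NIC_SRAM_FW_CMD_* mailboxes, then raise the event bit
 * again via tg3_generate_fw_event().  tg3_ump_link_report() below
 * follows exactly this sequence.
 */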

/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}
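
/* Layout note (added): each u32 handed to the firmware packs two
 * 16-bit MII registers, high half first: BMCR/BMSR, ADVERTISE/LPA,
 * CTRL1000/STAT1000, then PHYADDR alone in the high half, which is
 * presumably why the command length above is written as 14 bytes
 * rather than 16.
 */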
"enabled" : "disabled"); 1893 1894 tg3_ump_link_report(tp); 1895 } 1896 1897 tp->link_up = netif_carrier_ok(tp->dev); 1898 } 1899 1900 static u32 tg3_decode_flowctrl_1000T(u32 adv) 1901 { 1902 u32 flowctrl = 0; 1903 1904 if (adv & ADVERTISE_PAUSE_CAP) { 1905 flowctrl |= FLOW_CTRL_RX; 1906 if (!(adv & ADVERTISE_PAUSE_ASYM)) 1907 flowctrl |= FLOW_CTRL_TX; 1908 } else if (adv & ADVERTISE_PAUSE_ASYM) 1909 flowctrl |= FLOW_CTRL_TX; 1910 1911 return flowctrl; 1912 } 1913 1914 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl) 1915 { 1916 u16 miireg; 1917 1918 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX)) 1919 miireg = ADVERTISE_1000XPAUSE; 1920 else if (flow_ctrl & FLOW_CTRL_TX) 1921 miireg = ADVERTISE_1000XPSE_ASYM; 1922 else if (flow_ctrl & FLOW_CTRL_RX) 1923 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM; 1924 else 1925 miireg = 0; 1926 1927 return miireg; 1928 } 1929 1930 static u32 tg3_decode_flowctrl_1000X(u32 adv) 1931 { 1932 u32 flowctrl = 0; 1933 1934 if (adv & ADVERTISE_1000XPAUSE) { 1935 flowctrl |= FLOW_CTRL_RX; 1936 if (!(adv & ADVERTISE_1000XPSE_ASYM)) 1937 flowctrl |= FLOW_CTRL_TX; 1938 } else if (adv & ADVERTISE_1000XPSE_ASYM) 1939 flowctrl |= FLOW_CTRL_TX; 1940 1941 return flowctrl; 1942 } 1943 1944 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv) 1945 { 1946 u8 cap = 0; 1947 1948 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) { 1949 cap = FLOW_CTRL_TX | FLOW_CTRL_RX; 1950 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) { 1951 if (lcladv & ADVERTISE_1000XPAUSE) 1952 cap = FLOW_CTRL_RX; 1953 if (rmtadv & ADVERTISE_1000XPAUSE) 1954 cap = FLOW_CTRL_TX; 1955 } 1956 1957 return cap; 1958 } 1959 1960 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv) 1961 { 1962 u8 autoneg; 1963 u8 flowctrl = 0; 1964 u32 old_rx_mode = tp->rx_mode; 1965 u32 old_tx_mode = tp->tx_mode; 1966 1967 if (tg3_flag(tp, USE_PHYLIB)) 1968 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg; 1969 else 1970 autoneg = tp->link_config.autoneg; 1971 1972 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) { 1973 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) 1974 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv); 1975 else 1976 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv); 1977 } else 1978 flowctrl = tp->link_config.flowctrl; 1979 1980 tp->link_config.active_flowctrl = flowctrl; 1981 1982 if (flowctrl & FLOW_CTRL_RX) 1983 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE; 1984 else 1985 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE; 1986 1987 if (old_rx_mode != tp->rx_mode) 1988 tw32_f(MAC_RX_MODE, tp->rx_mode); 1989 1990 if (flowctrl & FLOW_CTRL_TX) 1991 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE; 1992 else 1993 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE; 1994 1995 if (old_tx_mode != tp->tx_mode) 1996 tw32_f(MAC_TX_MODE, tp->tx_mode); 1997 } 1998 1999 static void tg3_adjust_link(struct net_device *dev) 2000 { 2001 u8 oldflowctrl, linkmesg = 0; 2002 u32 mac_mode, lcl_adv, rmt_adv; 2003 struct tg3 *tp = netdev_priv(dev); 2004 struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 2005 2006 spin_lock_bh(&tp->lock); 2007 2008 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK | 2009 MAC_MODE_HALF_DUPLEX); 2010 2011 oldflowctrl = tp->link_config.active_flowctrl; 2012 2013 if (phydev->link) { 2014 lcl_adv = 0; 2015 rmt_adv = 0; 2016 2017 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10) 2018 mac_mode |= MAC_MODE_PORT_MODE_MII; 2019 else if (phydev->speed == SPEED_1000 || 2020 tg3_asic_rev(tp) != ASIC_REV_5785) 2021 
mac_mode |= MAC_MODE_PORT_MODE_GMII; 2022 else 2023 mac_mode |= MAC_MODE_PORT_MODE_MII; 2024 2025 if (phydev->duplex == DUPLEX_HALF) 2026 mac_mode |= MAC_MODE_HALF_DUPLEX; 2027 else { 2028 lcl_adv = mii_advertise_flowctrl( 2029 tp->link_config.flowctrl); 2030 2031 if (phydev->pause) 2032 rmt_adv = LPA_PAUSE_CAP; 2033 if (phydev->asym_pause) 2034 rmt_adv |= LPA_PAUSE_ASYM; 2035 } 2036 2037 tg3_setup_flow_control(tp, lcl_adv, rmt_adv); 2038 } else 2039 mac_mode |= MAC_MODE_PORT_MODE_GMII; 2040 2041 if (mac_mode != tp->mac_mode) { 2042 tp->mac_mode = mac_mode; 2043 tw32_f(MAC_MODE, tp->mac_mode); 2044 udelay(40); 2045 } 2046 2047 if (tg3_asic_rev(tp) == ASIC_REV_5785) { 2048 if (phydev->speed == SPEED_10) 2049 tw32(MAC_MI_STAT, 2050 MAC_MI_STAT_10MBPS_MODE | 2051 MAC_MI_STAT_LNKSTAT_ATTN_ENAB); 2052 else 2053 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); 2054 } 2055 2056 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF) 2057 tw32(MAC_TX_LENGTHS, 2058 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | 2059 (6 << TX_LENGTHS_IPG_SHIFT) | 2060 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT))); 2061 else 2062 tw32(MAC_TX_LENGTHS, 2063 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | 2064 (6 << TX_LENGTHS_IPG_SHIFT) | 2065 (32 << TX_LENGTHS_SLOT_TIME_SHIFT))); 2066 2067 if (phydev->link != tp->old_link || 2068 phydev->speed != tp->link_config.active_speed || 2069 phydev->duplex != tp->link_config.active_duplex || 2070 oldflowctrl != tp->link_config.active_flowctrl) 2071 linkmesg = 1; 2072 2073 tp->old_link = phydev->link; 2074 tp->link_config.active_speed = phydev->speed; 2075 tp->link_config.active_duplex = phydev->duplex; 2076 2077 spin_unlock_bh(&tp->lock); 2078 2079 if (linkmesg) 2080 tg3_link_report(tp); 2081 } 2082 2083 static int tg3_phy_init(struct tg3 *tp) 2084 { 2085 struct phy_device *phydev; 2086 2087 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) 2088 return 0; 2089 2090 /* Bring the PHY back to a known state. */ 2091 tg3_bmcr_reset(tp); 2092 2093 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 2094 2095 /* Attach the MAC to the PHY. */ 2096 phydev = phy_connect(tp->dev, phydev_name(phydev), 2097 tg3_adjust_link, phydev->interface); 2098 if (IS_ERR(phydev)) { 2099 dev_err(&tp->pdev->dev, "Could not attach to PHY\n"); 2100 return PTR_ERR(phydev); 2101 } 2102 2103 /* Mask with MAC supported features. 
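 * GMII and RGMII PHYs are capped at 1000 Mb/s unless the device is
 * flagged 10/100-only, in which case (and for plain MII) the cap is
 * 100 Mb/s; asymmetric pause is supported in both cases, and any other
 * interface mode causes the PHY to be disconnected again.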
*/ 2104 switch (phydev->interface) { 2105 case PHY_INTERFACE_MODE_GMII: 2106 case PHY_INTERFACE_MODE_RGMII: 2107 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 2108 phy_set_max_speed(phydev, SPEED_1000); 2109 phy_support_asym_pause(phydev); 2110 break; 2111 } 2112 fallthrough; 2113 case PHY_INTERFACE_MODE_MII: 2114 phy_set_max_speed(phydev, SPEED_100); 2115 phy_support_asym_pause(phydev); 2116 break; 2117 default: 2118 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); 2119 return -EINVAL; 2120 } 2121 2122 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED; 2123 2124 phy_attached_info(phydev); 2125 2126 return 0; 2127 } 2128 2129 static void tg3_phy_start(struct tg3 *tp) 2130 { 2131 struct phy_device *phydev; 2132 2133 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 2134 return; 2135 2136 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 2137 2138 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { 2139 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; 2140 phydev->speed = tp->link_config.speed; 2141 phydev->duplex = tp->link_config.duplex; 2142 phydev->autoneg = tp->link_config.autoneg; 2143 ethtool_convert_legacy_u32_to_link_mode( 2144 phydev->advertising, tp->link_config.advertising); 2145 } 2146 2147 phy_start(phydev); 2148 2149 phy_start_aneg(phydev); 2150 } 2151 2152 static void tg3_phy_stop(struct tg3 *tp) 2153 { 2154 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 2155 return; 2156 2157 phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); 2158 } 2159 2160 static void tg3_phy_fini(struct tg3 *tp) 2161 { 2162 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { 2163 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); 2164 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED; 2165 } 2166 } 2167 2168 static int tg3_phy_set_extloopbk(struct tg3 *tp) 2169 { 2170 int err; 2171 u32 val; 2172 2173 if (tp->phy_flags & TG3_PHYFLG_IS_FET) 2174 return 0; 2175 2176 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { 2177 /* Cannot do read-modify-write on 5401 */ 2178 err = tg3_phy_auxctl_write(tp, 2179 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 2180 MII_TG3_AUXCTL_ACTL_EXTLOOPBK | 2181 0x4c20); 2182 goto done; 2183 } 2184 2185 err = tg3_phy_auxctl_read(tp, 2186 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val); 2187 if (err) 2188 return err; 2189 2190 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK; 2191 err = tg3_phy_auxctl_write(tp, 2192 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val); 2193 2194 done: 2195 return err; 2196 } 2197 2198 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable) 2199 { 2200 u32 phytest; 2201 2202 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) { 2203 u32 phy; 2204 2205 tg3_writephy(tp, MII_TG3_FET_TEST, 2206 phytest | MII_TG3_FET_SHADOW_EN); 2207 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) { 2208 if (enable) 2209 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD; 2210 else 2211 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD; 2212 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy); 2213 } 2214 tg3_writephy(tp, MII_TG3_FET_TEST, phytest); 2215 } 2216 } 2217 2218 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable) 2219 { 2220 u32 reg; 2221 2222 if (!tg3_flag(tp, 5705_PLUS) || 2223 (tg3_flag(tp, 5717_PLUS) && 2224 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))) 2225 return; 2226 2227 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 2228 tg3_phy_fet_toggle_apd(tp, enable); 2229 return; 2230 } 2231 2232 reg = MII_TG3_MISC_SHDW_SCR5_LPED | 2233 MII_TG3_MISC_SHDW_SCR5_DLPTLM | 2234 MII_TG3_MISC_SHDW_SCR5_SDTL | 2235 MII_TG3_MISC_SHDW_SCR5_C125OE; 2236 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable) 2237 reg |= 
MII_TG3_MISC_SHDW_SCR5_DLLAPD; 2238 2239 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg); 2240 2241 2242 reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS; 2243 if (enable) 2244 reg |= MII_TG3_MISC_SHDW_APD_ENABLE; 2245 2246 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg); 2247 } 2248 2249 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable) 2250 { 2251 u32 phy; 2252 2253 if (!tg3_flag(tp, 5705_PLUS) || 2254 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 2255 return; 2256 2257 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 2258 u32 ephy; 2259 2260 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) { 2261 u32 reg = MII_TG3_FET_SHDW_MISCCTRL; 2262 2263 tg3_writephy(tp, MII_TG3_FET_TEST, 2264 ephy | MII_TG3_FET_SHADOW_EN); 2265 if (!tg3_readphy(tp, reg, &phy)) { 2266 if (enable) 2267 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX; 2268 else 2269 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX; 2270 tg3_writephy(tp, reg, phy); 2271 } 2272 tg3_writephy(tp, MII_TG3_FET_TEST, ephy); 2273 } 2274 } else { 2275 int ret; 2276 2277 ret = tg3_phy_auxctl_read(tp, 2278 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy); 2279 if (!ret) { 2280 if (enable) 2281 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX; 2282 else 2283 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX; 2284 tg3_phy_auxctl_write(tp, 2285 MII_TG3_AUXCTL_SHDWSEL_MISC, phy); 2286 } 2287 } 2288 } 2289 2290 static void tg3_phy_set_wirespeed(struct tg3 *tp) 2291 { 2292 int ret; 2293 u32 val; 2294 2295 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) 2296 return; 2297 2298 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val); 2299 if (!ret) 2300 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, 2301 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN); 2302 } 2303 2304 static void tg3_phy_apply_otp(struct tg3 *tp) 2305 { 2306 u32 otp, phy; 2307 2308 if (!tp->phy_otp) 2309 return; 2310 2311 otp = tp->phy_otp; 2312 2313 if (tg3_phy_toggle_auxctl_smdsp(tp, true)) 2314 return; 2315 2316 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT); 2317 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT; 2318 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy); 2319 2320 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) | 2321 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT); 2322 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy); 2323 2324 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT); 2325 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ; 2326 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy); 2327 2328 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT); 2329 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy); 2330 2331 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT); 2332 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy); 2333 2334 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) | 2335 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT); 2336 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy); 2337 2338 tg3_phy_toggle_auxctl_smdsp(tp, false); 2339 } 2340 2341 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee) 2342 { 2343 u32 val; 2344 struct ethtool_eee *dest = &tp->eee; 2345 2346 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) 2347 return; 2348 2349 if (eee) 2350 dest = eee; 2351 2352 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val)) 2353 return; 2354 2355 /* Pull eee_active */ 2356 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T || 2357 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) { 2358 dest->eee_active = 1; 2359 } else 2360 dest->eee_active = 0; 2361 2362 /* Pull lp advertised settings */ 2363 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val)) 
2364 return; 2365 dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val); 2366 2367 /* Pull advertised and eee_enabled settings */ 2368 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val)) 2369 return; 2370 dest->eee_enabled = !!val; 2371 dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val); 2372 2373 /* Pull tx_lpi_enabled */ 2374 val = tr32(TG3_CPMU_EEE_MODE); 2375 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX); 2376 2377 /* Pull lpi timer value */ 2378 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff; 2379 } 2380 2381 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up) 2382 { 2383 u32 val; 2384 2385 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) 2386 return; 2387 2388 tp->setlpicnt = 0; 2389 2390 if (tp->link_config.autoneg == AUTONEG_ENABLE && 2391 current_link_up && 2392 tp->link_config.active_duplex == DUPLEX_FULL && 2393 (tp->link_config.active_speed == SPEED_100 || 2394 tp->link_config.active_speed == SPEED_1000)) { 2395 u32 eeectl; 2396 2397 if (tp->link_config.active_speed == SPEED_1000) 2398 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US; 2399 else 2400 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US; 2401 2402 tw32(TG3_CPMU_EEE_CTRL, eeectl); 2403 2404 tg3_eee_pull_config(tp, NULL); 2405 if (tp->eee.eee_active) 2406 tp->setlpicnt = 2; 2407 } 2408 2409 if (!tp->setlpicnt) { 2410 if (current_link_up && 2411 !tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2412 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000); 2413 tg3_phy_toggle_auxctl_smdsp(tp, false); 2414 } 2415 2416 val = tr32(TG3_CPMU_EEE_MODE); 2417 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE); 2418 } 2419 } 2420 2421 static void tg3_phy_eee_enable(struct tg3 *tp) 2422 { 2423 u32 val; 2424 2425 if (tp->link_config.active_speed == SPEED_1000 && 2426 (tg3_asic_rev(tp) == ASIC_REV_5717 || 2427 tg3_asic_rev(tp) == ASIC_REV_5719 || 2428 tg3_flag(tp, 57765_CLASS)) && 2429 !tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2430 val = MII_TG3_DSP_TAP26_ALNOKO | 2431 MII_TG3_DSP_TAP26_RMRXSTO; 2432 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); 2433 tg3_phy_toggle_auxctl_smdsp(tp, false); 2434 } 2435 2436 val = tr32(TG3_CPMU_EEE_MODE); 2437 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE); 2438 } 2439 2440 static int tg3_wait_macro_done(struct tg3 *tp) 2441 { 2442 int limit = 100; 2443 2444 while (limit--) { 2445 u32 tmp32; 2446 2447 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) { 2448 if ((tmp32 & 0x1000) == 0) 2449 break; 2450 } 2451 } 2452 if (limit < 0) 2453 return -EBUSY; 2454 2455 return 0; 2456 } 2457 2458 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp) 2459 { 2460 static const u32 test_pat[4][6] = { 2461 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 }, 2462 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 }, 2463 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 }, 2464 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 } 2465 }; 2466 int chan; 2467 2468 for (chan = 0; chan < 4; chan++) { 2469 int i; 2470 2471 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 2472 (chan * 0x2000) | 0x0200); 2473 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002); 2474 2475 for (i = 0; i < 6; i++) 2476 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 2477 test_pat[chan][i]); 2478 2479 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202); 2480 if (tg3_wait_macro_done(tp)) { 2481 *resetp = 1; 2482 return -EBUSY; 2483 } 2484 2485 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 2486 (chan * 0x2000) | 0x0200); 2487 tg3_writephy(tp, 
MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}

static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}

static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt. */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | BMCR_SPEED1000);

		/* Set to master mode. */
		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_CTRL1000,
			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);

		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
		if (err)
			return err;

		/* Block the PHY control access. */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	tg3_phy_toggle_auxctl_smdsp(tp, false);

	tg3_writephy(tp, MII_CTRL1000, phy9_orig);

	err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
	if (err)
		return err;

	reg32 &= ~0x3000;
	tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

	return 0;
}

static void tg3_carrier_off(struct tg3 *tp)
{
	netif_carrier_off(tp->dev);
	tp->link_up = false;
}

static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF))
		netdev_warn(tp->dev,
			    "Management side-band traffic will be interrupted during phy settings change\n");
}

/* This will reset the tigon3 PHY if there is no valid
 * link.
2627 */ 2628 static int tg3_phy_reset(struct tg3 *tp) 2629 { 2630 u32 val, cpmuctrl; 2631 int err; 2632 2633 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 2634 val = tr32(GRC_MISC_CFG); 2635 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ); 2636 udelay(40); 2637 } 2638 err = tg3_readphy(tp, MII_BMSR, &val); 2639 err |= tg3_readphy(tp, MII_BMSR, &val); 2640 if (err != 0) 2641 return -EBUSY; 2642 2643 if (netif_running(tp->dev) && tp->link_up) { 2644 netif_carrier_off(tp->dev); 2645 tg3_link_report(tp); 2646 } 2647 2648 if (tg3_asic_rev(tp) == ASIC_REV_5703 || 2649 tg3_asic_rev(tp) == ASIC_REV_5704 || 2650 tg3_asic_rev(tp) == ASIC_REV_5705) { 2651 err = tg3_phy_reset_5703_4_5(tp); 2652 if (err) 2653 return err; 2654 goto out; 2655 } 2656 2657 cpmuctrl = 0; 2658 if (tg3_asic_rev(tp) == ASIC_REV_5784 && 2659 tg3_chip_rev(tp) != CHIPREV_5784_AX) { 2660 cpmuctrl = tr32(TG3_CPMU_CTRL); 2661 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) 2662 tw32(TG3_CPMU_CTRL, 2663 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY); 2664 } 2665 2666 err = tg3_bmcr_reset(tp); 2667 if (err) 2668 return err; 2669 2670 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) { 2671 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz; 2672 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val); 2673 2674 tw32(TG3_CPMU_CTRL, cpmuctrl); 2675 } 2676 2677 if (tg3_chip_rev(tp) == CHIPREV_5784_AX || 2678 tg3_chip_rev(tp) == CHIPREV_5761_AX) { 2679 val = tr32(TG3_CPMU_LSPD_1000MB_CLK); 2680 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) == 2681 CPMU_LSPD_1000MB_MACCLK_12_5) { 2682 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK; 2683 udelay(40); 2684 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val); 2685 } 2686 } 2687 2688 if (tg3_flag(tp, 5717_PLUS) && 2689 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) 2690 return 0; 2691 2692 tg3_phy_apply_otp(tp); 2693 2694 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) 2695 tg3_phy_toggle_apd(tp, true); 2696 else 2697 tg3_phy_toggle_apd(tp, false); 2698 2699 out: 2700 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) && 2701 !tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2702 tg3_phydsp_write(tp, 0x201f, 0x2aaa); 2703 tg3_phydsp_write(tp, 0x000a, 0x0323); 2704 tg3_phy_toggle_auxctl_smdsp(tp, false); 2705 } 2706 2707 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) { 2708 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); 2709 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); 2710 } 2711 2712 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) { 2713 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2714 tg3_phydsp_write(tp, 0x000a, 0x310b); 2715 tg3_phydsp_write(tp, 0x201f, 0x9506); 2716 tg3_phydsp_write(tp, 0x401f, 0x14e2); 2717 tg3_phy_toggle_auxctl_smdsp(tp, false); 2718 } 2719 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) { 2720 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2721 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); 2722 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) { 2723 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b); 2724 tg3_writephy(tp, MII_TG3_TEST1, 2725 MII_TG3_TEST1_TRIM_EN | 0x4); 2726 } else 2727 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b); 2728 2729 tg3_phy_toggle_auxctl_smdsp(tp, false); 2730 } 2731 } 2732 2733 /* Set Extended packet length bit (bit 14) on all chips that */ 2734 /* support jumbo frames */ 2735 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { 2736 /* Cannot do read-modify-write on 5401 */ 2737 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20); 2738 } else if (tg3_flag(tp, JUMBO_CAPABLE)) { 2739 /* Set bit 14 with read-modify-write to preserve other bits */ 2740 err = tg3_phy_auxctl_read(tp, 2741 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val); 2742 
if (!err) 2743 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 2744 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN); 2745 } 2746 2747 /* Set phy register 0x10 bit 0 to high fifo elasticity to support 2748 * jumbo frames transmission. 2749 */ 2750 if (tg3_flag(tp, JUMBO_CAPABLE)) { 2751 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val)) 2752 tg3_writephy(tp, MII_TG3_EXT_CTRL, 2753 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC); 2754 } 2755 2756 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 2757 /* adjust output voltage */ 2758 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12); 2759 } 2760 2761 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0) 2762 tg3_phydsp_write(tp, 0xffb, 0x4000); 2763 2764 tg3_phy_toggle_automdix(tp, true); 2765 tg3_phy_set_wirespeed(tp); 2766 return 0; 2767 } 2768 2769 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001 2770 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002 2771 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \ 2772 TG3_GPIO_MSG_NEED_VAUX) 2773 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \ 2774 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \ 2775 (TG3_GPIO_MSG_DRVR_PRES << 4) | \ 2776 (TG3_GPIO_MSG_DRVR_PRES << 8) | \ 2777 (TG3_GPIO_MSG_DRVR_PRES << 12)) 2778 2779 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \ 2780 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \ 2781 (TG3_GPIO_MSG_NEED_VAUX << 4) | \ 2782 (TG3_GPIO_MSG_NEED_VAUX << 8) | \ 2783 (TG3_GPIO_MSG_NEED_VAUX << 12)) 2784 2785 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat) 2786 { 2787 u32 status, shift; 2788 2789 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 2790 tg3_asic_rev(tp) == ASIC_REV_5719) 2791 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG); 2792 else 2793 status = tr32(TG3_CPMU_DRV_STATUS); 2794 2795 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn; 2796 status &= ~(TG3_GPIO_MSG_MASK << shift); 2797 status |= (newstat << shift); 2798 2799 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 2800 tg3_asic_rev(tp) == ASIC_REV_5719) 2801 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status); 2802 else 2803 tw32(TG3_CPMU_DRV_STATUS, status); 2804 2805 return status >> TG3_APE_GPIO_MSG_SHIFT; 2806 } 2807 2808 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp) 2809 { 2810 if (!tg3_flag(tp, IS_NIC)) 2811 return 0; 2812 2813 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 2814 tg3_asic_rev(tp) == ASIC_REV_5719 || 2815 tg3_asic_rev(tp) == ASIC_REV_5720) { 2816 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO)) 2817 return -EIO; 2818 2819 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES); 2820 2821 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 2822 TG3_GRC_LCLCTL_PWRSW_DELAY); 2823 2824 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO); 2825 } else { 2826 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 2827 TG3_GRC_LCLCTL_PWRSW_DELAY); 2828 } 2829 2830 return 0; 2831 } 2832 2833 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp) 2834 { 2835 u32 grc_local_ctrl; 2836 2837 if (!tg3_flag(tp, IS_NIC) || 2838 tg3_asic_rev(tp) == ASIC_REV_5700 || 2839 tg3_asic_rev(tp) == ASIC_REV_5701) 2840 return; 2841 2842 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1; 2843 2844 tw32_wait_f(GRC_LOCAL_CTRL, 2845 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1, 2846 TG3_GRC_LCLCTL_PWRSW_DELAY); 2847 2848 tw32_wait_f(GRC_LOCAL_CTRL, 2849 grc_local_ctrl, 2850 TG3_GRC_LCLCTL_PWRSW_DELAY); 2851 2852 tw32_wait_f(GRC_LOCAL_CTRL, 2853 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1, 2854 TG3_GRC_LCLCTL_PWRSW_DELAY); 2855 } 2856 2857 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp) 2858 { 2859 if (!tg3_flag(tp, IS_NIC)) 2860 return; 2861 2862 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 2863 tg3_asic_rev(tp) == 
ASIC_REV_5701) { 2864 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | 2865 (GRC_LCLCTRL_GPIO_OE0 | 2866 GRC_LCLCTRL_GPIO_OE1 | 2867 GRC_LCLCTRL_GPIO_OE2 | 2868 GRC_LCLCTRL_GPIO_OUTPUT0 | 2869 GRC_LCLCTRL_GPIO_OUTPUT1), 2870 TG3_GRC_LCLCTL_PWRSW_DELAY); 2871 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || 2872 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { 2873 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */ 2874 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 | 2875 GRC_LCLCTRL_GPIO_OE1 | 2876 GRC_LCLCTRL_GPIO_OE2 | 2877 GRC_LCLCTRL_GPIO_OUTPUT0 | 2878 GRC_LCLCTRL_GPIO_OUTPUT1 | 2879 tp->grc_local_ctrl; 2880 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 2881 TG3_GRC_LCLCTL_PWRSW_DELAY); 2882 2883 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2; 2884 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 2885 TG3_GRC_LCLCTL_PWRSW_DELAY); 2886 2887 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0; 2888 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 2889 TG3_GRC_LCLCTL_PWRSW_DELAY); 2890 } else { 2891 u32 no_gpio2; 2892 u32 grc_local_ctrl = 0; 2893 2894 /* Workaround to prevent overdrawing Amps. */ 2895 if (tg3_asic_rev(tp) == ASIC_REV_5714) { 2896 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; 2897 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | 2898 grc_local_ctrl, 2899 TG3_GRC_LCLCTL_PWRSW_DELAY); 2900 } 2901 2902 /* On 5753 and variants, GPIO2 cannot be used. */ 2903 no_gpio2 = tp->nic_sram_data_cfg & 2904 NIC_SRAM_DATA_CFG_NO_GPIO2; 2905 2906 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | 2907 GRC_LCLCTRL_GPIO_OE1 | 2908 GRC_LCLCTRL_GPIO_OE2 | 2909 GRC_LCLCTRL_GPIO_OUTPUT1 | 2910 GRC_LCLCTRL_GPIO_OUTPUT2; 2911 if (no_gpio2) { 2912 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 | 2913 GRC_LCLCTRL_GPIO_OUTPUT2); 2914 } 2915 tw32_wait_f(GRC_LOCAL_CTRL, 2916 tp->grc_local_ctrl | grc_local_ctrl, 2917 TG3_GRC_LCLCTL_PWRSW_DELAY); 2918 2919 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0; 2920 2921 tw32_wait_f(GRC_LOCAL_CTRL, 2922 tp->grc_local_ctrl | grc_local_ctrl, 2923 TG3_GRC_LCLCTL_PWRSW_DELAY); 2924 2925 if (!no_gpio2) { 2926 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2; 2927 tw32_wait_f(GRC_LOCAL_CTRL, 2928 tp->grc_local_ctrl | grc_local_ctrl, 2929 TG3_GRC_LCLCTL_PWRSW_DELAY); 2930 } 2931 } 2932 } 2933 2934 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable) 2935 { 2936 u32 msg = 0; 2937 2938 /* Serialize power state transitions */ 2939 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO)) 2940 return; 2941 2942 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable) 2943 msg = TG3_GPIO_MSG_NEED_VAUX; 2944 2945 msg = tg3_set_function_status(tp, msg); 2946 2947 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK) 2948 goto done; 2949 2950 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK) 2951 tg3_pwrsrc_switch_to_vaux(tp); 2952 else 2953 tg3_pwrsrc_die_with_vmain(tp); 2954 2955 done: 2956 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO); 2957 } 2958 2959 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol) 2960 { 2961 bool need_vaux = false; 2962 2963 /* The GPIOs do something completely different on 57765. */ 2964 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS)) 2965 return; 2966 2967 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 2968 tg3_asic_rev(tp) == ASIC_REV_5719 || 2969 tg3_asic_rev(tp) == ASIC_REV_5720) { 2970 tg3_frob_aux_power_5717(tp, include_wol ? 
2971 tg3_flag(tp, WOL_ENABLE) != 0 : 0); 2972 return; 2973 } 2974 2975 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) { 2976 struct net_device *dev_peer; 2977 2978 dev_peer = pci_get_drvdata(tp->pdev_peer); 2979 2980 /* remove_one() may have been run on the peer. */ 2981 if (dev_peer) { 2982 struct tg3 *tp_peer = netdev_priv(dev_peer); 2983 2984 if (tg3_flag(tp_peer, INIT_COMPLETE)) 2985 return; 2986 2987 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) || 2988 tg3_flag(tp_peer, ENABLE_ASF)) 2989 need_vaux = true; 2990 } 2991 } 2992 2993 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) || 2994 tg3_flag(tp, ENABLE_ASF)) 2995 need_vaux = true; 2996 2997 if (need_vaux) 2998 tg3_pwrsrc_switch_to_vaux(tp); 2999 else 3000 tg3_pwrsrc_die_with_vmain(tp); 3001 } 3002 3003 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed) 3004 { 3005 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2) 3006 return 1; 3007 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) { 3008 if (speed != SPEED_10) 3009 return 1; 3010 } else if (speed == SPEED_10) 3011 return 1; 3012 3013 return 0; 3014 } 3015 3016 static bool tg3_phy_power_bug(struct tg3 *tp) 3017 { 3018 switch (tg3_asic_rev(tp)) { 3019 case ASIC_REV_5700: 3020 case ASIC_REV_5704: 3021 return true; 3022 case ASIC_REV_5780: 3023 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 3024 return true; 3025 return false; 3026 case ASIC_REV_5717: 3027 if (!tp->pci_fn) 3028 return true; 3029 return false; 3030 case ASIC_REV_5719: 3031 case ASIC_REV_5720: 3032 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 3033 !tp->pci_fn) 3034 return true; 3035 return false; 3036 } 3037 3038 return false; 3039 } 3040 3041 static bool tg3_phy_led_bug(struct tg3 *tp) 3042 { 3043 switch (tg3_asic_rev(tp)) { 3044 case ASIC_REV_5719: 3045 case ASIC_REV_5720: 3046 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && 3047 !tp->pci_fn) 3048 return true; 3049 return false; 3050 } 3051 3052 return false; 3053 } 3054 3055 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) 3056 { 3057 u32 val; 3058 3059 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) 3060 return; 3061 3062 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 3063 if (tg3_asic_rev(tp) == ASIC_REV_5704) { 3064 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL); 3065 u32 serdes_cfg = tr32(MAC_SERDES_CFG); 3066 3067 sg_dig_ctrl |= 3068 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET; 3069 tw32(SG_DIG_CTRL, sg_dig_ctrl); 3070 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15)); 3071 } 3072 return; 3073 } 3074 3075 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 3076 tg3_bmcr_reset(tp); 3077 val = tr32(GRC_MISC_CFG); 3078 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ); 3079 udelay(40); 3080 return; 3081 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 3082 u32 phytest; 3083 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) { 3084 u32 phy; 3085 3086 tg3_writephy(tp, MII_ADVERTISE, 0); 3087 tg3_writephy(tp, MII_BMCR, 3088 BMCR_ANENABLE | BMCR_ANRESTART); 3089 3090 tg3_writephy(tp, MII_TG3_FET_TEST, 3091 phytest | MII_TG3_FET_SHADOW_EN); 3092 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) { 3093 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD; 3094 tg3_writephy(tp, 3095 MII_TG3_FET_SHDW_AUXMODE4, 3096 phy); 3097 } 3098 tg3_writephy(tp, MII_TG3_FET_TEST, phytest); 3099 } 3100 return; 3101 } else if (do_low_power) { 3102 if (!tg3_phy_led_bug(tp)) 3103 tg3_writephy(tp, MII_TG3_EXT_CTRL, 3104 MII_TG3_EXT_CTRL_FORCE_LED_OFF); 3105 3106 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR | 3107 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE | 3108 MII_TG3_AUXCTL_PCTL_VREG_11V; 3109 
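		/* Write the low-power bits assembled above (100TX low power,
		 * SPR isolate, 1.1V regulator) to the PHY power-control
		 * shadow register.
		 */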
tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val); 3110 } 3111 3112 /* The PHY should not be powered down on some chips because 3113 * of bugs. 3114 */ 3115 if (tg3_phy_power_bug(tp)) 3116 return; 3117 3118 if (tg3_chip_rev(tp) == CHIPREV_5784_AX || 3119 tg3_chip_rev(tp) == CHIPREV_5761_AX) { 3120 val = tr32(TG3_CPMU_LSPD_1000MB_CLK); 3121 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK; 3122 val |= CPMU_LSPD_1000MB_MACCLK_12_5; 3123 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val); 3124 } 3125 3126 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN); 3127 } 3128 3129 /* tp->lock is held. */ 3130 static int tg3_nvram_lock(struct tg3 *tp) 3131 { 3132 if (tg3_flag(tp, NVRAM)) { 3133 int i; 3134 3135 if (tp->nvram_lock_cnt == 0) { 3136 tw32(NVRAM_SWARB, SWARB_REQ_SET1); 3137 for (i = 0; i < 8000; i++) { 3138 if (tr32(NVRAM_SWARB) & SWARB_GNT1) 3139 break; 3140 udelay(20); 3141 } 3142 if (i == 8000) { 3143 tw32(NVRAM_SWARB, SWARB_REQ_CLR1); 3144 return -ENODEV; 3145 } 3146 } 3147 tp->nvram_lock_cnt++; 3148 } 3149 return 0; 3150 } 3151 3152 /* tp->lock is held. */ 3153 static void tg3_nvram_unlock(struct tg3 *tp) 3154 { 3155 if (tg3_flag(tp, NVRAM)) { 3156 if (tp->nvram_lock_cnt > 0) 3157 tp->nvram_lock_cnt--; 3158 if (tp->nvram_lock_cnt == 0) 3159 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1); 3160 } 3161 } 3162 3163 /* tp->lock is held. */ 3164 static void tg3_enable_nvram_access(struct tg3 *tp) 3165 { 3166 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) { 3167 u32 nvaccess = tr32(NVRAM_ACCESS); 3168 3169 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE); 3170 } 3171 } 3172 3173 /* tp->lock is held. */ 3174 static void tg3_disable_nvram_access(struct tg3 *tp) 3175 { 3176 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) { 3177 u32 nvaccess = tr32(NVRAM_ACCESS); 3178 3179 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE); 3180 } 3181 } 3182 3183 static int tg3_nvram_read_using_eeprom(struct tg3 *tp, 3184 u32 offset, u32 *val) 3185 { 3186 u32 tmp; 3187 int i; 3188 3189 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0) 3190 return -EINVAL; 3191 3192 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK | 3193 EEPROM_ADDR_DEVID_MASK | 3194 EEPROM_ADDR_READ); 3195 tw32(GRC_EEPROM_ADDR, 3196 tmp | 3197 (0 << EEPROM_ADDR_DEVID_SHIFT) | 3198 ((offset << EEPROM_ADDR_ADDR_SHIFT) & 3199 EEPROM_ADDR_ADDR_MASK) | 3200 EEPROM_ADDR_READ | EEPROM_ADDR_START); 3201 3202 for (i = 0; i < 1000; i++) { 3203 tmp = tr32(GRC_EEPROM_ADDR); 3204 3205 if (tmp & EEPROM_ADDR_COMPLETE) 3206 break; 3207 msleep(1); 3208 } 3209 if (!(tmp & EEPROM_ADDR_COMPLETE)) 3210 return -EBUSY; 3211 3212 tmp = tr32(GRC_EEPROM_DATA); 3213 3214 /* 3215 * The data will always be opposite the native endian 3216 * format. Perform a blind byteswap to compensate. 
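	 * Note the deliberate use of swab32() rather than be32_to_cpu():
	 * the swap must happen regardless of host endianness.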
3217 */ 3218 *val = swab32(tmp); 3219 3220 return 0; 3221 } 3222 3223 #define NVRAM_CMD_TIMEOUT 10000 3224 3225 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd) 3226 { 3227 int i; 3228 3229 tw32(NVRAM_CMD, nvram_cmd); 3230 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) { 3231 usleep_range(10, 40); 3232 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) { 3233 udelay(10); 3234 break; 3235 } 3236 } 3237 3238 if (i == NVRAM_CMD_TIMEOUT) 3239 return -EBUSY; 3240 3241 return 0; 3242 } 3243 3244 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr) 3245 { 3246 if (tg3_flag(tp, NVRAM) && 3247 tg3_flag(tp, NVRAM_BUFFERED) && 3248 tg3_flag(tp, FLASH) && 3249 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) && 3250 (tp->nvram_jedecnum == JEDEC_ATMEL)) 3251 3252 addr = ((addr / tp->nvram_pagesize) << 3253 ATMEL_AT45DB0X1B_PAGE_POS) + 3254 (addr % tp->nvram_pagesize); 3255 3256 return addr; 3257 } 3258 3259 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr) 3260 { 3261 if (tg3_flag(tp, NVRAM) && 3262 tg3_flag(tp, NVRAM_BUFFERED) && 3263 tg3_flag(tp, FLASH) && 3264 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) && 3265 (tp->nvram_jedecnum == JEDEC_ATMEL)) 3266 3267 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) * 3268 tp->nvram_pagesize) + 3269 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1)); 3270 3271 return addr; 3272 } 3273 3274 /* NOTE: Data read in from NVRAM is byteswapped according to 3275 * the byteswapping settings for all other register accesses. 3276 * tg3 devices are BE devices, so on a BE machine, the data 3277 * returned will be exactly as it is seen in NVRAM. On a LE 3278 * machine, the 32-bit value will be byteswapped. 3279 */ 3280 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val) 3281 { 3282 int ret; 3283 3284 if (!tg3_flag(tp, NVRAM)) 3285 return tg3_nvram_read_using_eeprom(tp, offset, val); 3286 3287 offset = tg3_nvram_phys_addr(tp, offset); 3288 3289 if (offset > NVRAM_ADDR_MSK) 3290 return -EINVAL; 3291 3292 ret = tg3_nvram_lock(tp); 3293 if (ret) 3294 return ret; 3295 3296 tg3_enable_nvram_access(tp); 3297 3298 tw32(NVRAM_ADDR, offset); 3299 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO | 3300 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE); 3301 3302 if (ret == 0) 3303 *val = tr32(NVRAM_RDDATA); 3304 3305 tg3_disable_nvram_access(tp); 3306 3307 tg3_nvram_unlock(tp); 3308 3309 return ret; 3310 } 3311 3312 /* Ensures NVRAM data is in bytestream format. */ 3313 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val) 3314 { 3315 u32 v; 3316 int res = tg3_nvram_read(tp, offset, &v); 3317 if (!res) 3318 *val = cpu_to_be32(v); 3319 return res; 3320 } 3321 3322 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp, 3323 u32 offset, u32 len, u8 *buf) 3324 { 3325 int i, j, rc = 0; 3326 u32 val; 3327 3328 for (i = 0; i < len; i += 4) { 3329 u32 addr; 3330 __be32 data; 3331 3332 addr = offset + i; 3333 3334 memcpy(&data, buf + i, 4); 3335 3336 /* 3337 * The SEEPROM interface expects the data to always be opposite 3338 * the native endian format. We accomplish this by reversing 3339 * all the operations that would have been performed on the 3340 * data from a call to tg3_nvram_read_be32(). 
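		 * In practice that means undoing the read path's
		 * cpu_to_be32() with be32_to_cpu() and then applying the
		 * same blind swab32() again.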
3341 */ 3342 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data))); 3343 3344 val = tr32(GRC_EEPROM_ADDR); 3345 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE); 3346 3347 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK | 3348 EEPROM_ADDR_READ); 3349 tw32(GRC_EEPROM_ADDR, val | 3350 (0 << EEPROM_ADDR_DEVID_SHIFT) | 3351 (addr & EEPROM_ADDR_ADDR_MASK) | 3352 EEPROM_ADDR_START | 3353 EEPROM_ADDR_WRITE); 3354 3355 for (j = 0; j < 1000; j++) { 3356 val = tr32(GRC_EEPROM_ADDR); 3357 3358 if (val & EEPROM_ADDR_COMPLETE) 3359 break; 3360 msleep(1); 3361 } 3362 if (!(val & EEPROM_ADDR_COMPLETE)) { 3363 rc = -EBUSY; 3364 break; 3365 } 3366 } 3367 3368 return rc; 3369 } 3370 3371 /* offset and length are dword aligned */ 3372 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len, 3373 u8 *buf) 3374 { 3375 int ret = 0; 3376 u32 pagesize = tp->nvram_pagesize; 3377 u32 pagemask = pagesize - 1; 3378 u32 nvram_cmd; 3379 u8 *tmp; 3380 3381 tmp = kmalloc(pagesize, GFP_KERNEL); 3382 if (tmp == NULL) 3383 return -ENOMEM; 3384 3385 while (len) { 3386 int j; 3387 u32 phy_addr, page_off, size; 3388 3389 phy_addr = offset & ~pagemask; 3390 3391 for (j = 0; j < pagesize; j += 4) { 3392 ret = tg3_nvram_read_be32(tp, phy_addr + j, 3393 (__be32 *) (tmp + j)); 3394 if (ret) 3395 break; 3396 } 3397 if (ret) 3398 break; 3399 3400 page_off = offset & pagemask; 3401 size = pagesize; 3402 if (len < size) 3403 size = len; 3404 3405 len -= size; 3406 3407 memcpy(tmp + page_off, buf, size); 3408 3409 offset = offset + (pagesize - page_off); 3410 3411 tg3_enable_nvram_access(tp); 3412 3413 /* 3414 * Before we can erase the flash page, we need 3415 * to issue a special "write enable" command. 3416 */ 3417 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; 3418 3419 if (tg3_nvram_exec_cmd(tp, nvram_cmd)) 3420 break; 3421 3422 /* Erase the target page */ 3423 tw32(NVRAM_ADDR, phy_addr); 3424 3425 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR | 3426 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE; 3427 3428 if (tg3_nvram_exec_cmd(tp, nvram_cmd)) 3429 break; 3430 3431 /* Issue another write enable to start the write. 
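		 * The preceding page erase consumed the first WREN; serial
		 * flash parts generally require a fresh write enable before
		 * each program or erase operation.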
*/ 3432 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; 3433 3434 if (tg3_nvram_exec_cmd(tp, nvram_cmd)) 3435 break; 3436 3437 for (j = 0; j < pagesize; j += 4) { 3438 __be32 data; 3439 3440 data = *((__be32 *) (tmp + j)); 3441 3442 tw32(NVRAM_WRDATA, be32_to_cpu(data)); 3443 3444 tw32(NVRAM_ADDR, phy_addr + j); 3445 3446 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | 3447 NVRAM_CMD_WR; 3448 3449 if (j == 0) 3450 nvram_cmd |= NVRAM_CMD_FIRST; 3451 else if (j == (pagesize - 4)) 3452 nvram_cmd |= NVRAM_CMD_LAST; 3453 3454 ret = tg3_nvram_exec_cmd(tp, nvram_cmd); 3455 if (ret) 3456 break; 3457 } 3458 if (ret) 3459 break; 3460 } 3461 3462 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE; 3463 tg3_nvram_exec_cmd(tp, nvram_cmd); 3464 3465 kfree(tmp); 3466 3467 return ret; 3468 } 3469 3470 /* offset and length are dword aligned */ 3471 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len, 3472 u8 *buf) 3473 { 3474 int i, ret = 0; 3475 3476 for (i = 0; i < len; i += 4, offset += 4) { 3477 u32 page_off, phy_addr, nvram_cmd; 3478 __be32 data; 3479 3480 memcpy(&data, buf + i, 4); 3481 tw32(NVRAM_WRDATA, be32_to_cpu(data)); 3482 3483 page_off = offset % tp->nvram_pagesize; 3484 3485 phy_addr = tg3_nvram_phys_addr(tp, offset); 3486 3487 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR; 3488 3489 if (page_off == 0 || i == 0) 3490 nvram_cmd |= NVRAM_CMD_FIRST; 3491 if (page_off == (tp->nvram_pagesize - 4)) 3492 nvram_cmd |= NVRAM_CMD_LAST; 3493 3494 if (i == (len - 4)) 3495 nvram_cmd |= NVRAM_CMD_LAST; 3496 3497 if ((nvram_cmd & NVRAM_CMD_FIRST) || 3498 !tg3_flag(tp, FLASH) || 3499 !tg3_flag(tp, 57765_PLUS)) 3500 tw32(NVRAM_ADDR, phy_addr); 3501 3502 if (tg3_asic_rev(tp) != ASIC_REV_5752 && 3503 !tg3_flag(tp, 5755_PLUS) && 3504 (tp->nvram_jedecnum == JEDEC_ST) && 3505 (nvram_cmd & NVRAM_CMD_FIRST)) { 3506 u32 cmd; 3507 3508 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; 3509 ret = tg3_nvram_exec_cmd(tp, cmd); 3510 if (ret) 3511 break; 3512 } 3513 if (!tg3_flag(tp, FLASH)) { 3514 /* We always do complete word writes to eeprom. 
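			 * Setting both FIRST and LAST below makes every
			 * 32-bit word a self-contained transaction.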
*/ 3515 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST); 3516 } 3517 3518 ret = tg3_nvram_exec_cmd(tp, nvram_cmd); 3519 if (ret) 3520 break; 3521 } 3522 return ret; 3523 } 3524 3525 /* offset and length are dword aligned */ 3526 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf) 3527 { 3528 int ret; 3529 3530 if (tg3_flag(tp, EEPROM_WRITE_PROT)) { 3531 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl & 3532 ~GRC_LCLCTRL_GPIO_OUTPUT1); 3533 udelay(40); 3534 } 3535 3536 if (!tg3_flag(tp, NVRAM)) { 3537 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf); 3538 } else { 3539 u32 grc_mode; 3540 3541 ret = tg3_nvram_lock(tp); 3542 if (ret) 3543 return ret; 3544 3545 tg3_enable_nvram_access(tp); 3546 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) 3547 tw32(NVRAM_WRITE1, 0x406); 3548 3549 grc_mode = tr32(GRC_MODE); 3550 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE); 3551 3552 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) { 3553 ret = tg3_nvram_write_block_buffered(tp, offset, len, 3554 buf); 3555 } else { 3556 ret = tg3_nvram_write_block_unbuffered(tp, offset, len, 3557 buf); 3558 } 3559 3560 grc_mode = tr32(GRC_MODE); 3561 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE); 3562 3563 tg3_disable_nvram_access(tp); 3564 tg3_nvram_unlock(tp); 3565 } 3566 3567 if (tg3_flag(tp, EEPROM_WRITE_PROT)) { 3568 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 3569 udelay(40); 3570 } 3571 3572 return ret; 3573 } 3574 3575 #define RX_CPU_SCRATCH_BASE 0x30000 3576 #define RX_CPU_SCRATCH_SIZE 0x04000 3577 #define TX_CPU_SCRATCH_BASE 0x34000 3578 #define TX_CPU_SCRATCH_SIZE 0x04000 3579 3580 /* tp->lock is held. */ 3581 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base) 3582 { 3583 int i; 3584 const int iters = 10000; 3585 3586 for (i = 0; i < iters; i++) { 3587 tw32(cpu_base + CPU_STATE, 0xffffffff); 3588 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); 3589 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT) 3590 break; 3591 if (pci_channel_offline(tp->pdev)) 3592 return -EBUSY; 3593 } 3594 3595 return (i == iters) ? -EBUSY : 0; 3596 } 3597 3598 /* tp->lock is held. */ 3599 static int tg3_rxcpu_pause(struct tg3 *tp) 3600 { 3601 int rc = tg3_pause_cpu(tp, RX_CPU_BASE); 3602 3603 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); 3604 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT); 3605 udelay(10); 3606 3607 return rc; 3608 } 3609 3610 /* tp->lock is held. */ 3611 static int tg3_txcpu_pause(struct tg3 *tp) 3612 { 3613 return tg3_pause_cpu(tp, TX_CPU_BASE); 3614 } 3615 3616 /* tp->lock is held. */ 3617 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base) 3618 { 3619 tw32(cpu_base + CPU_STATE, 0xffffffff); 3620 tw32_f(cpu_base + CPU_MODE, 0x00000000); 3621 } 3622 3623 /* tp->lock is held. */ 3624 static void tg3_rxcpu_resume(struct tg3 *tp) 3625 { 3626 tg3_resume_cpu(tp, RX_CPU_BASE); 3627 } 3628 3629 /* tp->lock is held. */ 3630 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base) 3631 { 3632 int rc; 3633 3634 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)); 3635 3636 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 3637 u32 val = tr32(GRC_VCPU_EXT_CTRL); 3638 3639 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU); 3640 return 0; 3641 } 3642 if (cpu_base == RX_CPU_BASE) { 3643 rc = tg3_rxcpu_pause(tp); 3644 } else { 3645 /* 3646 * There is only an Rx CPU for the 5750 derivative in the 3647 * BCM4785. 
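		 * With no Tx CPU to pause on those SSB cores, the request
		 * is treated as an immediate success below.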
3648 */ 3649 if (tg3_flag(tp, IS_SSB_CORE)) 3650 return 0; 3651 3652 rc = tg3_txcpu_pause(tp); 3653 } 3654 3655 if (rc) { 3656 netdev_err(tp->dev, "%s timed out, %s CPU\n", 3657 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX"); 3658 return -ENODEV; 3659 } 3660 3661 /* Clear firmware's nvram arbitration. */ 3662 if (tg3_flag(tp, NVRAM)) 3663 tw32(NVRAM_SWARB, SWARB_REQ_CLR0); 3664 return 0; 3665 } 3666 3667 static int tg3_fw_data_len(struct tg3 *tp, 3668 const struct tg3_firmware_hdr *fw_hdr) 3669 { 3670 int fw_len; 3671 3672 /* Non fragmented firmware have one firmware header followed by a 3673 * contiguous chunk of data to be written. The length field in that 3674 * header is not the length of data to be written but the complete 3675 * length of the bss. The data length is determined based on 3676 * tp->fw->size minus headers. 3677 * 3678 * Fragmented firmware have a main header followed by multiple 3679 * fragments. Each fragment is identical to non fragmented firmware 3680 * with a firmware header followed by a contiguous chunk of data. In 3681 * the main header, the length field is unused and set to 0xffffffff. 3682 * In each fragment header the length is the entire size of that 3683 * fragment i.e. fragment data + header length. Data length is 3684 * therefore length field in the header minus TG3_FW_HDR_LEN. 3685 */ 3686 if (tp->fw_len == 0xffffffff) 3687 fw_len = be32_to_cpu(fw_hdr->len); 3688 else 3689 fw_len = tp->fw->size; 3690 3691 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32); 3692 } 3693 3694 /* tp->lock is held. */ 3695 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, 3696 u32 cpu_scratch_base, int cpu_scratch_size, 3697 const struct tg3_firmware_hdr *fw_hdr) 3698 { 3699 int err, i; 3700 void (*write_op)(struct tg3 *, u32, u32); 3701 int total_len = tp->fw->size; 3702 3703 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) { 3704 netdev_err(tp->dev, 3705 "%s: Trying to load TX cpu firmware which is 5705\n", 3706 __func__); 3707 return -EINVAL; 3708 } 3709 3710 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766) 3711 write_op = tg3_write_mem; 3712 else 3713 write_op = tg3_write_indirect_reg32; 3714 3715 if (tg3_asic_rev(tp) != ASIC_REV_57766) { 3716 /* It is possible that bootcode is still loading at this point. 3717 * Get the nvram lock first before halting the cpu. 3718 */ 3719 int lock_err = tg3_nvram_lock(tp); 3720 err = tg3_halt_cpu(tp, cpu_base); 3721 if (!lock_err) 3722 tg3_nvram_unlock(tp); 3723 if (err) 3724 goto out; 3725 3726 for (i = 0; i < cpu_scratch_size; i += sizeof(u32)) 3727 write_op(tp, cpu_scratch_base + i, 0); 3728 tw32(cpu_base + CPU_STATE, 0xffffffff); 3729 tw32(cpu_base + CPU_MODE, 3730 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT); 3731 } else { 3732 /* Subtract additional main header for fragmented firmware and 3733 * advance to the first fragment 3734 */ 3735 total_len -= TG3_FW_HDR_LEN; 3736 fw_hdr++; 3737 } 3738 3739 do { 3740 u32 *fw_data = (u32 *)(fw_hdr + 1); 3741 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++) 3742 write_op(tp, cpu_scratch_base + 3743 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) + 3744 (i * sizeof(u32)), 3745 be32_to_cpu(fw_data[i])); 3746 3747 total_len -= be32_to_cpu(fw_hdr->len); 3748 3749 /* Advance to next fragment */ 3750 fw_hdr = (struct tg3_firmware_hdr *) 3751 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len)); 3752 } while (total_len > 0); 3753 3754 err = 0; 3755 3756 out: 3757 return err; 3758 } 3759 3760 /* tp->lock is held. 
*/ 3761 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc) 3762 { 3763 int i; 3764 const int iters = 5; 3765 3766 tw32(cpu_base + CPU_STATE, 0xffffffff); 3767 tw32_f(cpu_base + CPU_PC, pc); 3768 3769 for (i = 0; i < iters; i++) { 3770 if (tr32(cpu_base + CPU_PC) == pc) 3771 break; 3772 tw32(cpu_base + CPU_STATE, 0xffffffff); 3773 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); 3774 tw32_f(cpu_base + CPU_PC, pc); 3775 udelay(1000); 3776 } 3777 3778 return (i == iters) ? -EBUSY : 0; 3779 } 3780 3781 /* tp->lock is held. */ 3782 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp) 3783 { 3784 const struct tg3_firmware_hdr *fw_hdr; 3785 int err; 3786 3787 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; 3788 3789 /* Firmware blob starts with version numbers, followed by 3790 start address and length. We are setting complete length. 3791 length = end_address_of_bss - start_address_of_text. 3792 Remainder is the blob to be loaded contiguously 3793 from start address. */ 3794 3795 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE, 3796 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE, 3797 fw_hdr); 3798 if (err) 3799 return err; 3800 3801 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE, 3802 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE, 3803 fw_hdr); 3804 if (err) 3805 return err; 3806 3807 /* Now startup only the RX cpu. */ 3808 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE, 3809 be32_to_cpu(fw_hdr->base_addr)); 3810 if (err) { 3811 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x " 3812 "should be %08x\n", __func__, 3813 tr32(RX_CPU_BASE + CPU_PC), 3814 be32_to_cpu(fw_hdr->base_addr)); 3815 return -ENODEV; 3816 } 3817 3818 tg3_rxcpu_resume(tp); 3819 3820 return 0; 3821 } 3822 3823 static int tg3_validate_rxcpu_state(struct tg3 *tp) 3824 { 3825 const int iters = 1000; 3826 int i; 3827 u32 val; 3828 3829 /* Wait for boot code to complete initialization and enter service 3830 * loop. It is then safe to download service patches 3831 */ 3832 for (i = 0; i < iters; i++) { 3833 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP) 3834 break; 3835 3836 udelay(10); 3837 } 3838 3839 if (i == iters) { 3840 netdev_err(tp->dev, "Boot code not ready for service patches\n"); 3841 return -EBUSY; 3842 } 3843 3844 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE); 3845 if (val & 0xff) { 3846 netdev_warn(tp->dev, 3847 "Other patches exist. Not downloading EEE patch\n"); 3848 return -EEXIST; 3849 } 3850 3851 return 0; 3852 } 3853 3854 /* tp->lock is held. */ 3855 static void tg3_load_57766_firmware(struct tg3 *tp) 3856 { 3857 struct tg3_firmware_hdr *fw_hdr; 3858 3859 if (!tg3_flag(tp, NO_NVRAM)) 3860 return; 3861 3862 if (tg3_validate_rxcpu_state(tp)) 3863 return; 3864 3865 if (!tp->fw) 3866 return; 3867 3868 /* This firmware blob has a different format than older firmware 3869 * releases as given below. The main difference is we have fragmented 3870 * data to be written to non-contiguous locations. 3871 * 3872 * In the beginning we have a firmware header identical to other 3873 * firmware which consists of version, base addr and length. The length 3874 * here is unused and set to 0xffffffff. 3875 * 3876 * This is followed by a series of firmware fragments which are 3877 * individually identical to previous firmware. i.e. they have the 3878 * firmware header and followed by data for that fragment. The version 3879 * field of the individual fragment header is unused. 
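 * The main header's base_addr field is still meaningful: it is checked
 * against TG3_57766_FW_BASE_ADDR below before anything is downloaded.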
3880 */ 3881 3882 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; 3883 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR) 3884 return; 3885 3886 if (tg3_rxcpu_pause(tp)) 3887 return; 3888 3889 /* tg3_load_firmware_cpu() will always succeed for the 57766 */ 3890 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr); 3891 3892 tg3_rxcpu_resume(tp); 3893 } 3894 3895 /* tp->lock is held. */ 3896 static int tg3_load_tso_firmware(struct tg3 *tp) 3897 { 3898 const struct tg3_firmware_hdr *fw_hdr; 3899 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size; 3900 int err; 3901 3902 if (!tg3_flag(tp, FW_TSO)) 3903 return 0; 3904 3905 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; 3906 3907 /* Firmware blob starts with version numbers, followed by 3908 start address and length. We are setting complete length. 3909 length = end_address_of_bss - start_address_of_text. 3910 Remainder is the blob to be loaded contiguously 3911 from start address. */ 3912 3913 cpu_scratch_size = tp->fw_len; 3914 3915 if (tg3_asic_rev(tp) == ASIC_REV_5705) { 3916 cpu_base = RX_CPU_BASE; 3917 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705; 3918 } else { 3919 cpu_base = TX_CPU_BASE; 3920 cpu_scratch_base = TX_CPU_SCRATCH_BASE; 3921 cpu_scratch_size = TX_CPU_SCRATCH_SIZE; 3922 } 3923 3924 err = tg3_load_firmware_cpu(tp, cpu_base, 3925 cpu_scratch_base, cpu_scratch_size, 3926 fw_hdr); 3927 if (err) 3928 return err; 3929 3930 /* Now startup the cpu. */ 3931 err = tg3_pause_cpu_and_set_pc(tp, cpu_base, 3932 be32_to_cpu(fw_hdr->base_addr)); 3933 if (err) { 3934 netdev_err(tp->dev, 3935 "%s fails to set CPU PC, is %08x should be %08x\n", 3936 __func__, tr32(cpu_base + CPU_PC), 3937 be32_to_cpu(fw_hdr->base_addr)); 3938 return -ENODEV; 3939 } 3940 3941 tg3_resume_cpu(tp, cpu_base); 3942 return 0; 3943 } 3944 3945 /* tp->lock is held. */ 3946 static void __tg3_set_one_mac_addr(struct tg3 *tp, const u8 *mac_addr, 3947 int index) 3948 { 3949 u32 addr_high, addr_low; 3950 3951 addr_high = ((mac_addr[0] << 8) | mac_addr[1]); 3952 addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) | 3953 (mac_addr[4] << 8) | mac_addr[5]); 3954 3955 if (index < 4) { 3956 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high); 3957 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low); 3958 } else { 3959 index -= 4; 3960 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high); 3961 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low); 3962 } 3963 } 3964 3965 /* tp->lock is held. */ 3966 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1) 3967 { 3968 u32 addr_high; 3969 int i; 3970 3971 for (i = 0; i < 4; i++) { 3972 if (i == 1 && skip_mac_1) 3973 continue; 3974 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i); 3975 } 3976 3977 if (tg3_asic_rev(tp) == ASIC_REV_5703 || 3978 tg3_asic_rev(tp) == ASIC_REV_5704) { 3979 for (i = 4; i < 16; i++) 3980 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i); 3981 } 3982 3983 addr_high = (tp->dev->dev_addr[0] + 3984 tp->dev->dev_addr[1] + 3985 tp->dev->dev_addr[2] + 3986 tp->dev->dev_addr[3] + 3987 tp->dev->dev_addr[4] + 3988 tp->dev->dev_addr[5]) & 3989 TX_BACKOFF_SEED_MASK; 3990 tw32(MAC_TX_BACKOFF_SEED, addr_high); 3991 } 3992 3993 static void tg3_enable_register_access(struct tg3 *tp) 3994 { 3995 /* 3996 * Make sure register accesses (indirect or otherwise) will function 3997 * correctly. 
3998 */ 3999 pci_write_config_dword(tp->pdev, 4000 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl); 4001 } 4002 4003 static int tg3_power_up(struct tg3 *tp) 4004 { 4005 int err; 4006 4007 tg3_enable_register_access(tp); 4008 4009 err = pci_set_power_state(tp->pdev, PCI_D0); 4010 if (!err) { 4011 /* Switch out of Vaux if it is a NIC */ 4012 tg3_pwrsrc_switch_to_vmain(tp); 4013 } else { 4014 netdev_err(tp->dev, "Transition to D0 failed\n"); 4015 } 4016 4017 return err; 4018 } 4019 4020 static int tg3_setup_phy(struct tg3 *, bool); 4021 4022 static int tg3_power_down_prepare(struct tg3 *tp) 4023 { 4024 u32 misc_host_ctrl; 4025 bool device_should_wake, do_low_power; 4026 4027 tg3_enable_register_access(tp); 4028 4029 /* Restore the CLKREQ setting. */ 4030 if (tg3_flag(tp, CLKREQ_BUG)) 4031 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL, 4032 PCI_EXP_LNKCTL_CLKREQ_EN); 4033 4034 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); 4035 tw32(TG3PCI_MISC_HOST_CTRL, 4036 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT); 4037 4038 device_should_wake = device_may_wakeup(&tp->pdev->dev) && 4039 tg3_flag(tp, WOL_ENABLE); 4040 4041 if (tg3_flag(tp, USE_PHYLIB)) { 4042 do_low_power = false; 4043 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) && 4044 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { 4045 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, }; 4046 struct phy_device *phydev; 4047 u32 phyid; 4048 4049 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 4050 4051 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; 4052 4053 tp->link_config.speed = phydev->speed; 4054 tp->link_config.duplex = phydev->duplex; 4055 tp->link_config.autoneg = phydev->autoneg; 4056 ethtool_convert_link_mode_to_legacy_u32( 4057 &tp->link_config.advertising, 4058 phydev->advertising); 4059 4060 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising); 4061 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, 4062 advertising); 4063 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, 4064 advertising); 4065 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, 4066 advertising); 4067 4068 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) { 4069 if (tg3_flag(tp, WOL_SPEED_100MB)) { 4070 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, 4071 advertising); 4072 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, 4073 advertising); 4074 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, 4075 advertising); 4076 } else { 4077 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, 4078 advertising); 4079 } 4080 } 4081 4082 linkmode_copy(phydev->advertising, advertising); 4083 phy_start_aneg(phydev); 4084 4085 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask; 4086 if (phyid != PHY_ID_BCMAC131) { 4087 phyid &= PHY_BCM_OUI_MASK; 4088 if (phyid == PHY_BCM_OUI_1 || 4089 phyid == PHY_BCM_OUI_2 || 4090 phyid == PHY_BCM_OUI_3) 4091 do_low_power = true; 4092 } 4093 } 4094 } else { 4095 do_low_power = true; 4096 4097 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) 4098 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; 4099 4100 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 4101 tg3_setup_phy(tp, false); 4102 } 4103 4104 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 4105 u32 val; 4106 4107 val = tr32(GRC_VCPU_EXT_CTRL); 4108 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL); 4109 } else if (!tg3_flag(tp, ENABLE_ASF)) { 4110 int i; 4111 u32 val; 4112 4113 for (i = 0; i < 200; i++) { 4114 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val); 4115 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) 4116 break; 4117 msleep(1); 4118 } 4119 } 4120 if (tg3_flag(tp, WOL_CAP)) 
4121 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE | 4122 WOL_DRV_STATE_SHUTDOWN | 4123 WOL_DRV_WOL | 4124 WOL_SET_MAGIC_PKT); 4125 4126 if (device_should_wake) { 4127 u32 mac_mode; 4128 4129 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { 4130 if (do_low_power && 4131 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) { 4132 tg3_phy_auxctl_write(tp, 4133 MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 4134 MII_TG3_AUXCTL_PCTL_WOL_EN | 4135 MII_TG3_AUXCTL_PCTL_100TX_LPWR | 4136 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC); 4137 udelay(40); 4138 } 4139 4140 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 4141 mac_mode = MAC_MODE_PORT_MODE_GMII; 4142 else if (tp->phy_flags & 4143 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) { 4144 if (tp->link_config.active_speed == SPEED_1000) 4145 mac_mode = MAC_MODE_PORT_MODE_GMII; 4146 else 4147 mac_mode = MAC_MODE_PORT_MODE_MII; 4148 } else 4149 mac_mode = MAC_MODE_PORT_MODE_MII; 4150 4151 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY; 4152 if (tg3_asic_rev(tp) == ASIC_REV_5700) { 4153 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ? 4154 SPEED_100 : SPEED_10; 4155 if (tg3_5700_link_polarity(tp, speed)) 4156 mac_mode |= MAC_MODE_LINK_POLARITY; 4157 else 4158 mac_mode &= ~MAC_MODE_LINK_POLARITY; 4159 } 4160 } else { 4161 mac_mode = MAC_MODE_PORT_MODE_TBI; 4162 } 4163 4164 if (!tg3_flag(tp, 5750_PLUS)) 4165 tw32(MAC_LED_CTRL, tp->led_ctrl); 4166 4167 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE; 4168 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) && 4169 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE))) 4170 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL; 4171 4172 if (tg3_flag(tp, ENABLE_APE)) 4173 mac_mode |= MAC_MODE_APE_TX_EN | 4174 MAC_MODE_APE_RX_EN | 4175 MAC_MODE_TDE_ENABLE; 4176 4177 tw32_f(MAC_MODE, mac_mode); 4178 udelay(100); 4179 4180 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE); 4181 udelay(10); 4182 } 4183 4184 if (!tg3_flag(tp, WOL_SPEED_100MB) && 4185 (tg3_asic_rev(tp) == ASIC_REV_5700 || 4186 tg3_asic_rev(tp) == ASIC_REV_5701)) { 4187 u32 base_val; 4188 4189 base_val = tp->pci_clock_ctrl; 4190 base_val |= (CLOCK_CTRL_RXCLK_DISABLE | 4191 CLOCK_CTRL_TXCLK_DISABLE); 4192 4193 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK | 4194 CLOCK_CTRL_PWRDOWN_PLL133, 40); 4195 } else if (tg3_flag(tp, 5780_CLASS) || 4196 tg3_flag(tp, CPMU_PRESENT) || 4197 tg3_asic_rev(tp) == ASIC_REV_5906) { 4198 /* do nothing */ 4199 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) { 4200 u32 newbits1, newbits2; 4201 4202 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 4203 tg3_asic_rev(tp) == ASIC_REV_5701) { 4204 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE | 4205 CLOCK_CTRL_TXCLK_DISABLE | 4206 CLOCK_CTRL_ALTCLK); 4207 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; 4208 } else if (tg3_flag(tp, 5705_PLUS)) { 4209 newbits1 = CLOCK_CTRL_625_CORE; 4210 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK; 4211 } else { 4212 newbits1 = CLOCK_CTRL_ALTCLK; 4213 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; 4214 } 4215 4216 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1, 4217 40); 4218 4219 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2, 4220 40); 4221 4222 if (!tg3_flag(tp, 5705_PLUS)) { 4223 u32 newbits3; 4224 4225 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 4226 tg3_asic_rev(tp) == ASIC_REV_5701) { 4227 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE | 4228 CLOCK_CTRL_TXCLK_DISABLE | 4229 CLOCK_CTRL_44MHZ_CORE); 4230 } else { 4231 newbits3 = CLOCK_CTRL_44MHZ_CORE; 4232 } 4233 4234 tw32_wait_f(TG3PCI_CLOCK_CTRL, 4235 tp->pci_clock_ctrl | newbits3, 40); 4236 } 4237 } 4238 4239 if (!(device_should_wake) && !tg3_flag(tp, 
ENABLE_ASF)) 4240 tg3_power_down_phy(tp, do_low_power); 4241 4242 tg3_frob_aux_power(tp, true); 4243 4244 /* Workaround for unstable PLL clock */ 4245 if ((!tg3_flag(tp, IS_SSB_CORE)) && 4246 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) || 4247 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) { 4248 u32 val = tr32(0x7d00); 4249 4250 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1); 4251 tw32(0x7d00, val); 4252 if (!tg3_flag(tp, ENABLE_ASF)) { 4253 int err; 4254 4255 err = tg3_nvram_lock(tp); 4256 tg3_halt_cpu(tp, RX_CPU_BASE); 4257 if (!err) 4258 tg3_nvram_unlock(tp); 4259 } 4260 } 4261 4262 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN); 4263 4264 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN); 4265 4266 return 0; 4267 } 4268 4269 static void tg3_power_down(struct tg3 *tp) 4270 { 4271 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE)); 4272 pci_set_power_state(tp->pdev, PCI_D3hot); 4273 } 4274 4275 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex) 4276 { 4277 switch (val & MII_TG3_AUX_STAT_SPDMASK) { 4278 case MII_TG3_AUX_STAT_10HALF: 4279 *speed = SPEED_10; 4280 *duplex = DUPLEX_HALF; 4281 break; 4282 4283 case MII_TG3_AUX_STAT_10FULL: 4284 *speed = SPEED_10; 4285 *duplex = DUPLEX_FULL; 4286 break; 4287 4288 case MII_TG3_AUX_STAT_100HALF: 4289 *speed = SPEED_100; 4290 *duplex = DUPLEX_HALF; 4291 break; 4292 4293 case MII_TG3_AUX_STAT_100FULL: 4294 *speed = SPEED_100; 4295 *duplex = DUPLEX_FULL; 4296 break; 4297 4298 case MII_TG3_AUX_STAT_1000HALF: 4299 *speed = SPEED_1000; 4300 *duplex = DUPLEX_HALF; 4301 break; 4302 4303 case MII_TG3_AUX_STAT_1000FULL: 4304 *speed = SPEED_1000; 4305 *duplex = DUPLEX_FULL; 4306 break; 4307 4308 default: 4309 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 4310 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 : 4311 SPEED_10; 4312 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? 
DUPLEX_FULL : 4313 DUPLEX_HALF; 4314 break; 4315 } 4316 *speed = SPEED_UNKNOWN; 4317 *duplex = DUPLEX_UNKNOWN; 4318 break; 4319 } 4320 } 4321 4322 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl) 4323 { 4324 int err = 0; 4325 u32 val, new_adv; 4326 4327 new_adv = ADVERTISE_CSMA; 4328 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL; 4329 new_adv |= mii_advertise_flowctrl(flowctrl); 4330 4331 err = tg3_writephy(tp, MII_ADVERTISE, new_adv); 4332 if (err) 4333 goto done; 4334 4335 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4336 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise); 4337 4338 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || 4339 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) 4340 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER; 4341 4342 err = tg3_writephy(tp, MII_CTRL1000, new_adv); 4343 if (err) 4344 goto done; 4345 } 4346 4347 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) 4348 goto done; 4349 4350 tw32(TG3_CPMU_EEE_MODE, 4351 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE); 4352 4353 err = tg3_phy_toggle_auxctl_smdsp(tp, true); 4354 if (!err) { 4355 u32 err2; 4356 4357 val = 0; 4358 /* Advertise 100-BaseTX EEE ability */ 4359 if (advertise & ADVERTISED_100baseT_Full) 4360 val |= MDIO_AN_EEE_ADV_100TX; 4361 /* Advertise 1000-BaseT EEE ability */ 4362 if (advertise & ADVERTISED_1000baseT_Full) 4363 val |= MDIO_AN_EEE_ADV_1000T; 4364 4365 if (!tp->eee.eee_enabled) { 4366 val = 0; 4367 tp->eee.advertised = 0; 4368 } else { 4369 tp->eee.advertised = advertise & 4370 (ADVERTISED_100baseT_Full | 4371 ADVERTISED_1000baseT_Full); 4372 } 4373 4374 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val); 4375 if (err) 4376 val = 0; 4377 4378 switch (tg3_asic_rev(tp)) { 4379 case ASIC_REV_5717: 4380 case ASIC_REV_57765: 4381 case ASIC_REV_57766: 4382 case ASIC_REV_5719: 4383 /* If we advertised any eee advertisements above... 
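 * then load the ALNOKO/RMRXSTO/OPCSINPT bits into MII_TG3_DSP_TAP26
 * below; otherwise zero is written to clear them.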
*/ 4384 if (val) 4385 val = MII_TG3_DSP_TAP26_ALNOKO | 4386 MII_TG3_DSP_TAP26_RMRXSTO | 4387 MII_TG3_DSP_TAP26_OPCSINPT; 4388 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); 4389 fallthrough; 4390 case ASIC_REV_5720: 4391 case ASIC_REV_5762: 4392 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val)) 4393 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val | 4394 MII_TG3_DSP_CH34TP2_HIBW01); 4395 } 4396 4397 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false); 4398 if (!err) 4399 err = err2; 4400 } 4401 4402 done: 4403 return err; 4404 } 4405 4406 static void tg3_phy_copper_begin(struct tg3 *tp) 4407 { 4408 if (tp->link_config.autoneg == AUTONEG_ENABLE || 4409 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { 4410 u32 adv, fc; 4411 4412 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) && 4413 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) { 4414 adv = ADVERTISED_10baseT_Half | 4415 ADVERTISED_10baseT_Full; 4416 if (tg3_flag(tp, WOL_SPEED_100MB)) 4417 adv |= ADVERTISED_100baseT_Half | 4418 ADVERTISED_100baseT_Full; 4419 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) { 4420 if (!(tp->phy_flags & 4421 TG3_PHYFLG_DISABLE_1G_HD_ADV)) 4422 adv |= ADVERTISED_1000baseT_Half; 4423 adv |= ADVERTISED_1000baseT_Full; 4424 } 4425 4426 fc = FLOW_CTRL_TX | FLOW_CTRL_RX; 4427 } else { 4428 adv = tp->link_config.advertising; 4429 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) 4430 adv &= ~(ADVERTISED_1000baseT_Half | 4431 ADVERTISED_1000baseT_Full); 4432 4433 fc = tp->link_config.flowctrl; 4434 } 4435 4436 tg3_phy_autoneg_cfg(tp, adv, fc); 4437 4438 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) && 4439 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) { 4440 /* Normally during power down we want to autonegotiate 4441 * the lowest possible speed for WOL. However, to avoid 4442 * link flap, we leave it untouched. 4443 */ 4444 return; 4445 } 4446 4447 tg3_writephy(tp, MII_BMCR, 4448 BMCR_ANENABLE | BMCR_ANRESTART); 4449 } else { 4450 int i; 4451 u32 bmcr, orig_bmcr; 4452 4453 tp->link_config.active_speed = tp->link_config.speed; 4454 tp->link_config.active_duplex = tp->link_config.duplex; 4455 4456 if (tg3_asic_rev(tp) == ASIC_REV_5714) { 4457 /* With autoneg disabled, 5715 only links up when the 4458 * advertisement register has the configured speed 4459 * enabled. 
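 * Writing ADVERTISE_ALL below keeps every 10/100 speed advertised,
 * so the forced speed is always among them.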
4460 */ 4461 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL); 4462 } 4463 4464 bmcr = 0; 4465 switch (tp->link_config.speed) { 4466 default: 4467 case SPEED_10: 4468 break; 4469 4470 case SPEED_100: 4471 bmcr |= BMCR_SPEED100; 4472 break; 4473 4474 case SPEED_1000: 4475 bmcr |= BMCR_SPEED1000; 4476 break; 4477 } 4478 4479 if (tp->link_config.duplex == DUPLEX_FULL) 4480 bmcr |= BMCR_FULLDPLX; 4481 4482 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) && 4483 (bmcr != orig_bmcr)) { 4484 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK); 4485 for (i = 0; i < 1500; i++) { 4486 u32 tmp; 4487 4488 udelay(10); 4489 if (tg3_readphy(tp, MII_BMSR, &tmp) || 4490 tg3_readphy(tp, MII_BMSR, &tmp)) 4491 continue; 4492 if (!(tmp & BMSR_LSTATUS)) { 4493 udelay(40); 4494 break; 4495 } 4496 } 4497 tg3_writephy(tp, MII_BMCR, bmcr); 4498 udelay(40); 4499 } 4500 } 4501 } 4502 4503 static int tg3_phy_pull_config(struct tg3 *tp) 4504 { 4505 int err; 4506 u32 val; 4507 4508 err = tg3_readphy(tp, MII_BMCR, &val); 4509 if (err) 4510 goto done; 4511 4512 if (!(val & BMCR_ANENABLE)) { 4513 tp->link_config.autoneg = AUTONEG_DISABLE; 4514 tp->link_config.advertising = 0; 4515 tg3_flag_clear(tp, PAUSE_AUTONEG); 4516 4517 err = -EIO; 4518 4519 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) { 4520 case 0: 4521 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) 4522 goto done; 4523 4524 tp->link_config.speed = SPEED_10; 4525 break; 4526 case BMCR_SPEED100: 4527 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) 4528 goto done; 4529 4530 tp->link_config.speed = SPEED_100; 4531 break; 4532 case BMCR_SPEED1000: 4533 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4534 tp->link_config.speed = SPEED_1000; 4535 break; 4536 } 4537 fallthrough; 4538 default: 4539 goto done; 4540 } 4541 4542 if (val & BMCR_FULLDPLX) 4543 tp->link_config.duplex = DUPLEX_FULL; 4544 else 4545 tp->link_config.duplex = DUPLEX_HALF; 4546 4547 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX; 4548 4549 err = 0; 4550 goto done; 4551 } 4552 4553 tp->link_config.autoneg = AUTONEG_ENABLE; 4554 tp->link_config.advertising = ADVERTISED_Autoneg; 4555 tg3_flag_set(tp, PAUSE_AUTONEG); 4556 4557 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { 4558 u32 adv; 4559 4560 err = tg3_readphy(tp, MII_ADVERTISE, &val); 4561 if (err) 4562 goto done; 4563 4564 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL); 4565 tp->link_config.advertising |= adv | ADVERTISED_TP; 4566 4567 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val); 4568 } else { 4569 tp->link_config.advertising |= ADVERTISED_FIBRE; 4570 } 4571 4572 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4573 u32 adv; 4574 4575 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { 4576 err = tg3_readphy(tp, MII_CTRL1000, &val); 4577 if (err) 4578 goto done; 4579 4580 adv = mii_ctrl1000_to_ethtool_adv_t(val); 4581 } else { 4582 err = tg3_readphy(tp, MII_ADVERTISE, &val); 4583 if (err) 4584 goto done; 4585 4586 adv = tg3_decode_flowctrl_1000X(val); 4587 tp->link_config.flowctrl = adv; 4588 4589 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL); 4590 adv = mii_adv_to_ethtool_adv_x(val); 4591 } 4592 4593 tp->link_config.advertising |= adv; 4594 } 4595 4596 done: 4597 return err; 4598 } 4599 4600 static int tg3_init_5401phy_dsp(struct tg3 *tp) 4601 { 4602 int err; 4603 4604 /* Turn off tap power management. 
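 * Both this and the extended packet length bit below are carried in
 * the single 0x4c20 AUXCTL shadow write.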
*/ 4605 /* Set Extended packet length bit */ 4606 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20); 4607 4608 err |= tg3_phydsp_write(tp, 0x0012, 0x1804); 4609 err |= tg3_phydsp_write(tp, 0x0013, 0x1204); 4610 err |= tg3_phydsp_write(tp, 0x8006, 0x0132); 4611 err |= tg3_phydsp_write(tp, 0x8006, 0x0232); 4612 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20); 4613 4614 udelay(40); 4615 4616 return err; 4617 } 4618 4619 static bool tg3_phy_eee_config_ok(struct tg3 *tp) 4620 { 4621 struct ethtool_eee eee; 4622 4623 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) 4624 return true; 4625 4626 tg3_eee_pull_config(tp, &eee); 4627 4628 if (tp->eee.eee_enabled) { 4629 if (tp->eee.advertised != eee.advertised || 4630 tp->eee.tx_lpi_timer != eee.tx_lpi_timer || 4631 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled) 4632 return false; 4633 } else { 4634 /* EEE is disabled but we're advertising */ 4635 if (eee.advertised) 4636 return false; 4637 } 4638 4639 return true; 4640 } 4641 4642 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv) 4643 { 4644 u32 advmsk, tgtadv, advertising; 4645 4646 advertising = tp->link_config.advertising; 4647 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL; 4648 4649 advmsk = ADVERTISE_ALL; 4650 if (tp->link_config.active_duplex == DUPLEX_FULL) { 4651 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl); 4652 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 4653 } 4654 4655 if (tg3_readphy(tp, MII_ADVERTISE, lcladv)) 4656 return false; 4657 4658 if ((*lcladv & advmsk) != tgtadv) 4659 return false; 4660 4661 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4662 u32 tg3_ctrl; 4663 4664 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising); 4665 4666 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl)) 4667 return false; 4668 4669 if (tgtadv && 4670 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || 4671 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) { 4672 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER; 4673 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL | 4674 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER); 4675 } else { 4676 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL); 4677 } 4678 4679 if (tg3_ctrl != tgtadv) 4680 return false; 4681 } 4682 4683 return true; 4684 } 4685 4686 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv) 4687 { 4688 u32 lpeth = 0; 4689 4690 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4691 u32 val; 4692 4693 if (tg3_readphy(tp, MII_STAT1000, &val)) 4694 return false; 4695 4696 lpeth = mii_stat1000_to_ethtool_lpa_t(val); 4697 } 4698 4699 if (tg3_readphy(tp, MII_LPA, rmtadv)) 4700 return false; 4701 4702 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv); 4703 tp->link_config.rmt_adv = lpeth; 4704 4705 return true; 4706 } 4707 4708 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up) 4709 { 4710 if (curr_link_up != tp->link_up) { 4711 if (curr_link_up) { 4712 netif_carrier_on(tp->dev); 4713 } else { 4714 netif_carrier_off(tp->dev); 4715 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 4716 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 4717 } 4718 4719 tg3_link_report(tp); 4720 return true; 4721 } 4722 4723 return false; 4724 } 4725 4726 static void tg3_clear_mac_status(struct tg3 *tp) 4727 { 4728 tw32(MAC_EVENT, 0); 4729 4730 tw32_f(MAC_STATUS, 4731 MAC_STATUS_SYNC_CHANGED | 4732 MAC_STATUS_CFG_CHANGED | 4733 MAC_STATUS_MI_COMPLETION | 4734 MAC_STATUS_LNKSTATE_CHANGED); 4735 udelay(40); 4736 } 4737 4738 static void tg3_setup_eee(struct tg3 *tp) 4739 { 4740 u32 val; 4741 4742 
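/* Program the EEE link-idle detection sources, the LPI exit timing,
 * and the mode/debounce registers below from tp->eee, so that
 * tg3_phy_eee_config_ok() later sees a configuration consistent with
 * what the driver requested.
 */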
val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 | 4743 TG3_CPMU_EEE_LNKIDL_UART_IDL; 4744 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) 4745 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT; 4746 4747 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val); 4748 4749 tw32_f(TG3_CPMU_EEE_CTRL, 4750 TG3_CPMU_EEE_CTRL_EXIT_20_1_US); 4751 4752 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET | 4753 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) | 4754 TG3_CPMU_EEEMD_LPI_IN_RX | 4755 TG3_CPMU_EEEMD_EEE_ENABLE; 4756 4757 if (tg3_asic_rev(tp) != ASIC_REV_5717) 4758 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN; 4759 4760 if (tg3_flag(tp, ENABLE_APE)) 4761 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN; 4762 4763 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0); 4764 4765 tw32_f(TG3_CPMU_EEE_DBTMR1, 4766 TG3_CPMU_DBTMR1_PCIEXIT_2047US | 4767 (tp->eee.tx_lpi_timer & 0xffff)); 4768 4769 tw32_f(TG3_CPMU_EEE_DBTMR2, 4770 TG3_CPMU_DBTMR2_APE_TX_2047US | 4771 TG3_CPMU_DBTMR2_TXIDXEQ_2047US); 4772 } 4773 4774 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset) 4775 { 4776 bool current_link_up; 4777 u32 bmsr, val; 4778 u32 lcl_adv, rmt_adv; 4779 u32 current_speed; 4780 u8 current_duplex; 4781 int i, err; 4782 4783 tg3_clear_mac_status(tp); 4784 4785 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { 4786 tw32_f(MAC_MI_MODE, 4787 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); 4788 udelay(80); 4789 } 4790 4791 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0); 4792 4793 /* Some third-party PHYs need to be reset on link going 4794 * down. 4795 */ 4796 if ((tg3_asic_rev(tp) == ASIC_REV_5703 || 4797 tg3_asic_rev(tp) == ASIC_REV_5704 || 4798 tg3_asic_rev(tp) == ASIC_REV_5705) && 4799 tp->link_up) { 4800 tg3_readphy(tp, MII_BMSR, &bmsr); 4801 if (!tg3_readphy(tp, MII_BMSR, &bmsr) && 4802 !(bmsr & BMSR_LSTATUS)) 4803 force_reset = true; 4804 } 4805 if (force_reset) 4806 tg3_phy_reset(tp); 4807 4808 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { 4809 tg3_readphy(tp, MII_BMSR, &bmsr); 4810 if (tg3_readphy(tp, MII_BMSR, &bmsr) || 4811 !tg3_flag(tp, INIT_COMPLETE)) 4812 bmsr = 0; 4813 4814 if (!(bmsr & BMSR_LSTATUS)) { 4815 err = tg3_init_5401phy_dsp(tp); 4816 if (err) 4817 return err; 4818 4819 tg3_readphy(tp, MII_BMSR, &bmsr); 4820 for (i = 0; i < 1000; i++) { 4821 udelay(10); 4822 if (!tg3_readphy(tp, MII_BMSR, &bmsr) && 4823 (bmsr & BMSR_LSTATUS)) { 4824 udelay(40); 4825 break; 4826 } 4827 } 4828 4829 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) == 4830 TG3_PHY_REV_BCM5401_B0 && 4831 !(bmsr & BMSR_LSTATUS) && 4832 tp->link_config.active_speed == SPEED_1000) { 4833 err = tg3_phy_reset(tp); 4834 if (!err) 4835 err = tg3_init_5401phy_dsp(tp); 4836 if (err) 4837 return err; 4838 } 4839 } 4840 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || 4841 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) { 4842 /* 5701 {A0,B0} CRC bug workaround */ 4843 tg3_writephy(tp, 0x15, 0x0a75); 4844 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68); 4845 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); 4846 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68); 4847 } 4848 4849 /* Clear pending interrupts... 
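 * (MII_TG3_ISTAT is read twice below to flush any latched events.)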
*/ 4850 tg3_readphy(tp, MII_TG3_ISTAT, &val); 4851 tg3_readphy(tp, MII_TG3_ISTAT, &val); 4852 4853 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) 4854 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG); 4855 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) 4856 tg3_writephy(tp, MII_TG3_IMASK, ~0); 4857 4858 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 4859 tg3_asic_rev(tp) == ASIC_REV_5701) { 4860 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1) 4861 tg3_writephy(tp, MII_TG3_EXT_CTRL, 4862 MII_TG3_EXT_CTRL_LNK3_LED_MODE); 4863 else 4864 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0); 4865 } 4866 4867 current_link_up = false; 4868 current_speed = SPEED_UNKNOWN; 4869 current_duplex = DUPLEX_UNKNOWN; 4870 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE; 4871 tp->link_config.rmt_adv = 0; 4872 4873 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) { 4874 err = tg3_phy_auxctl_read(tp, 4875 MII_TG3_AUXCTL_SHDWSEL_MISCTEST, 4876 &val); 4877 if (!err && !(val & (1 << 10))) { 4878 tg3_phy_auxctl_write(tp, 4879 MII_TG3_AUXCTL_SHDWSEL_MISCTEST, 4880 val | (1 << 10)); 4881 goto relink; 4882 } 4883 } 4884 4885 bmsr = 0; 4886 for (i = 0; i < 100; i++) { 4887 tg3_readphy(tp, MII_BMSR, &bmsr); 4888 if (!tg3_readphy(tp, MII_BMSR, &bmsr) && 4889 (bmsr & BMSR_LSTATUS)) 4890 break; 4891 udelay(40); 4892 } 4893 4894 if (bmsr & BMSR_LSTATUS) { 4895 u32 aux_stat, bmcr; 4896 4897 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat); 4898 for (i = 0; i < 2000; i++) { 4899 udelay(10); 4900 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) && 4901 aux_stat) 4902 break; 4903 } 4904 4905 tg3_aux_stat_to_speed_duplex(tp, aux_stat, 4906 &current_speed, 4907 &current_duplex); 4908 4909 bmcr = 0; 4910 for (i = 0; i < 200; i++) { 4911 tg3_readphy(tp, MII_BMCR, &bmcr); 4912 if (tg3_readphy(tp, MII_BMCR, &bmcr)) 4913 continue; 4914 if (bmcr && bmcr != 0x7fff) 4915 break; 4916 udelay(10); 4917 } 4918 4919 lcl_adv = 0; 4920 rmt_adv = 0; 4921 4922 tp->link_config.active_speed = current_speed; 4923 tp->link_config.active_duplex = current_duplex; 4924 4925 if (tp->link_config.autoneg == AUTONEG_ENABLE) { 4926 bool eee_config_ok = tg3_phy_eee_config_ok(tp); 4927 4928 if ((bmcr & BMCR_ANENABLE) && 4929 eee_config_ok && 4930 tg3_phy_copper_an_config_ok(tp, &lcl_adv) && 4931 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv)) 4932 current_link_up = true; 4933 4934 /* Changes to EEE settings take effect only after a phy 4935 * reset. If we have skipped a reset due to Link Flap 4936 * Avoidance being enabled, do it now.
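 * tg3_setup_eee() reloads the EEE timers and the phy reset that
 * follows makes the new settings take effect.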
4937 */ 4938 if (!eee_config_ok && 4939 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && 4940 !force_reset) { 4941 tg3_setup_eee(tp); 4942 tg3_phy_reset(tp); 4943 } 4944 } else { 4945 if (!(bmcr & BMCR_ANENABLE) && 4946 tp->link_config.speed == current_speed && 4947 tp->link_config.duplex == current_duplex) { 4948 current_link_up = true; 4949 } 4950 } 4951 4952 if (current_link_up && 4953 tp->link_config.active_duplex == DUPLEX_FULL) { 4954 u32 reg, bit; 4955 4956 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 4957 reg = MII_TG3_FET_GEN_STAT; 4958 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT; 4959 } else { 4960 reg = MII_TG3_EXT_STAT; 4961 bit = MII_TG3_EXT_STAT_MDIX; 4962 } 4963 4964 if (!tg3_readphy(tp, reg, &val) && (val & bit)) 4965 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE; 4966 4967 tg3_setup_flow_control(tp, lcl_adv, rmt_adv); 4968 } 4969 } 4970 4971 relink: 4972 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { 4973 tg3_phy_copper_begin(tp); 4974 4975 if (tg3_flag(tp, ROBOSWITCH)) { 4976 current_link_up = true; 4977 /* FIXME: when BCM5325 switch is used use 100 MBit/s */ 4978 current_speed = SPEED_1000; 4979 current_duplex = DUPLEX_FULL; 4980 tp->link_config.active_speed = current_speed; 4981 tp->link_config.active_duplex = current_duplex; 4982 } 4983 4984 tg3_readphy(tp, MII_BMSR, &bmsr); 4985 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) || 4986 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)) 4987 current_link_up = true; 4988 } 4989 4990 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK; 4991 if (current_link_up) { 4992 if (tp->link_config.active_speed == SPEED_100 || 4993 tp->link_config.active_speed == SPEED_10) 4994 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 4995 else 4996 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 4997 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) 4998 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 4999 else 5000 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 5001 5002 /* In order for the 5750 core in BCM4785 chip to work properly 5003 * in RGMII mode, the Led Control Register must be set up. 5004 */ 5005 if (tg3_flag(tp, RGMII_MODE)) { 5006 u32 led_ctrl = tr32(MAC_LED_CTRL); 5007 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON); 5008 5009 if (tp->link_config.active_speed == SPEED_10) 5010 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE; 5011 else if (tp->link_config.active_speed == SPEED_100) 5012 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE | 5013 LED_CTRL_100MBPS_ON); 5014 else if (tp->link_config.active_speed == SPEED_1000) 5015 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE | 5016 LED_CTRL_1000MBPS_ON); 5017 5018 tw32(MAC_LED_CTRL, led_ctrl); 5019 udelay(40); 5020 } 5021 5022 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; 5023 if (tp->link_config.active_duplex == DUPLEX_HALF) 5024 tp->mac_mode |= MAC_MODE_HALF_DUPLEX; 5025 5026 if (tg3_asic_rev(tp) == ASIC_REV_5700) { 5027 if (current_link_up && 5028 tg3_5700_link_polarity(tp, tp->link_config.active_speed)) 5029 tp->mac_mode |= MAC_MODE_LINK_POLARITY; 5030 else 5031 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; 5032 } 5033 5034 /* ??? Without this setting Netgear GA302T PHY does not 5035 * ??? send/receive packets... 5036 */ 5037 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 && 5038 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) { 5039 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL; 5040 tw32_f(MAC_MI_MODE, tp->mi_mode); 5041 udelay(80); 5042 } 5043 5044 tw32_f(MAC_MODE, tp->mac_mode); 5045 udelay(40); 5046 5047 tg3_phy_eee_adjust(tp, current_link_up); 5048 5049 if (tg3_flag(tp, USE_LINKCHG_REG)) { 5050 /* Polled via timer. 
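 * The periodic timer watches the link-change register, so the
 * MAC_EVENT link-state interrupt is left masked here; the else
 * branch below enables it.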
*/ 5051 tw32_f(MAC_EVENT, 0); 5052 } else { 5053 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); 5054 } 5055 udelay(40); 5056 5057 if (tg3_asic_rev(tp) == ASIC_REV_5700 && 5058 current_link_up && 5059 tp->link_config.active_speed == SPEED_1000 && 5060 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) { 5061 udelay(120); 5062 tw32_f(MAC_STATUS, 5063 (MAC_STATUS_SYNC_CHANGED | 5064 MAC_STATUS_CFG_CHANGED)); 5065 udelay(40); 5066 tg3_write_mem(tp, 5067 NIC_SRAM_FIRMWARE_MBOX, 5068 NIC_SRAM_FIRMWARE_MBOX_MAGIC2); 5069 } 5070 5071 /* Prevent send BD corruption. */ 5072 if (tg3_flag(tp, CLKREQ_BUG)) { 5073 if (tp->link_config.active_speed == SPEED_100 || 5074 tp->link_config.active_speed == SPEED_10) 5075 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL, 5076 PCI_EXP_LNKCTL_CLKREQ_EN); 5077 else 5078 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL, 5079 PCI_EXP_LNKCTL_CLKREQ_EN); 5080 } 5081 5082 tg3_test_and_report_link_chg(tp, current_link_up); 5083 5084 return 0; 5085 } 5086 5087 struct tg3_fiber_aneginfo { 5088 int state; 5089 #define ANEG_STATE_UNKNOWN 0 5090 #define ANEG_STATE_AN_ENABLE 1 5091 #define ANEG_STATE_RESTART_INIT 2 5092 #define ANEG_STATE_RESTART 3 5093 #define ANEG_STATE_DISABLE_LINK_OK 4 5094 #define ANEG_STATE_ABILITY_DETECT_INIT 5 5095 #define ANEG_STATE_ABILITY_DETECT 6 5096 #define ANEG_STATE_ACK_DETECT_INIT 7 5097 #define ANEG_STATE_ACK_DETECT 8 5098 #define ANEG_STATE_COMPLETE_ACK_INIT 9 5099 #define ANEG_STATE_COMPLETE_ACK 10 5100 #define ANEG_STATE_IDLE_DETECT_INIT 11 5101 #define ANEG_STATE_IDLE_DETECT 12 5102 #define ANEG_STATE_LINK_OK 13 5103 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14 5104 #define ANEG_STATE_NEXT_PAGE_WAIT 15 5105 5106 u32 flags; 5107 #define MR_AN_ENABLE 0x00000001 5108 #define MR_RESTART_AN 0x00000002 5109 #define MR_AN_COMPLETE 0x00000004 5110 #define MR_PAGE_RX 0x00000008 5111 #define MR_NP_LOADED 0x00000010 5112 #define MR_TOGGLE_TX 0x00000020 5113 #define MR_LP_ADV_FULL_DUPLEX 0x00000040 5114 #define MR_LP_ADV_HALF_DUPLEX 0x00000080 5115 #define MR_LP_ADV_SYM_PAUSE 0x00000100 5116 #define MR_LP_ADV_ASYM_PAUSE 0x00000200 5117 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400 5118 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800 5119 #define MR_LP_ADV_NEXT_PAGE 0x00001000 5120 #define MR_TOGGLE_RX 0x00002000 5121 #define MR_NP_RX 0x00004000 5122 5123 #define MR_LINK_OK 0x80000000 5124 5125 unsigned long link_time, cur_time; 5126 5127 u32 ability_match_cfg; 5128 int ability_match_count; 5129 5130 char ability_match, idle_match, ack_match; 5131 5132 u32 txconfig, rxconfig; 5133 #define ANEG_CFG_NP 0x00000080 5134 #define ANEG_CFG_ACK 0x00000040 5135 #define ANEG_CFG_RF2 0x00000020 5136 #define ANEG_CFG_RF1 0x00000010 5137 #define ANEG_CFG_PS2 0x00000001 5138 #define ANEG_CFG_PS1 0x00008000 5139 #define ANEG_CFG_HD 0x00004000 5140 #define ANEG_CFG_FD 0x00002000 5141 #define ANEG_CFG_INVAL 0x00001f06 5142 5143 }; 5144 #define ANEG_OK 0 5145 #define ANEG_DONE 1 5146 #define ANEG_TIMER_ENAB 2 5147 #define ANEG_FAILED -1 5148 5149 #define ANEG_STATE_SETTLE_TIME 10000 5150 5151 static int tg3_fiber_aneg_smachine(struct tg3 *tp, 5152 struct tg3_fiber_aneginfo *ap) 5153 { 5154 u16 flowctrl; 5155 unsigned long delta; 5156 u32 rx_cfg_reg; 5157 int ret; 5158 5159 if (ap->state == ANEG_STATE_UNKNOWN) { 5160 ap->rxconfig = 0; 5161 ap->link_time = 0; 5162 ap->cur_time = 0; 5163 ap->ability_match_cfg = 0; 5164 ap->ability_match_count = 0; 5165 ap->ability_match = 0; 5166 ap->idle_match = 0; 5167 ap->ack_match = 0; 5168 } 5169 ap->cur_time++; 5170 5171 if 
(tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) { 5172 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG); 5173 5174 if (rx_cfg_reg != ap->ability_match_cfg) { 5175 ap->ability_match_cfg = rx_cfg_reg; 5176 ap->ability_match = 0; 5177 ap->ability_match_count = 0; 5178 } else { 5179 if (++ap->ability_match_count > 1) { 5180 ap->ability_match = 1; 5181 ap->ability_match_cfg = rx_cfg_reg; 5182 } 5183 } 5184 if (rx_cfg_reg & ANEG_CFG_ACK) 5185 ap->ack_match = 1; 5186 else 5187 ap->ack_match = 0; 5188 5189 ap->idle_match = 0; 5190 } else { 5191 ap->idle_match = 1; 5192 ap->ability_match_cfg = 0; 5193 ap->ability_match_count = 0; 5194 ap->ability_match = 0; 5195 ap->ack_match = 0; 5196 5197 rx_cfg_reg = 0; 5198 } 5199 5200 ap->rxconfig = rx_cfg_reg; 5201 ret = ANEG_OK; 5202 5203 switch (ap->state) { 5204 case ANEG_STATE_UNKNOWN: 5205 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN)) 5206 ap->state = ANEG_STATE_AN_ENABLE; 5207 5208 fallthrough; 5209 case ANEG_STATE_AN_ENABLE: 5210 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX); 5211 if (ap->flags & MR_AN_ENABLE) { 5212 ap->link_time = 0; 5213 ap->cur_time = 0; 5214 ap->ability_match_cfg = 0; 5215 ap->ability_match_count = 0; 5216 ap->ability_match = 0; 5217 ap->idle_match = 0; 5218 ap->ack_match = 0; 5219 5220 ap->state = ANEG_STATE_RESTART_INIT; 5221 } else { 5222 ap->state = ANEG_STATE_DISABLE_LINK_OK; 5223 } 5224 break; 5225 5226 case ANEG_STATE_RESTART_INIT: 5227 ap->link_time = ap->cur_time; 5228 ap->flags &= ~(MR_NP_LOADED); 5229 ap->txconfig = 0; 5230 tw32(MAC_TX_AUTO_NEG, 0); 5231 tp->mac_mode |= MAC_MODE_SEND_CONFIGS; 5232 tw32_f(MAC_MODE, tp->mac_mode); 5233 udelay(40); 5234 5235 ret = ANEG_TIMER_ENAB; 5236 ap->state = ANEG_STATE_RESTART; 5237 5238 fallthrough; 5239 case ANEG_STATE_RESTART: 5240 delta = ap->cur_time - ap->link_time; 5241 if (delta > ANEG_STATE_SETTLE_TIME) 5242 ap->state = ANEG_STATE_ABILITY_DETECT_INIT; 5243 else 5244 ret = ANEG_TIMER_ENAB; 5245 break; 5246 5247 case ANEG_STATE_DISABLE_LINK_OK: 5248 ret = ANEG_DONE; 5249 break; 5250 5251 case ANEG_STATE_ABILITY_DETECT_INIT: 5252 ap->flags &= ~(MR_TOGGLE_TX); 5253 ap->txconfig = ANEG_CFG_FD; 5254 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); 5255 if (flowctrl & ADVERTISE_1000XPAUSE) 5256 ap->txconfig |= ANEG_CFG_PS1; 5257 if (flowctrl & ADVERTISE_1000XPSE_ASYM) 5258 ap->txconfig |= ANEG_CFG_PS2; 5259 tw32(MAC_TX_AUTO_NEG, ap->txconfig); 5260 tp->mac_mode |= MAC_MODE_SEND_CONFIGS; 5261 tw32_f(MAC_MODE, tp->mac_mode); 5262 udelay(40); 5263 5264 ap->state = ANEG_STATE_ABILITY_DETECT; 5265 break; 5266 5267 case ANEG_STATE_ABILITY_DETECT: 5268 if (ap->ability_match != 0 && ap->rxconfig != 0) 5269 ap->state = ANEG_STATE_ACK_DETECT_INIT; 5270 break; 5271 5272 case ANEG_STATE_ACK_DETECT_INIT: 5273 ap->txconfig |= ANEG_CFG_ACK; 5274 tw32(MAC_TX_AUTO_NEG, ap->txconfig); 5275 tp->mac_mode |= MAC_MODE_SEND_CONFIGS; 5276 tw32_f(MAC_MODE, tp->mac_mode); 5277 udelay(40); 5278 5279 ap->state = ANEG_STATE_ACK_DETECT; 5280 5281 fallthrough; 5282 case ANEG_STATE_ACK_DETECT: 5283 if (ap->ack_match != 0) { 5284 if ((ap->rxconfig & ~ANEG_CFG_ACK) == 5285 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) { 5286 ap->state = ANEG_STATE_COMPLETE_ACK_INIT; 5287 } else { 5288 ap->state = ANEG_STATE_AN_ENABLE; 5289 } 5290 } else if (ap->ability_match != 0 && 5291 ap->rxconfig == 0) { 5292 ap->state = ANEG_STATE_AN_ENABLE; 5293 } 5294 break; 5295 5296 case ANEG_STATE_COMPLETE_ACK_INIT: 5297 if (ap->rxconfig & ANEG_CFG_INVAL) { 5298 ret = ANEG_FAILED; 5299 break; 5300 } 5301 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX | 5302 
MR_LP_ADV_HALF_DUPLEX | 5303 MR_LP_ADV_SYM_PAUSE | 5304 MR_LP_ADV_ASYM_PAUSE | 5305 MR_LP_ADV_REMOTE_FAULT1 | 5306 MR_LP_ADV_REMOTE_FAULT2 | 5307 MR_LP_ADV_NEXT_PAGE | 5308 MR_TOGGLE_RX | 5309 MR_NP_RX); 5310 if (ap->rxconfig & ANEG_CFG_FD) 5311 ap->flags |= MR_LP_ADV_FULL_DUPLEX; 5312 if (ap->rxconfig & ANEG_CFG_HD) 5313 ap->flags |= MR_LP_ADV_HALF_DUPLEX; 5314 if (ap->rxconfig & ANEG_CFG_PS1) 5315 ap->flags |= MR_LP_ADV_SYM_PAUSE; 5316 if (ap->rxconfig & ANEG_CFG_PS2) 5317 ap->flags |= MR_LP_ADV_ASYM_PAUSE; 5318 if (ap->rxconfig & ANEG_CFG_RF1) 5319 ap->flags |= MR_LP_ADV_REMOTE_FAULT1; 5320 if (ap->rxconfig & ANEG_CFG_RF2) 5321 ap->flags |= MR_LP_ADV_REMOTE_FAULT2; 5322 if (ap->rxconfig & ANEG_CFG_NP) 5323 ap->flags |= MR_LP_ADV_NEXT_PAGE; 5324 5325 ap->link_time = ap->cur_time; 5326 5327 ap->flags ^= (MR_TOGGLE_TX); 5328 if (ap->rxconfig & 0x0008) 5329 ap->flags |= MR_TOGGLE_RX; 5330 if (ap->rxconfig & ANEG_CFG_NP) 5331 ap->flags |= MR_NP_RX; 5332 ap->flags |= MR_PAGE_RX; 5333 5334 ap->state = ANEG_STATE_COMPLETE_ACK; 5335 ret = ANEG_TIMER_ENAB; 5336 break; 5337 5338 case ANEG_STATE_COMPLETE_ACK: 5339 if (ap->ability_match != 0 && 5340 ap->rxconfig == 0) { 5341 ap->state = ANEG_STATE_AN_ENABLE; 5342 break; 5343 } 5344 delta = ap->cur_time - ap->link_time; 5345 if (delta > ANEG_STATE_SETTLE_TIME) { 5346 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) { 5347 ap->state = ANEG_STATE_IDLE_DETECT_INIT; 5348 } else { 5349 if ((ap->txconfig & ANEG_CFG_NP) == 0 && 5350 !(ap->flags & MR_NP_RX)) { 5351 ap->state = ANEG_STATE_IDLE_DETECT_INIT; 5352 } else { 5353 ret = ANEG_FAILED; 5354 } 5355 } 5356 } 5357 break; 5358 5359 case ANEG_STATE_IDLE_DETECT_INIT: 5360 ap->link_time = ap->cur_time; 5361 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; 5362 tw32_f(MAC_MODE, tp->mac_mode); 5363 udelay(40); 5364 5365 ap->state = ANEG_STATE_IDLE_DETECT; 5366 ret = ANEG_TIMER_ENAB; 5367 break; 5368 5369 case ANEG_STATE_IDLE_DETECT: 5370 if (ap->ability_match != 0 && 5371 ap->rxconfig == 0) { 5372 ap->state = ANEG_STATE_AN_ENABLE; 5373 break; 5374 } 5375 delta = ap->cur_time - ap->link_time; 5376 if (delta > ANEG_STATE_SETTLE_TIME) { 5377 /* XXX another gem from the Broadcom driver :( */ 5378 ap->state = ANEG_STATE_LINK_OK; 5379 } 5380 break; 5381 5382 case ANEG_STATE_LINK_OK: 5383 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK); 5384 ret = ANEG_DONE; 5385 break; 5386 5387 case ANEG_STATE_NEXT_PAGE_WAIT_INIT: 5388 /* ??? unimplemented */ 5389 break; 5390 5391 case ANEG_STATE_NEXT_PAGE_WAIT: 5392 /* ??? 
unimplemented */ 5393 break; 5394 5395 default: 5396 ret = ANEG_FAILED; 5397 break; 5398 } 5399 5400 return ret; 5401 } 5402 5403 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags) 5404 { 5405 int res = 0; 5406 struct tg3_fiber_aneginfo aninfo; 5407 int status = ANEG_FAILED; 5408 unsigned int tick; 5409 u32 tmp; 5410 5411 tw32_f(MAC_TX_AUTO_NEG, 0); 5412 5413 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; 5414 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII); 5415 udelay(40); 5416 5417 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS); 5418 udelay(40); 5419 5420 memset(&aninfo, 0, sizeof(aninfo)); 5421 aninfo.flags |= MR_AN_ENABLE; 5422 aninfo.state = ANEG_STATE_UNKNOWN; 5423 aninfo.cur_time = 0; 5424 tick = 0; 5425 while (++tick < 195000) { 5426 status = tg3_fiber_aneg_smachine(tp, &aninfo); 5427 if (status == ANEG_DONE || status == ANEG_FAILED) 5428 break; 5429 5430 udelay(1); 5431 } 5432 5433 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; 5434 tw32_f(MAC_MODE, tp->mac_mode); 5435 udelay(40); 5436 5437 *txflags = aninfo.txconfig; 5438 *rxflags = aninfo.flags; 5439 5440 if (status == ANEG_DONE && 5441 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK | 5442 MR_LP_ADV_FULL_DUPLEX))) 5443 res = 1; 5444 5445 return res; 5446 } 5447 5448 static void tg3_init_bcm8002(struct tg3 *tp) 5449 { 5450 u32 mac_status = tr32(MAC_STATUS); 5451 int i; 5452 5453 /* Reset when initting first time or we have a link. */ 5454 if (tg3_flag(tp, INIT_COMPLETE) && 5455 !(mac_status & MAC_STATUS_PCS_SYNCED)) 5456 return; 5457 5458 /* Set PLL lock range. */ 5459 tg3_writephy(tp, 0x16, 0x8007); 5460 5461 /* SW reset */ 5462 tg3_writephy(tp, MII_BMCR, BMCR_RESET); 5463 5464 /* Wait for reset to complete. */ 5465 /* XXX schedule_timeout() ... */ 5466 for (i = 0; i < 500; i++) 5467 udelay(10); 5468 5469 /* Config mode; select PMA/Ch 1 regs. */ 5470 tg3_writephy(tp, 0x10, 0x8411); 5471 5472 /* Enable auto-lock and comdet, select txclk for tx. */ 5473 tg3_writephy(tp, 0x11, 0x0a10); 5474 5475 tg3_writephy(tp, 0x18, 0x00a0); 5476 tg3_writephy(tp, 0x16, 0x41ff); 5477 5478 /* Assert and deassert POR. */ 5479 tg3_writephy(tp, 0x13, 0x0400); 5480 udelay(40); 5481 tg3_writephy(tp, 0x13, 0x0000); 5482 5483 tg3_writephy(tp, 0x11, 0x0a50); 5484 udelay(40); 5485 tg3_writephy(tp, 0x11, 0x0a10); 5486 5487 /* Wait for signal to stabilize */ 5488 /* XXX schedule_timeout() ... */ 5489 for (i = 0; i < 15000; i++) 5490 udelay(10); 5491 5492 /* Deselect the channel register so we can read the PHYID 5493 * later. 
5494 */ 5495 tg3_writephy(tp, 0x10, 0x8011); 5496 } 5497 5498 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status) 5499 { 5500 u16 flowctrl; 5501 bool current_link_up; 5502 u32 sg_dig_ctrl, sg_dig_status; 5503 u32 serdes_cfg, expected_sg_dig_ctrl; 5504 int workaround, port_a; 5505 5506 serdes_cfg = 0; 5507 workaround = 0; 5508 port_a = 1; 5509 current_link_up = false; 5510 5511 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 && 5512 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) { 5513 workaround = 1; 5514 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) 5515 port_a = 0; 5516 5517 /* preserve bits 0-11,13,14 for signal pre-emphasis */ 5518 /* preserve bits 20-23 for voltage regulator */ 5519 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff; 5520 } 5521 5522 sg_dig_ctrl = tr32(SG_DIG_CTRL); 5523 5524 if (tp->link_config.autoneg != AUTONEG_ENABLE) { 5525 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) { 5526 if (workaround) { 5527 u32 val = serdes_cfg; 5528 5529 if (port_a) 5530 val |= 0xc010000; 5531 else 5532 val |= 0x4010000; 5533 tw32_f(MAC_SERDES_CFG, val); 5534 } 5535 5536 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP); 5537 } 5538 if (mac_status & MAC_STATUS_PCS_SYNCED) { 5539 tg3_setup_flow_control(tp, 0, 0); 5540 current_link_up = true; 5541 } 5542 goto out; 5543 } 5544 5545 /* Want auto-negotiation. */ 5546 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP; 5547 5548 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); 5549 if (flowctrl & ADVERTISE_1000XPAUSE) 5550 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP; 5551 if (flowctrl & ADVERTISE_1000XPSE_ASYM) 5552 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE; 5553 5554 if (sg_dig_ctrl != expected_sg_dig_ctrl) { 5555 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) && 5556 tp->serdes_counter && 5557 ((mac_status & (MAC_STATUS_PCS_SYNCED | 5558 MAC_STATUS_RCVD_CFG)) == 5559 MAC_STATUS_PCS_SYNCED)) { 5560 tp->serdes_counter--; 5561 current_link_up = true; 5562 goto out; 5563 } 5564 restart_autoneg: 5565 if (workaround) 5566 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000); 5567 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET); 5568 udelay(5); 5569 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl); 5570 5571 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; 5572 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5573 } else if (mac_status & (MAC_STATUS_PCS_SYNCED | 5574 MAC_STATUS_SIGNAL_DET)) { 5575 sg_dig_status = tr32(SG_DIG_STATUS); 5576 mac_status = tr32(MAC_STATUS); 5577 5578 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) && 5579 (mac_status & MAC_STATUS_PCS_SYNCED)) { 5580 u32 local_adv = 0, remote_adv = 0; 5581 5582 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP) 5583 local_adv |= ADVERTISE_1000XPAUSE; 5584 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE) 5585 local_adv |= ADVERTISE_1000XPSE_ASYM; 5586 5587 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE) 5588 remote_adv |= LPA_1000XPAUSE; 5589 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE) 5590 remote_adv |= LPA_1000XPAUSE_ASYM; 5591 5592 tp->link_config.rmt_adv = 5593 mii_adv_to_ethtool_adv_x(remote_adv); 5594 5595 tg3_setup_flow_control(tp, local_adv, remote_adv); 5596 current_link_up = true; 5597 tp->serdes_counter = 0; 5598 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5599 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) { 5600 if (tp->serdes_counter) 5601 tp->serdes_counter--; 5602 else { 5603 if (workaround) { 5604 u32 val = serdes_cfg; 5605 5606 if (port_a) 5607 val |= 0xc010000; 5608 else 5609 val |= 0x4010000; 5610 5611 tw32_f(MAC_SERDES_CFG, val); 5612 } 5613 5614 
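/* The autoneg timer has expired; fall back to the common setup
 * (hardware autoneg off) and attempt parallel detection below.
 */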
tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP); 5615 udelay(40); 5616 5617 /* Link parallel detection - link is up */ 5618 /* only if we have PCS_SYNC and not */ 5619 /* receiving config code words */ 5620 mac_status = tr32(MAC_STATUS); 5621 if ((mac_status & MAC_STATUS_PCS_SYNCED) && 5622 !(mac_status & MAC_STATUS_RCVD_CFG)) { 5623 tg3_setup_flow_control(tp, 0, 0); 5624 current_link_up = true; 5625 tp->phy_flags |= 5626 TG3_PHYFLG_PARALLEL_DETECT; 5627 tp->serdes_counter = 5628 SERDES_PARALLEL_DET_TIMEOUT; 5629 } else 5630 goto restart_autoneg; 5631 } 5632 } 5633 } else { 5634 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; 5635 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5636 } 5637 5638 out: 5639 return current_link_up; 5640 } 5641 5642 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status) 5643 { 5644 bool current_link_up = false; 5645 5646 if (!(mac_status & MAC_STATUS_PCS_SYNCED)) 5647 goto out; 5648 5649 if (tp->link_config.autoneg == AUTONEG_ENABLE) { 5650 u32 txflags, rxflags; 5651 int i; 5652 5653 if (fiber_autoneg(tp, &txflags, &rxflags)) { 5654 u32 local_adv = 0, remote_adv = 0; 5655 5656 if (txflags & ANEG_CFG_PS1) 5657 local_adv |= ADVERTISE_1000XPAUSE; 5658 if (txflags & ANEG_CFG_PS2) 5659 local_adv |= ADVERTISE_1000XPSE_ASYM; 5660 5661 if (rxflags & MR_LP_ADV_SYM_PAUSE) 5662 remote_adv |= LPA_1000XPAUSE; 5663 if (rxflags & MR_LP_ADV_ASYM_PAUSE) 5664 remote_adv |= LPA_1000XPAUSE_ASYM; 5665 5666 tp->link_config.rmt_adv = 5667 mii_adv_to_ethtool_adv_x(remote_adv); 5668 5669 tg3_setup_flow_control(tp, local_adv, remote_adv); 5670 5671 current_link_up = true; 5672 } 5673 for (i = 0; i < 30; i++) { 5674 udelay(20); 5675 tw32_f(MAC_STATUS, 5676 (MAC_STATUS_SYNC_CHANGED | 5677 MAC_STATUS_CFG_CHANGED)); 5678 udelay(40); 5679 if ((tr32(MAC_STATUS) & 5680 (MAC_STATUS_SYNC_CHANGED | 5681 MAC_STATUS_CFG_CHANGED)) == 0) 5682 break; 5683 } 5684 5685 mac_status = tr32(MAC_STATUS); 5686 if (!current_link_up && 5687 (mac_status & MAC_STATUS_PCS_SYNCED) && 5688 !(mac_status & MAC_STATUS_RCVD_CFG)) 5689 current_link_up = true; 5690 } else { 5691 tg3_setup_flow_control(tp, 0, 0); 5692 5693 /* Forcing 1000FD link up. 
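 * Autoneg is disabled, so there is nothing to negotiate; pulse
 * SEND_CONFIGS below and report the link as up.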
*/ 5694 current_link_up = true; 5695 5696 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS)); 5697 udelay(40); 5698 5699 tw32_f(MAC_MODE, tp->mac_mode); 5700 udelay(40); 5701 } 5702 5703 out: 5704 return current_link_up; 5705 } 5706 5707 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset) 5708 { 5709 u32 orig_pause_cfg; 5710 u32 orig_active_speed; 5711 u8 orig_active_duplex; 5712 u32 mac_status; 5713 bool current_link_up; 5714 int i; 5715 5716 orig_pause_cfg = tp->link_config.active_flowctrl; 5717 orig_active_speed = tp->link_config.active_speed; 5718 orig_active_duplex = tp->link_config.active_duplex; 5719 5720 if (!tg3_flag(tp, HW_AUTONEG) && 5721 tp->link_up && 5722 tg3_flag(tp, INIT_COMPLETE)) { 5723 mac_status = tr32(MAC_STATUS); 5724 mac_status &= (MAC_STATUS_PCS_SYNCED | 5725 MAC_STATUS_SIGNAL_DET | 5726 MAC_STATUS_CFG_CHANGED | 5727 MAC_STATUS_RCVD_CFG); 5728 if (mac_status == (MAC_STATUS_PCS_SYNCED | 5729 MAC_STATUS_SIGNAL_DET)) { 5730 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | 5731 MAC_STATUS_CFG_CHANGED)); 5732 return 0; 5733 } 5734 } 5735 5736 tw32_f(MAC_TX_AUTO_NEG, 0); 5737 5738 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); 5739 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI; 5740 tw32_f(MAC_MODE, tp->mac_mode); 5741 udelay(40); 5742 5743 if (tp->phy_id == TG3_PHY_ID_BCM8002) 5744 tg3_init_bcm8002(tp); 5745 5746 /* Enable link change event even when serdes polling. */ 5747 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); 5748 udelay(40); 5749 5750 tp->link_config.rmt_adv = 0; 5751 mac_status = tr32(MAC_STATUS); 5752 5753 if (tg3_flag(tp, HW_AUTONEG)) 5754 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status); 5755 else 5756 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status); 5757 5758 tp->napi[0].hw_status->status = 5759 (SD_STATUS_UPDATED | 5760 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG)); 5761 5762 for (i = 0; i < 100; i++) { 5763 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | 5764 MAC_STATUS_CFG_CHANGED)); 5765 udelay(5); 5766 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED | 5767 MAC_STATUS_CFG_CHANGED | 5768 MAC_STATUS_LNKSTATE_CHANGED)) == 0) 5769 break; 5770 } 5771 5772 mac_status = tr32(MAC_STATUS); 5773 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) { 5774 current_link_up = false; 5775 if (tp->link_config.autoneg == AUTONEG_ENABLE && 5776 tp->serdes_counter == 0) { 5777 tw32_f(MAC_MODE, (tp->mac_mode | 5778 MAC_MODE_SEND_CONFIGS)); 5779 udelay(1); 5780 tw32_f(MAC_MODE, tp->mac_mode); 5781 } 5782 } 5783 5784 if (current_link_up) { 5785 tp->link_config.active_speed = SPEED_1000; 5786 tp->link_config.active_duplex = DUPLEX_FULL; 5787 tw32(MAC_LED_CTRL, (tp->led_ctrl | 5788 LED_CTRL_LNKLED_OVERRIDE | 5789 LED_CTRL_1000MBPS_ON)); 5790 } else { 5791 tp->link_config.active_speed = SPEED_UNKNOWN; 5792 tp->link_config.active_duplex = DUPLEX_UNKNOWN; 5793 tw32(MAC_LED_CTRL, (tp->led_ctrl | 5794 LED_CTRL_LNKLED_OVERRIDE | 5795 LED_CTRL_TRAFFIC_OVERRIDE)); 5796 } 5797 5798 if (!tg3_test_and_report_link_chg(tp, current_link_up)) { 5799 u32 now_pause_cfg = tp->link_config.active_flowctrl; 5800 if (orig_pause_cfg != now_pause_cfg || 5801 orig_active_speed != tp->link_config.active_speed || 5802 orig_active_duplex != tp->link_config.active_duplex) 5803 tg3_link_report(tp); 5804 } 5805 5806 return 0; 5807 } 5808 5809 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset) 5810 { 5811 int err = 0; 5812 u32 bmsr, bmcr; 5813 u32 current_speed = SPEED_UNKNOWN; 5814 u8 current_duplex = DUPLEX_UNKNOWN; 5815 bool 
current_link_up = false; 5816 u32 local_adv, remote_adv, sgsr; 5817 5818 if ((tg3_asic_rev(tp) == ASIC_REV_5719 || 5819 tg3_asic_rev(tp) == ASIC_REV_5720) && 5820 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) && 5821 (sgsr & SERDES_TG3_SGMII_MODE)) { 5822 5823 if (force_reset) 5824 tg3_phy_reset(tp); 5825 5826 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK; 5827 5828 if (!(sgsr & SERDES_TG3_LINK_UP)) { 5829 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 5830 } else { 5831 current_link_up = true; 5832 if (sgsr & SERDES_TG3_SPEED_1000) { 5833 current_speed = SPEED_1000; 5834 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 5835 } else if (sgsr & SERDES_TG3_SPEED_100) { 5836 current_speed = SPEED_100; 5837 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 5838 } else { 5839 current_speed = SPEED_10; 5840 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 5841 } 5842 5843 if (sgsr & SERDES_TG3_FULL_DUPLEX) 5844 current_duplex = DUPLEX_FULL; 5845 else 5846 current_duplex = DUPLEX_HALF; 5847 } 5848 5849 tw32_f(MAC_MODE, tp->mac_mode); 5850 udelay(40); 5851 5852 tg3_clear_mac_status(tp); 5853 5854 goto fiber_setup_done; 5855 } 5856 5857 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 5858 tw32_f(MAC_MODE, tp->mac_mode); 5859 udelay(40); 5860 5861 tg3_clear_mac_status(tp); 5862 5863 if (force_reset) 5864 tg3_phy_reset(tp); 5865 5866 tp->link_config.rmt_adv = 0; 5867 5868 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 5869 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 5870 if (tg3_asic_rev(tp) == ASIC_REV_5714) { 5871 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) 5872 bmsr |= BMSR_LSTATUS; 5873 else 5874 bmsr &= ~BMSR_LSTATUS; 5875 } 5876 5877 err |= tg3_readphy(tp, MII_BMCR, &bmcr); 5878 5879 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset && 5880 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { 5881 /* do nothing, just check for link up at the end */ 5882 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) { 5883 u32 adv, newadv; 5884 5885 err |= tg3_readphy(tp, MII_ADVERTISE, &adv); 5886 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF | 5887 ADVERTISE_1000XPAUSE | 5888 ADVERTISE_1000XPSE_ASYM | 5889 ADVERTISE_SLCT); 5890 5891 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); 5892 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising); 5893 5894 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) { 5895 tg3_writephy(tp, MII_ADVERTISE, newadv); 5896 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART; 5897 tg3_writephy(tp, MII_BMCR, bmcr); 5898 5899 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); 5900 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S; 5901 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5902 5903 return err; 5904 } 5905 } else { 5906 u32 new_bmcr; 5907 5908 bmcr &= ~BMCR_SPEED1000; 5909 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX); 5910 5911 if (tp->link_config.duplex == DUPLEX_FULL) 5912 new_bmcr |= BMCR_FULLDPLX; 5913 5914 if (new_bmcr != bmcr) { 5915 /* BMCR_SPEED1000 is a reserved bit that needs 5916 * to be set on write. 
5917 */ 5918 new_bmcr |= BMCR_SPEED1000; 5919 5920 /* Force a linkdown */ 5921 if (tp->link_up) { 5922 u32 adv; 5923 5924 err |= tg3_readphy(tp, MII_ADVERTISE, &adv); 5925 adv &= ~(ADVERTISE_1000XFULL | 5926 ADVERTISE_1000XHALF | 5927 ADVERTISE_SLCT); 5928 tg3_writephy(tp, MII_ADVERTISE, adv); 5929 tg3_writephy(tp, MII_BMCR, bmcr | 5930 BMCR_ANRESTART | 5931 BMCR_ANENABLE); 5932 udelay(10); 5933 tg3_carrier_off(tp); 5934 } 5935 tg3_writephy(tp, MII_BMCR, new_bmcr); 5936 bmcr = new_bmcr; 5937 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 5938 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 5939 if (tg3_asic_rev(tp) == ASIC_REV_5714) { 5940 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) 5941 bmsr |= BMSR_LSTATUS; 5942 else 5943 bmsr &= ~BMSR_LSTATUS; 5944 } 5945 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5946 } 5947 } 5948 5949 if (bmsr & BMSR_LSTATUS) { 5950 current_speed = SPEED_1000; 5951 current_link_up = true; 5952 if (bmcr & BMCR_FULLDPLX) 5953 current_duplex = DUPLEX_FULL; 5954 else 5955 current_duplex = DUPLEX_HALF; 5956 5957 local_adv = 0; 5958 remote_adv = 0; 5959 5960 if (bmcr & BMCR_ANENABLE) { 5961 u32 common; 5962 5963 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv); 5964 err |= tg3_readphy(tp, MII_LPA, &remote_adv); 5965 common = local_adv & remote_adv; 5966 if (common & (ADVERTISE_1000XHALF | 5967 ADVERTISE_1000XFULL)) { 5968 if (common & ADVERTISE_1000XFULL) 5969 current_duplex = DUPLEX_FULL; 5970 else 5971 current_duplex = DUPLEX_HALF; 5972 5973 tp->link_config.rmt_adv = 5974 mii_adv_to_ethtool_adv_x(remote_adv); 5975 } else if (!tg3_flag(tp, 5780_CLASS)) { 5976 /* Link is up via parallel detect */ 5977 } else { 5978 current_link_up = false; 5979 } 5980 } 5981 } 5982 5983 fiber_setup_done: 5984 if (current_link_up && current_duplex == DUPLEX_FULL) 5985 tg3_setup_flow_control(tp, local_adv, remote_adv); 5986 5987 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; 5988 if (tp->link_config.active_duplex == DUPLEX_HALF) 5989 tp->mac_mode |= MAC_MODE_HALF_DUPLEX; 5990 5991 tw32_f(MAC_MODE, tp->mac_mode); 5992 udelay(40); 5993 5994 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); 5995 5996 tp->link_config.active_speed = current_speed; 5997 tp->link_config.active_duplex = current_duplex; 5998 5999 tg3_test_and_report_link_chg(tp, current_link_up); 6000 return err; 6001 } 6002 6003 static void tg3_serdes_parallel_detect(struct tg3 *tp) 6004 { 6005 if (tp->serdes_counter) { 6006 /* Give autoneg time to complete. */ 6007 tp->serdes_counter--; 6008 return; 6009 } 6010 6011 if (!tp->link_up && 6012 (tp->link_config.autoneg == AUTONEG_ENABLE)) { 6013 u32 bmcr; 6014 6015 tg3_readphy(tp, MII_BMCR, &bmcr); 6016 if (bmcr & BMCR_ANENABLE) { 6017 u32 phy1, phy2; 6018 6019 /* Select shadow register 0x1f */ 6020 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00); 6021 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1); 6022 6023 /* Select expansion interrupt status register */ 6024 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 6025 MII_TG3_DSP_EXP1_INT_STAT); 6026 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); 6027 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); 6028 6029 if ((phy1 & 0x10) && !(phy2 & 0x20)) { 6030 /* We have signal detect and not receiving 6031 * config code words, link is up by parallel 6032 * detection. 
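 * Disable autoneg in BMCR, force 1000/full, and set
 * TG3_PHYFLG_PARALLEL_DETECT so that autoneg can be re-enabled if
 * config code words are seen again.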
6033 */ 6034 6035 bmcr &= ~BMCR_ANENABLE; 6036 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX; 6037 tg3_writephy(tp, MII_BMCR, bmcr); 6038 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT; 6039 } 6040 } 6041 } else if (tp->link_up && 6042 (tp->link_config.autoneg == AUTONEG_ENABLE) && 6043 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { 6044 u32 phy2; 6045 6046 /* Select expansion interrupt status register */ 6047 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 6048 MII_TG3_DSP_EXP1_INT_STAT); 6049 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); 6050 if (phy2 & 0x20) { 6051 u32 bmcr; 6052 6053 /* Config code words received, turn on autoneg. */ 6054 tg3_readphy(tp, MII_BMCR, &bmcr); 6055 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE); 6056 6057 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 6058 6059 } 6060 } 6061 } 6062 6063 static int tg3_setup_phy(struct tg3 *tp, bool force_reset) 6064 { 6065 u32 val; 6066 int err; 6067 6068 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 6069 err = tg3_setup_fiber_phy(tp, force_reset); 6070 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 6071 err = tg3_setup_fiber_mii_phy(tp, force_reset); 6072 else 6073 err = tg3_setup_copper_phy(tp, force_reset); 6074 6075 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) { 6076 u32 scale; 6077 6078 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK; 6079 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5) 6080 scale = 65; 6081 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25) 6082 scale = 6; 6083 else 6084 scale = 12; 6085 6086 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK; 6087 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT); 6088 tw32(GRC_MISC_CFG, val); 6089 } 6090 6091 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) | 6092 (6 << TX_LENGTHS_IPG_SHIFT); 6093 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 6094 tg3_asic_rev(tp) == ASIC_REV_5762) 6095 val |= tr32(MAC_TX_LENGTHS) & 6096 (TX_LENGTHS_JMB_FRM_LEN_MSK | 6097 TX_LENGTHS_CNT_DWN_VAL_MSK); 6098 6099 if (tp->link_config.active_speed == SPEED_1000 && 6100 tp->link_config.active_duplex == DUPLEX_HALF) 6101 tw32(MAC_TX_LENGTHS, val | 6102 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)); 6103 else 6104 tw32(MAC_TX_LENGTHS, val | 6105 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)); 6106 6107 if (!tg3_flag(tp, 5705_PLUS)) { 6108 if (tp->link_up) { 6109 tw32(HOSTCC_STAT_COAL_TICKS, 6110 tp->coal.stats_block_coalesce_usecs); 6111 } else { 6112 tw32(HOSTCC_STAT_COAL_TICKS, 0); 6113 } 6114 } 6115 6116 if (tg3_flag(tp, ASPM_WORKAROUND)) { 6117 val = tr32(PCIE_PWR_MGMT_THRESH); 6118 if (!tp->link_up) 6119 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) | 6120 tp->pwrmgmt_thresh; 6121 else 6122 val |= PCIE_PWR_MGMT_L1_THRESH_MSK; 6123 tw32(PCIE_PWR_MGMT_THRESH, val); 6124 } 6125 6126 return err; 6127 } 6128 6129 /* tp->lock must be held */ 6130 static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts) 6131 { 6132 u64 stamp; 6133 6134 ptp_read_system_prets(sts); 6135 stamp = tr32(TG3_EAV_REF_CLCK_LSB); 6136 ptp_read_system_postts(sts); 6137 stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32; 6138 6139 return stamp; 6140 } 6141 6142 /* tp->lock must be held */ 6143 static void tg3_refclk_write(struct tg3 *tp, u64 newval) 6144 { 6145 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL); 6146 6147 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP); 6148 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff); 6149 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32); 6150 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME); 6151 } 6152 6153 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync); 6154 static inline void 
tg3_full_unlock(struct tg3 *tp); 6155 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) 6156 { 6157 struct tg3 *tp = netdev_priv(dev); 6158 6159 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | 6160 SOF_TIMESTAMPING_RX_SOFTWARE | 6161 SOF_TIMESTAMPING_SOFTWARE; 6162 6163 if (tg3_flag(tp, PTP_CAPABLE)) { 6164 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE | 6165 SOF_TIMESTAMPING_RX_HARDWARE | 6166 SOF_TIMESTAMPING_RAW_HARDWARE; 6167 } 6168 6169 if (tp->ptp_clock) 6170 info->phc_index = ptp_clock_index(tp->ptp_clock); 6171 else 6172 info->phc_index = -1; 6173 6174 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); 6175 6176 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | 6177 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | 6178 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | 6179 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT); 6180 return 0; 6181 } 6182 6183 static int tg3_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) 6184 { 6185 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); 6186 u64 correction; 6187 bool neg_adj; 6188 6189 /* Frequency adjustment is performed using hardware with a 24 bit 6190 * accumulator and a programmable correction value. On each clk, the 6191 * correction value gets added to the accumulator and when it 6192 * overflows, the time counter is incremented/decremented. 6193 */ 6194 neg_adj = diff_by_scaled_ppm(1 << 24, scaled_ppm, &correction); 6195 6196 tg3_full_lock(tp, 0); 6197 6198 if (correction) 6199 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 6200 TG3_EAV_REF_CLK_CORRECT_EN | 6201 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | 6202 ((u32)correction & TG3_EAV_REF_CLK_CORRECT_MASK)); 6203 else 6204 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0); 6205 6206 tg3_full_unlock(tp); 6207 6208 return 0; 6209 } 6210 6211 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) 6212 { 6213 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); 6214 6215 tg3_full_lock(tp, 0); 6216 tp->ptp_adjust += delta; 6217 tg3_full_unlock(tp); 6218 6219 return 0; 6220 } 6221 6222 static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts, 6223 struct ptp_system_timestamp *sts) 6224 { 6225 u64 ns; 6226 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); 6227 6228 tg3_full_lock(tp, 0); 6229 ns = tg3_refclk_read(tp, sts); 6230 ns += tp->ptp_adjust; 6231 tg3_full_unlock(tp); 6232 6233 *ts = ns_to_timespec64(ns); 6234 6235 return 0; 6236 } 6237 6238 static int tg3_ptp_settime(struct ptp_clock_info *ptp, 6239 const struct timespec64 *ts) 6240 { 6241 u64 ns; 6242 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); 6243 6244 ns = timespec64_to_ns(ts); 6245 6246 tg3_full_lock(tp, 0); 6247 tg3_refclk_write(tp, ns); 6248 tp->ptp_adjust = 0; 6249 tg3_full_unlock(tp); 6250 6251 return 0; 6252 } 6253 6254 static int tg3_ptp_enable(struct ptp_clock_info *ptp, 6255 struct ptp_clock_request *rq, int on) 6256 { 6257 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); 6258 u32 clock_ctl; 6259 int rval = 0; 6260 6261 switch (rq->type) { 6262 case PTP_CLK_REQ_PEROUT: 6263 /* Reject requests with unsupported flags */ 6264 if (rq->perout.flags) 6265 return -EOPNOTSUPP; 6266 6267 if (rq->perout.index != 0) 6268 return -EINVAL; 6269 6270 tg3_full_lock(tp, 0); 6271 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL); 6272 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK; 6273 6274 if (on) { 6275 u64 nsec; 6276 6277 nsec = rq->perout.start.sec * 1000000000ULL + 6278 rq->perout.start.nsec; 6279 6280 if (rq->perout.period.sec || rq->perout.period.nsec) { 
6281 netdev_warn(tp->dev, 6282 "Device supports only a one-shot timesync output, period must be 0\n"); 6283 rval = -EINVAL; 6284 goto err_out; 6285 } 6286 6287 if (nsec & (1ULL << 63)) { 6288 netdev_warn(tp->dev, 6289 "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n"); 6290 rval = -EINVAL; 6291 goto err_out; 6292 } 6293 6294 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff)); 6295 tw32(TG3_EAV_WATCHDOG0_MSB, 6296 TG3_EAV_WATCHDOG0_EN | 6297 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK)); 6298 6299 tw32(TG3_EAV_REF_CLCK_CTL, 6300 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0); 6301 } else { 6302 tw32(TG3_EAV_WATCHDOG0_MSB, 0); 6303 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl); 6304 } 6305 6306 err_out: 6307 tg3_full_unlock(tp); 6308 return rval; 6309 6310 default: 6311 break; 6312 } 6313 6314 return -EOPNOTSUPP; 6315 } 6316 6317 static const struct ptp_clock_info tg3_ptp_caps = { 6318 .owner = THIS_MODULE, 6319 .name = "tg3 clock", 6320 .max_adj = 250000000, 6321 .n_alarm = 0, 6322 .n_ext_ts = 0, 6323 .n_per_out = 1, 6324 .n_pins = 0, 6325 .pps = 0, 6326 .adjfine = tg3_ptp_adjfine, 6327 .adjtime = tg3_ptp_adjtime, 6328 .gettimex64 = tg3_ptp_gettimex, 6329 .settime64 = tg3_ptp_settime, 6330 .enable = tg3_ptp_enable, 6331 }; 6332 6333 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock, 6334 struct skb_shared_hwtstamps *timestamp) 6335 { 6336 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps)); 6337 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) + 6338 tp->ptp_adjust); 6339 } 6340 6341 /* tp->lock must be held */ 6342 static void tg3_ptp_init(struct tg3 *tp) 6343 { 6344 if (!tg3_flag(tp, PTP_CAPABLE)) 6345 return; 6346 6347 /* Initialize the hardware clock to the system time. */ 6348 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real())); 6349 tp->ptp_adjust = 0; 6350 tp->ptp_info = tg3_ptp_caps; 6351 } 6352 6353 /* tp->lock must be held */ 6354 static void tg3_ptp_resume(struct tg3 *tp) 6355 { 6356 if (!tg3_flag(tp, PTP_CAPABLE)) 6357 return; 6358 6359 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust); 6360 tp->ptp_adjust = 0; 6361 } 6362 6363 static void tg3_ptp_fini(struct tg3 *tp) 6364 { 6365 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock) 6366 return; 6367 6368 ptp_clock_unregister(tp->ptp_clock); 6369 tp->ptp_clock = NULL; 6370 tp->ptp_adjust = 0; 6371 } 6372 6373 static inline int tg3_irq_sync(struct tg3 *tp) 6374 { 6375 return tp->irq_sync; 6376 } 6377 6378 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len) 6379 { 6380 int i; 6381 6382 dst = (u32 *)((u8 *)dst + off); 6383 for (i = 0; i < len; i += sizeof(u32)) 6384 *dst++ = tr32(off + i); 6385 } 6386 6387 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs) 6388 { 6389 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0); 6390 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200); 6391 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0); 6392 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0); 6393 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04); 6394 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80); 6395 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48); 6396 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04); 6397 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20); 6398 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c); 6399 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c); 6400 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c); 6401 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44); 6402 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04); 6403 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20); 6404 tg3_rd32_loop(tp, 
regs, RCVCC_MODE, 0x14); 6405 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08); 6406 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08); 6407 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100); 6408 6409 if (tg3_flag(tp, SUPPORT_MSIX)) 6410 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180); 6411 6412 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10); 6413 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58); 6414 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08); 6415 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08); 6416 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04); 6417 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04); 6418 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04); 6419 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04); 6420 6421 if (!tg3_flag(tp, 5705_PLUS)) { 6422 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04); 6423 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04); 6424 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04); 6425 } 6426 6427 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110); 6428 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120); 6429 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c); 6430 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04); 6431 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c); 6432 6433 if (tg3_flag(tp, NVRAM)) 6434 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24); 6435 } 6436 6437 static void tg3_dump_state(struct tg3 *tp) 6438 { 6439 int i; 6440 u32 *regs; 6441 6442 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC); 6443 if (!regs) 6444 return; 6445 6446 if (tg3_flag(tp, PCI_EXPRESS)) { 6447 /* Read up to but not including private PCI registers */ 6448 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32)) 6449 regs[i / sizeof(u32)] = tr32(i); 6450 } else 6451 tg3_dump_legacy_regs(tp, regs); 6452 6453 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) { 6454 if (!regs[i + 0] && !regs[i + 1] && 6455 !regs[i + 2] && !regs[i + 3]) 6456 continue; 6457 6458 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n", 6459 i * 4, 6460 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]); 6461 } 6462 6463 kfree(regs); 6464 6465 for (i = 0; i < tp->irq_cnt; i++) { 6466 struct tg3_napi *tnapi = &tp->napi[i]; 6467 6468 /* SW status block */ 6469 netdev_err(tp->dev, 6470 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n", 6471 i, 6472 tnapi->hw_status->status, 6473 tnapi->hw_status->status_tag, 6474 tnapi->hw_status->rx_jumbo_consumer, 6475 tnapi->hw_status->rx_consumer, 6476 tnapi->hw_status->rx_mini_consumer, 6477 tnapi->hw_status->idx[0].rx_producer, 6478 tnapi->hw_status->idx[0].tx_consumer); 6479 6480 netdev_err(tp->dev, 6481 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n", 6482 i, 6483 tnapi->last_tag, tnapi->last_irq_tag, 6484 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending, 6485 tnapi->rx_rcb_ptr, 6486 tnapi->prodring.rx_std_prod_idx, 6487 tnapi->prodring.rx_std_cons_idx, 6488 tnapi->prodring.rx_jmb_prod_idx, 6489 tnapi->prodring.rx_jmb_cons_idx); 6490 } 6491 } 6492 6493 /* This is called whenever we suspect that the system chipset is re- 6494 * ordering the sequence of MMIO to the tx send mailbox. The symptom 6495 * is bogus tx completions. We try to recover by setting the 6496 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later 6497 * in the workqueue. 6498 */ 6499 static void tg3_tx_recover(struct tg3 *tp) 6500 { 6501 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) || 6502 tp->write32_tx_mbox == tg3_write_indirect_mbox); 6503 6504 netdev_warn(tp->dev, 6505 "The system may be re-ordering memory-mapped I/O " 6506 "cycles to the network device, attempting to recover. 
" 6507 "Please report the problem to the driver maintainer " 6508 "and include system chipset information.\n"); 6509 6510 tg3_flag_set(tp, TX_RECOVERY_PENDING); 6511 } 6512 6513 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi) 6514 { 6515 /* Tell compiler to fetch tx indices from memory. */ 6516 barrier(); 6517 return tnapi->tx_pending - 6518 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1)); 6519 } 6520 6521 /* Tigon3 never reports partial packet sends. So we do not 6522 * need special logic to handle SKBs that have not had all 6523 * of their frags sent yet, like SunGEM does. 6524 */ 6525 static void tg3_tx(struct tg3_napi *tnapi) 6526 { 6527 struct tg3 *tp = tnapi->tp; 6528 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer; 6529 u32 sw_idx = tnapi->tx_cons; 6530 struct netdev_queue *txq; 6531 int index = tnapi - tp->napi; 6532 unsigned int pkts_compl = 0, bytes_compl = 0; 6533 6534 if (tg3_flag(tp, ENABLE_TSS)) 6535 index--; 6536 6537 txq = netdev_get_tx_queue(tp->dev, index); 6538 6539 while (sw_idx != hw_idx) { 6540 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx]; 6541 struct sk_buff *skb = ri->skb; 6542 int i, tx_bug = 0; 6543 6544 if (unlikely(skb == NULL)) { 6545 tg3_tx_recover(tp); 6546 return; 6547 } 6548 6549 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) { 6550 struct skb_shared_hwtstamps timestamp; 6551 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB); 6552 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32; 6553 6554 tg3_hwclock_to_timestamp(tp, hwclock, ×tamp); 6555 6556 skb_tstamp_tx(skb, ×tamp); 6557 } 6558 6559 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), 6560 skb_headlen(skb), DMA_TO_DEVICE); 6561 6562 ri->skb = NULL; 6563 6564 while (ri->fragmented) { 6565 ri->fragmented = false; 6566 sw_idx = NEXT_TX(sw_idx); 6567 ri = &tnapi->tx_buffers[sw_idx]; 6568 } 6569 6570 sw_idx = NEXT_TX(sw_idx); 6571 6572 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 6573 ri = &tnapi->tx_buffers[sw_idx]; 6574 if (unlikely(ri->skb != NULL || sw_idx == hw_idx)) 6575 tx_bug = 1; 6576 6577 dma_unmap_page(&tp->pdev->dev, 6578 dma_unmap_addr(ri, mapping), 6579 skb_frag_size(&skb_shinfo(skb)->frags[i]), 6580 DMA_TO_DEVICE); 6581 6582 while (ri->fragmented) { 6583 ri->fragmented = false; 6584 sw_idx = NEXT_TX(sw_idx); 6585 ri = &tnapi->tx_buffers[sw_idx]; 6586 } 6587 6588 sw_idx = NEXT_TX(sw_idx); 6589 } 6590 6591 pkts_compl++; 6592 bytes_compl += skb->len; 6593 6594 dev_consume_skb_any(skb); 6595 6596 if (unlikely(tx_bug)) { 6597 tg3_tx_recover(tp); 6598 return; 6599 } 6600 } 6601 6602 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl); 6603 6604 tnapi->tx_cons = sw_idx; 6605 6606 /* Need to make the tx_cons update visible to tg3_start_xmit() 6607 * before checking for netif_queue_stopped(). Without the 6608 * memory barrier, there is a small possibility that tg3_start_xmit() 6609 * will miss it and cause the queue to be stopped forever. 
6610 */ 6611 smp_mb(); 6612
6613 if (unlikely(netif_tx_queue_stopped(txq) && 6614 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) { 6615 __netif_tx_lock(txq, smp_processor_id()); 6616 if (netif_tx_queue_stopped(txq) && 6617 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))) 6618 netif_tx_wake_queue(txq); 6619 __netif_tx_unlock(txq); 6620 } 6621 } 6622
6623 static void tg3_frag_free(bool is_frag, void *data) 6624 { 6625 if (is_frag) 6626 skb_free_frag(data); 6627 else 6628 kfree(data); 6629 } 6630
6631 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz) 6632 { 6633 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) + 6634 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 6635
6636 if (!ri->data) 6637 return; 6638
6639 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), map_sz, 6640 DMA_FROM_DEVICE); 6641 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data); 6642 ri->data = NULL; 6643 } 6644
6645
6646 /* Returns size of skb allocated or < 0 on error. 6647 * 6648 * We only need to fill in the address because the other members 6649 * of the RX descriptor are invariant, see tg3_init_rings. 6650 * 6651 * Note the purposeful asymmetry of cpu vs. chip accesses. For 6652 * posting buffers we only dirty the first cache line of the RX 6653 * descriptor (containing the address). Whereas for the RX status 6654 * buffers the cpu only reads the last cacheline of the RX descriptor 6655 * (to fetch the error flags, vlan tag, checksum, and opaque cookie). 6656 */ 6657 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr, 6658 u32 opaque_key, u32 dest_idx_unmasked, 6659 unsigned int *frag_size) 6660 { 6661 struct tg3_rx_buffer_desc *desc; 6662 struct ring_info *map; 6663 u8 *data; 6664 dma_addr_t mapping; 6665 int skb_size, data_size, dest_idx; 6666
6667 switch (opaque_key) { 6668 case RXD_OPAQUE_RING_STD: 6669 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask; 6670 desc = &tpr->rx_std[dest_idx]; 6671 map = &tpr->rx_std_buffers[dest_idx]; 6672 data_size = tp->rx_pkt_map_sz; 6673 break; 6674
6675 case RXD_OPAQUE_RING_JUMBO: 6676 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask; 6677 desc = &tpr->rx_jmb[dest_idx].std; 6678 map = &tpr->rx_jmb_buffers[dest_idx]; 6679 data_size = TG3_RX_JMB_MAP_SZ; 6680 break; 6681
6682 default: 6683 return -EINVAL; 6684 } 6685
6686 /* Do not overwrite any of the map or rp information 6687 * until we are sure we can commit to a new buffer. 6688 * 6689 * Callers depend upon this behavior and assume that 6690 * we leave everything unchanged if we fail. 6691 */ 6692 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) + 6693 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 6694 if (skb_size <= PAGE_SIZE) { 6695 data = napi_alloc_frag(skb_size); 6696 *frag_size = skb_size; 6697 } else { 6698 data = kmalloc(skb_size, GFP_ATOMIC); 6699 *frag_size = 0; 6700 } 6701 if (!data) 6702 return -ENOMEM; 6703
6704 mapping = dma_map_single(&tp->pdev->dev, data + TG3_RX_OFFSET(tp), 6705 data_size, DMA_FROM_DEVICE); 6706 if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) { 6707 tg3_frag_free(skb_size <= PAGE_SIZE, data); 6708 return -EIO; 6709 } 6710
6711 map->data = data; 6712 dma_unmap_addr_set(map, mapping, mapping); 6713
6714 desc->addr_hi = ((u64)mapping >> 32); 6715 desc->addr_lo = ((u64)mapping & 0xffffffff); 6716
6717 return data_size; 6718 } 6719
6720 /* We only need to copy over the address because the other 6721 * members of the RX descriptor are invariant.
See notes above 6722 * tg3_alloc_rx_data for full details. 6723 */ 6724 static void tg3_recycle_rx(struct tg3_napi *tnapi, 6725 struct tg3_rx_prodring_set *dpr, 6726 u32 opaque_key, int src_idx, 6727 u32 dest_idx_unmasked) 6728 { 6729 struct tg3 *tp = tnapi->tp; 6730 struct tg3_rx_buffer_desc *src_desc, *dest_desc; 6731 struct ring_info *src_map, *dest_map; 6732 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring; 6733 int dest_idx; 6734
6735 switch (opaque_key) { 6736 case RXD_OPAQUE_RING_STD: 6737 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask; 6738 dest_desc = &dpr->rx_std[dest_idx]; 6739 dest_map = &dpr->rx_std_buffers[dest_idx]; 6740 src_desc = &spr->rx_std[src_idx]; 6741 src_map = &spr->rx_std_buffers[src_idx]; 6742 break; 6743
6744 case RXD_OPAQUE_RING_JUMBO: 6745 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask; 6746 dest_desc = &dpr->rx_jmb[dest_idx].std; 6747 dest_map = &dpr->rx_jmb_buffers[dest_idx]; 6748 src_desc = &spr->rx_jmb[src_idx].std; 6749 src_map = &spr->rx_jmb_buffers[src_idx]; 6750 break; 6751
6752 default: 6753 return; 6754 } 6755
6756 dest_map->data = src_map->data; 6757 dma_unmap_addr_set(dest_map, mapping, 6758 dma_unmap_addr(src_map, mapping)); 6759 dest_desc->addr_hi = src_desc->addr_hi; 6760 dest_desc->addr_lo = src_desc->addr_lo; 6761
6762 /* Ensure that the update to the skb happens after the physical 6763 * addresses have been transferred to the new BD location. 6764 */ 6765 smp_wmb(); 6766
6767 src_map->data = NULL; 6768 } 6769
6770 /* The RX ring scheme is composed of multiple rings which post fresh 6771 * buffers to the chip, and one special ring the chip uses to report 6772 * status back to the host. 6773 * 6774 * The special ring reports the status of received packets to the 6775 * host. The chip does not write into the original descriptor the 6776 * RX buffer was obtained from. The chip simply takes the original 6777 * descriptor as provided by the host, updates the status and length 6778 * field, then writes this into the next status ring entry. 6779 * 6780 * Each ring the host uses to post buffers to the chip is described 6781 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives, 6782 * it is first placed into the on-chip ram. When the packet's length 6783 * is known, the chip walks down the TG3_BDINFO entries to select the 6784 * ring. Each TG3_BDINFO specifies a MAXLEN field and the first 6785 * TG3_BDINFO whose MAXLEN covers the new packet's length is chosen. 6786 * 6787 * The "separate ring for rx status" scheme may sound queer, but it makes 6788 * sense from a cache coherency perspective. If only the host writes 6789 * to the buffer post rings, and only the chip writes to the rx status 6790 * rings, then cache lines never move beyond shared-modified state. 6791 * If both the host and chip were to write into the same ring, cache line 6792 * eviction could occur since both entities want it in an exclusive state. 6793 */ 6794 static int tg3_rx(struct tg3_napi *tnapi, int budget) 6795 { 6796 struct tg3 *tp = tnapi->tp; 6797 u32 work_mask, rx_std_posted = 0; 6798 u32 std_prod_idx, jmb_prod_idx; 6799 u32 sw_idx = tnapi->rx_rcb_ptr; 6800 u16 hw_idx; 6801 int received; 6802 struct tg3_rx_prodring_set *tpr = &tnapi->prodring; 6803
6804 hw_idx = *(tnapi->rx_rcb_prod_idx); 6805 /* 6806 * We need to order the read of hw_idx and the read of 6807 * the opaque cookie.
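 *
 * A sketch of the required ordering (mirroring the code below):
 *
 *	hw_idx = *(tnapi->rx_rcb_prod_idx);	// how far the chip got
 *	rmb();					// order the two reads
 *	desc = &tnapi->rx_rcb[sw_idx];		// entries now stable
 *
 * Without the read barrier the CPU could speculatively load a
 * return-ring entry before loading the producer index, and see a
 * descriptor from before the chip's DMA write of that entry.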
6808 */ 6809 rmb(); 6810 work_mask = 0; 6811 received = 0; 6812 std_prod_idx = tpr->rx_std_prod_idx; 6813 jmb_prod_idx = tpr->rx_jmb_prod_idx; 6814 while (sw_idx != hw_idx && budget > 0) { 6815 struct ring_info *ri; 6816 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx]; 6817 unsigned int len; 6818 struct sk_buff *skb; 6819 dma_addr_t dma_addr; 6820 u32 opaque_key, desc_idx, *post_ptr; 6821 u8 *data; 6822 u64 tstamp = 0; 6823 6824 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; 6825 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; 6826 if (opaque_key == RXD_OPAQUE_RING_STD) { 6827 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx]; 6828 dma_addr = dma_unmap_addr(ri, mapping); 6829 data = ri->data; 6830 post_ptr = &std_prod_idx; 6831 rx_std_posted++; 6832 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { 6833 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx]; 6834 dma_addr = dma_unmap_addr(ri, mapping); 6835 data = ri->data; 6836 post_ptr = &jmb_prod_idx; 6837 } else 6838 goto next_pkt_nopost; 6839 6840 work_mask |= opaque_key; 6841 6842 if (desc->err_vlan & RXD_ERR_MASK) { 6843 drop_it: 6844 tg3_recycle_rx(tnapi, tpr, opaque_key, 6845 desc_idx, *post_ptr); 6846 drop_it_no_recycle: 6847 /* Other statistics kept track of by card. */ 6848 tnapi->rx_dropped++; 6849 goto next_pkt; 6850 } 6851 6852 prefetch(data + TG3_RX_OFFSET(tp)); 6853 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 6854 ETH_FCS_LEN; 6855 6856 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) == 6857 RXD_FLAG_PTPSTAT_PTPV1 || 6858 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) == 6859 RXD_FLAG_PTPSTAT_PTPV2) { 6860 tstamp = tr32(TG3_RX_TSTAMP_LSB); 6861 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32; 6862 } 6863 6864 if (len > TG3_RX_COPY_THRESH(tp)) { 6865 int skb_size; 6866 unsigned int frag_size; 6867 6868 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key, 6869 *post_ptr, &frag_size); 6870 if (skb_size < 0) 6871 goto drop_it; 6872 6873 dma_unmap_single(&tp->pdev->dev, dma_addr, skb_size, 6874 DMA_FROM_DEVICE); 6875 6876 /* Ensure that the update to the data happens 6877 * after the usage of the old DMA mapping. 
6878 */ 6879 smp_wmb(); 6880 6881 ri->data = NULL; 6882 6883 if (frag_size) 6884 skb = build_skb(data, frag_size); 6885 else 6886 skb = slab_build_skb(data); 6887 if (!skb) { 6888 tg3_frag_free(frag_size != 0, data); 6889 goto drop_it_no_recycle; 6890 } 6891 skb_reserve(skb, TG3_RX_OFFSET(tp)); 6892 } else { 6893 tg3_recycle_rx(tnapi, tpr, opaque_key, 6894 desc_idx, *post_ptr); 6895 6896 skb = netdev_alloc_skb(tp->dev, 6897 len + TG3_RAW_IP_ALIGN); 6898 if (skb == NULL) 6899 goto drop_it_no_recycle; 6900 6901 skb_reserve(skb, TG3_RAW_IP_ALIGN); 6902 dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr, len, 6903 DMA_FROM_DEVICE); 6904 memcpy(skb->data, 6905 data + TG3_RX_OFFSET(tp), 6906 len); 6907 dma_sync_single_for_device(&tp->pdev->dev, dma_addr, 6908 len, DMA_FROM_DEVICE); 6909 } 6910 6911 skb_put(skb, len); 6912 if (tstamp) 6913 tg3_hwclock_to_timestamp(tp, tstamp, 6914 skb_hwtstamps(skb)); 6915 6916 if ((tp->dev->features & NETIF_F_RXCSUM) && 6917 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && 6918 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK) 6919 >> RXD_TCPCSUM_SHIFT) == 0xffff)) 6920 skb->ip_summed = CHECKSUM_UNNECESSARY; 6921 else 6922 skb_checksum_none_assert(skb); 6923 6924 skb->protocol = eth_type_trans(skb, tp->dev); 6925 6926 if (len > (tp->dev->mtu + ETH_HLEN) && 6927 skb->protocol != htons(ETH_P_8021Q) && 6928 skb->protocol != htons(ETH_P_8021AD)) { 6929 dev_kfree_skb_any(skb); 6930 goto drop_it_no_recycle; 6931 } 6932 6933 if (desc->type_flags & RXD_FLAG_VLAN && 6934 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) 6935 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 6936 desc->err_vlan & RXD_VLAN_MASK); 6937 6938 napi_gro_receive(&tnapi->napi, skb); 6939 6940 received++; 6941 budget--; 6942 6943 next_pkt: 6944 (*post_ptr)++; 6945 6946 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) { 6947 tpr->rx_std_prod_idx = std_prod_idx & 6948 tp->rx_std_ring_mask; 6949 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, 6950 tpr->rx_std_prod_idx); 6951 work_mask &= ~RXD_OPAQUE_RING_STD; 6952 rx_std_posted = 0; 6953 } 6954 next_pkt_nopost: 6955 sw_idx++; 6956 sw_idx &= tp->rx_ret_ring_mask; 6957 6958 /* Refresh hw_idx to see if there is new work */ 6959 if (sw_idx == hw_idx) { 6960 hw_idx = *(tnapi->rx_rcb_prod_idx); 6961 rmb(); 6962 } 6963 } 6964 6965 /* ACK the status ring. */ 6966 tnapi->rx_rcb_ptr = sw_idx; 6967 tw32_rx_mbox(tnapi->consmbox, sw_idx); 6968 6969 /* Refill RX ring(s). */ 6970 if (!tg3_flag(tp, ENABLE_RSS)) { 6971 /* Sync BD data before updating mailbox */ 6972 wmb(); 6973 6974 if (work_mask & RXD_OPAQUE_RING_STD) { 6975 tpr->rx_std_prod_idx = std_prod_idx & 6976 tp->rx_std_ring_mask; 6977 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, 6978 tpr->rx_std_prod_idx); 6979 } 6980 if (work_mask & RXD_OPAQUE_RING_JUMBO) { 6981 tpr->rx_jmb_prod_idx = jmb_prod_idx & 6982 tp->rx_jmb_ring_mask; 6983 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, 6984 tpr->rx_jmb_prod_idx); 6985 } 6986 } else if (work_mask) { 6987 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be 6988 * updated before the producer indices can be updated. 
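 *
 * This smp_wmb() pairs with the smp_rmb() in tg3_rx_prodring_xfer(),
 * which reads the producer index first and only then inspects the
 * rx_*_buffers[] entries. The pairing, illustratively:
 *
 *	producer (here)			consumer (tg3_rx_prodring_xfer)
 *	update rx_*_buffers[]		read rx_*_prod_idx
 *	smp_wmb();			smp_rmb();
 *	update rx_*_prod_idx		read rx_*_buffers[]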
6989 */ 6990 smp_wmb(); 6991 6992 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask; 6993 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask; 6994 6995 if (tnapi != &tp->napi[1]) { 6996 tp->rx_refill = true; 6997 napi_schedule(&tp->napi[1].napi); 6998 } 6999 } 7000 7001 return received; 7002 } 7003 7004 static void tg3_poll_link(struct tg3 *tp) 7005 { 7006 /* handle link change and other phy events */ 7007 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) { 7008 struct tg3_hw_status *sblk = tp->napi[0].hw_status; 7009 7010 if (sblk->status & SD_STATUS_LINK_CHG) { 7011 sblk->status = SD_STATUS_UPDATED | 7012 (sblk->status & ~SD_STATUS_LINK_CHG); 7013 spin_lock(&tp->lock); 7014 if (tg3_flag(tp, USE_PHYLIB)) { 7015 tw32_f(MAC_STATUS, 7016 (MAC_STATUS_SYNC_CHANGED | 7017 MAC_STATUS_CFG_CHANGED | 7018 MAC_STATUS_MI_COMPLETION | 7019 MAC_STATUS_LNKSTATE_CHANGED)); 7020 udelay(40); 7021 } else 7022 tg3_setup_phy(tp, false); 7023 spin_unlock(&tp->lock); 7024 } 7025 } 7026 } 7027 7028 static int tg3_rx_prodring_xfer(struct tg3 *tp, 7029 struct tg3_rx_prodring_set *dpr, 7030 struct tg3_rx_prodring_set *spr) 7031 { 7032 u32 si, di, cpycnt, src_prod_idx; 7033 int i, err = 0; 7034 7035 while (1) { 7036 src_prod_idx = spr->rx_std_prod_idx; 7037 7038 /* Make sure updates to the rx_std_buffers[] entries and the 7039 * standard producer index are seen in the correct order. 7040 */ 7041 smp_rmb(); 7042 7043 if (spr->rx_std_cons_idx == src_prod_idx) 7044 break; 7045 7046 if (spr->rx_std_cons_idx < src_prod_idx) 7047 cpycnt = src_prod_idx - spr->rx_std_cons_idx; 7048 else 7049 cpycnt = tp->rx_std_ring_mask + 1 - 7050 spr->rx_std_cons_idx; 7051 7052 cpycnt = min(cpycnt, 7053 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx); 7054 7055 si = spr->rx_std_cons_idx; 7056 di = dpr->rx_std_prod_idx; 7057 7058 for (i = di; i < di + cpycnt; i++) { 7059 if (dpr->rx_std_buffers[i].data) { 7060 cpycnt = i - di; 7061 err = -ENOSPC; 7062 break; 7063 } 7064 } 7065 7066 if (!cpycnt) 7067 break; 7068 7069 /* Ensure that updates to the rx_std_buffers ring and the 7070 * shadowed hardware producer ring from tg3_recycle_skb() are 7071 * ordered correctly WRT the skb check above. 7072 */ 7073 smp_rmb(); 7074 7075 memcpy(&dpr->rx_std_buffers[di], 7076 &spr->rx_std_buffers[si], 7077 cpycnt * sizeof(struct ring_info)); 7078 7079 for (i = 0; i < cpycnt; i++, di++, si++) { 7080 struct tg3_rx_buffer_desc *sbd, *dbd; 7081 sbd = &spr->rx_std[si]; 7082 dbd = &dpr->rx_std[di]; 7083 dbd->addr_hi = sbd->addr_hi; 7084 dbd->addr_lo = sbd->addr_lo; 7085 } 7086 7087 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) & 7088 tp->rx_std_ring_mask; 7089 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) & 7090 tp->rx_std_ring_mask; 7091 } 7092 7093 while (1) { 7094 src_prod_idx = spr->rx_jmb_prod_idx; 7095 7096 /* Make sure updates to the rx_jmb_buffers[] entries and 7097 * the jumbo producer index are seen in the correct order. 
7098 */ 7099 smp_rmb(); 7100 7101 if (spr->rx_jmb_cons_idx == src_prod_idx) 7102 break; 7103 7104 if (spr->rx_jmb_cons_idx < src_prod_idx) 7105 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx; 7106 else 7107 cpycnt = tp->rx_jmb_ring_mask + 1 - 7108 spr->rx_jmb_cons_idx; 7109 7110 cpycnt = min(cpycnt, 7111 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx); 7112 7113 si = spr->rx_jmb_cons_idx; 7114 di = dpr->rx_jmb_prod_idx; 7115 7116 for (i = di; i < di + cpycnt; i++) { 7117 if (dpr->rx_jmb_buffers[i].data) { 7118 cpycnt = i - di; 7119 err = -ENOSPC; 7120 break; 7121 } 7122 } 7123 7124 if (!cpycnt) 7125 break; 7126 7127 /* Ensure that updates to the rx_jmb_buffers ring and the 7128 * shadowed hardware producer ring from tg3_recycle_skb() are 7129 * ordered correctly WRT the skb check above. 7130 */ 7131 smp_rmb(); 7132 7133 memcpy(&dpr->rx_jmb_buffers[di], 7134 &spr->rx_jmb_buffers[si], 7135 cpycnt * sizeof(struct ring_info)); 7136 7137 for (i = 0; i < cpycnt; i++, di++, si++) { 7138 struct tg3_rx_buffer_desc *sbd, *dbd; 7139 sbd = &spr->rx_jmb[si].std; 7140 dbd = &dpr->rx_jmb[di].std; 7141 dbd->addr_hi = sbd->addr_hi; 7142 dbd->addr_lo = sbd->addr_lo; 7143 } 7144 7145 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) & 7146 tp->rx_jmb_ring_mask; 7147 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) & 7148 tp->rx_jmb_ring_mask; 7149 } 7150 7151 return err; 7152 } 7153 7154 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) 7155 { 7156 struct tg3 *tp = tnapi->tp; 7157 7158 /* run TX completion thread */ 7159 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) { 7160 tg3_tx(tnapi); 7161 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) 7162 return work_done; 7163 } 7164 7165 if (!tnapi->rx_rcb_prod_idx) 7166 return work_done; 7167 7168 /* run RX thread, within the bounds set by NAPI. 
7169 * All RX "locking" is done by ensuring outside 7170 * code synchronizes with tg3->napi.poll() 7171 */ 7172 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) 7173 work_done += tg3_rx(tnapi, budget - work_done); 7174 7175 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) { 7176 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring; 7177 int i, err = 0; 7178 u32 std_prod_idx = dpr->rx_std_prod_idx; 7179 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx; 7180 7181 tp->rx_refill = false; 7182 for (i = 1; i <= tp->rxq_cnt; i++) 7183 err |= tg3_rx_prodring_xfer(tp, dpr, 7184 &tp->napi[i].prodring); 7185 7186 wmb(); 7187 7188 if (std_prod_idx != dpr->rx_std_prod_idx) 7189 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, 7190 dpr->rx_std_prod_idx); 7191 7192 if (jmb_prod_idx != dpr->rx_jmb_prod_idx) 7193 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, 7194 dpr->rx_jmb_prod_idx); 7195 7196 if (err) 7197 tw32_f(HOSTCC_MODE, tp->coal_now); 7198 } 7199 7200 return work_done; 7201 } 7202 7203 static inline void tg3_reset_task_schedule(struct tg3 *tp) 7204 { 7205 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags)) 7206 schedule_work(&tp->reset_task); 7207 } 7208 7209 static inline void tg3_reset_task_cancel(struct tg3 *tp) 7210 { 7211 if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags)) 7212 cancel_work_sync(&tp->reset_task); 7213 tg3_flag_clear(tp, TX_RECOVERY_PENDING); 7214 } 7215 7216 static int tg3_poll_msix(struct napi_struct *napi, int budget) 7217 { 7218 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); 7219 struct tg3 *tp = tnapi->tp; 7220 int work_done = 0; 7221 struct tg3_hw_status *sblk = tnapi->hw_status; 7222 7223 while (1) { 7224 work_done = tg3_poll_work(tnapi, work_done, budget); 7225 7226 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) 7227 goto tx_recovery; 7228 7229 if (unlikely(work_done >= budget)) 7230 break; 7231 7232 /* tp->last_tag is used in tg3_int_reenable() below 7233 * to tell the hw how much work has been processed, 7234 * so we must read it before checking for more work. 7235 */ 7236 tnapi->last_tag = sblk->status_tag; 7237 tnapi->last_irq_tag = tnapi->last_tag; 7238 rmb(); 7239 7240 /* check for RX/TX work to do */ 7241 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons && 7242 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) { 7243 7244 /* This test here is not race free, but will reduce 7245 * the number of interrupts by looping again. 7246 */ 7247 if (tnapi == &tp->napi[1] && tp->rx_refill) 7248 continue; 7249 7250 napi_complete_done(napi, work_done); 7251 /* Reenable interrupts. */ 7252 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); 7253 7254 /* This test here is synchronized by napi_schedule() 7255 * and napi_complete() to close the race condition. 7256 */ 7257 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) { 7258 tw32(HOSTCC_MODE, tp->coalesce_mode | 7259 HOSTCC_MODE_ENABLE | 7260 tnapi->coal_now); 7261 } 7262 break; 7263 } 7264 } 7265 7266 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1); 7267 return work_done; 7268 7269 tx_recovery: 7270 /* work_done is guaranteed to be less than budget. */ 7271 napi_complete(napi); 7272 tg3_reset_task_schedule(tp); 7273 return work_done; 7274 } 7275 7276 static void tg3_process_error(struct tg3 *tp) 7277 { 7278 u32 val; 7279 bool real_error = false; 7280 7281 if (tg3_flag(tp, ERROR_PROCESSED)) 7282 return; 7283 7284 /* Check Flow Attention register */ 7285 val = tr32(HOSTCC_FLOW_ATTN); 7286 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) { 7287 netdev_err(tp->dev, "FLOW Attention error. 
Resetting chip.\n"); 7288 real_error = true; 7289 } 7290 7291 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) { 7292 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n"); 7293 real_error = true; 7294 } 7295 7296 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) { 7297 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n"); 7298 real_error = true; 7299 } 7300 7301 if (!real_error) 7302 return; 7303 7304 tg3_dump_state(tp); 7305 7306 tg3_flag_set(tp, ERROR_PROCESSED); 7307 tg3_reset_task_schedule(tp); 7308 } 7309 7310 static int tg3_poll(struct napi_struct *napi, int budget) 7311 { 7312 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); 7313 struct tg3 *tp = tnapi->tp; 7314 int work_done = 0; 7315 struct tg3_hw_status *sblk = tnapi->hw_status; 7316 7317 while (1) { 7318 if (sblk->status & SD_STATUS_ERROR) 7319 tg3_process_error(tp); 7320 7321 tg3_poll_link(tp); 7322 7323 work_done = tg3_poll_work(tnapi, work_done, budget); 7324 7325 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) 7326 goto tx_recovery; 7327 7328 if (unlikely(work_done >= budget)) 7329 break; 7330 7331 if (tg3_flag(tp, TAGGED_STATUS)) { 7332 /* tp->last_tag is used in tg3_int_reenable() below 7333 * to tell the hw how much work has been processed, 7334 * so we must read it before checking for more work. 7335 */ 7336 tnapi->last_tag = sblk->status_tag; 7337 tnapi->last_irq_tag = tnapi->last_tag; 7338 rmb(); 7339 } else 7340 sblk->status &= ~SD_STATUS_UPDATED; 7341 7342 if (likely(!tg3_has_work(tnapi))) { 7343 napi_complete_done(napi, work_done); 7344 tg3_int_reenable(tnapi); 7345 break; 7346 } 7347 } 7348 7349 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1); 7350 return work_done; 7351 7352 tx_recovery: 7353 /* work_done is guaranteed to be less than budget. 
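 * This holds because the loop above breaks as soon as
 * work_done >= budget, before calling tg3_poll_work() again, and
 * the tx-recovery path bails out of tg3_poll_work() before any
 * additional rx work can be added to work_done.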
*/ 7354 napi_complete(napi); 7355 tg3_reset_task_schedule(tp); 7356 return work_done; 7357 } 7358 7359 static void tg3_napi_disable(struct tg3 *tp) 7360 { 7361 int i; 7362 7363 for (i = tp->irq_cnt - 1; i >= 0; i--) 7364 napi_disable(&tp->napi[i].napi); 7365 } 7366 7367 static void tg3_napi_enable(struct tg3 *tp) 7368 { 7369 int i; 7370 7371 for (i = 0; i < tp->irq_cnt; i++) 7372 napi_enable(&tp->napi[i].napi); 7373 } 7374 7375 static void tg3_napi_init(struct tg3 *tp) 7376 { 7377 int i; 7378 7379 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll); 7380 for (i = 1; i < tp->irq_cnt; i++) 7381 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix); 7382 } 7383 7384 static void tg3_napi_fini(struct tg3 *tp) 7385 { 7386 int i; 7387 7388 for (i = 0; i < tp->irq_cnt; i++) 7389 netif_napi_del(&tp->napi[i].napi); 7390 } 7391 7392 static inline void tg3_netif_stop(struct tg3 *tp) 7393 { 7394 netif_trans_update(tp->dev); /* prevent tx timeout */ 7395 tg3_napi_disable(tp); 7396 netif_carrier_off(tp->dev); 7397 netif_tx_disable(tp->dev); 7398 } 7399 7400 /* tp->lock must be held */ 7401 static inline void tg3_netif_start(struct tg3 *tp) 7402 { 7403 tg3_ptp_resume(tp); 7404 7405 /* NOTE: unconditional netif_tx_wake_all_queues is only 7406 * appropriate so long as all callers are assured to 7407 * have free tx slots (such as after tg3_init_hw) 7408 */ 7409 netif_tx_wake_all_queues(tp->dev); 7410 7411 if (tp->link_up) 7412 netif_carrier_on(tp->dev); 7413 7414 tg3_napi_enable(tp); 7415 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED; 7416 tg3_enable_ints(tp); 7417 } 7418 7419 static void tg3_irq_quiesce(struct tg3 *tp) 7420 __releases(tp->lock) 7421 __acquires(tp->lock) 7422 { 7423 int i; 7424 7425 BUG_ON(tp->irq_sync); 7426 7427 tp->irq_sync = 1; 7428 smp_mb(); 7429 7430 spin_unlock_bh(&tp->lock); 7431 7432 for (i = 0; i < tp->irq_cnt; i++) 7433 synchronize_irq(tp->napi[i].irq_vec); 7434 7435 spin_lock_bh(&tp->lock); 7436 } 7437 7438 /* Fully shutdown all tg3 driver activity elsewhere in the system. 7439 * If irq_sync is non-zero, then the IRQ handler must be synchronized 7440 * with as well. Most of the time, this is not necessary except when 7441 * shutting down the device. 7442 */ 7443 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync) 7444 { 7445 spin_lock_bh(&tp->lock); 7446 if (irq_sync) 7447 tg3_irq_quiesce(tp); 7448 } 7449 7450 static inline void tg3_full_unlock(struct tg3 *tp) 7451 { 7452 spin_unlock_bh(&tp->lock); 7453 } 7454 7455 /* One-shot MSI handler - Chip automatically disables interrupt 7456 * after sending MSI so driver doesn't have to do it. 7457 */ 7458 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id) 7459 { 7460 struct tg3_napi *tnapi = dev_id; 7461 struct tg3 *tp = tnapi->tp; 7462 7463 prefetch(tnapi->hw_status); 7464 if (tnapi->rx_rcb) 7465 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); 7466 7467 if (likely(!tg3_irq_sync(tp))) 7468 napi_schedule(&tnapi->napi); 7469 7470 return IRQ_HANDLED; 7471 } 7472 7473 /* MSI ISR - No need to check for interrupt sharing and no need to 7474 * flush status block and interrupt mailbox. PCI ordering rules 7475 * guarantee that MSI will arrive after the status block. 
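 * (The MSI message is itself a posted memory write from the device,
 * travelling the same ordered channel as the status block DMA write,
 * so it cannot pass the status block on its way to the host.)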
7476 */ 7477 static irqreturn_t tg3_msi(int irq, void *dev_id) 7478 { 7479 struct tg3_napi *tnapi = dev_id; 7480 struct tg3 *tp = tnapi->tp; 7481
7482 prefetch(tnapi->hw_status); 7483 if (tnapi->rx_rcb) 7484 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); 7485 /* 7486 * Writing any value to intr-mbox-0 clears PCI INTA# and 7487 * chip-internal interrupt pending events. 7488 * Writing non-zero to intr-mbox-0 additionally tells the 7489 * NIC to stop sending us irqs, engaging "in-intr-handler" 7490 * event coalescing. 7491 */ 7492 tw32_mailbox(tnapi->int_mbox, 0x00000001); 7493 if (likely(!tg3_irq_sync(tp))) 7494 napi_schedule(&tnapi->napi); 7495
7496 return IRQ_RETVAL(1); 7497 } 7498
7499 static irqreturn_t tg3_interrupt(int irq, void *dev_id) 7500 { 7501 struct tg3_napi *tnapi = dev_id; 7502 struct tg3 *tp = tnapi->tp; 7503 struct tg3_hw_status *sblk = tnapi->hw_status; 7504 unsigned int handled = 1; 7505
7506 /* In INTx mode, it is possible for the interrupt to arrive at 7507 * the CPU before the status block that was posted prior to the 7508 * interrupt. Reading the PCI State register will confirm whether the 7509 * interrupt is ours and will flush the status block. 7510 */ 7511 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) { 7512 if (tg3_flag(tp, CHIP_RESETTING) || 7513 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { 7514 handled = 0; 7515 goto out; 7516 } 7517 } 7518
7519 /* 7520 * Writing any value to intr-mbox-0 clears PCI INTA# and 7521 * chip-internal interrupt pending events. 7522 * Writing non-zero to intr-mbox-0 additionally tells the 7523 * NIC to stop sending us irqs, engaging "in-intr-handler" 7524 * event coalescing. 7525 * 7526 * Flush the mailbox to de-assert the IRQ immediately to prevent 7527 * spurious interrupts. The flush impacts performance but 7528 * excessive spurious interrupts can be worse in some cases. 7529 */ 7530 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); 7531 if (tg3_irq_sync(tp)) 7532 goto out; 7533 sblk->status &= ~SD_STATUS_UPDATED; 7534 if (likely(tg3_has_work(tnapi))) { 7535 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); 7536 napi_schedule(&tnapi->napi); 7537 } else { 7538 /* No work, shared interrupt perhaps? re-enable 7539 * interrupts, and flush that PCI write 7540 */ 7541 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 7542 0x00000000); 7543 } 7544 out: 7545 return IRQ_RETVAL(handled); 7546 } 7547
7548 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id) 7549 { 7550 struct tg3_napi *tnapi = dev_id; 7551 struct tg3 *tp = tnapi->tp; 7552 struct tg3_hw_status *sblk = tnapi->hw_status; 7553 unsigned int handled = 1; 7554
7555 /* In INTx mode, it is possible for the interrupt to arrive at 7556 * the CPU before the status block that was posted prior to the 7557 * interrupt. Reading the PCI State register will confirm whether the 7558 * interrupt is ours and will flush the status block. 7559 */ 7560 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) { 7561 if (tg3_flag(tp, CHIP_RESETTING) || 7562 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { 7563 handled = 0; 7564 goto out; 7565 } 7566 } 7567
7568 /* 7569 * Writing any value to intr-mbox-0 clears PCI INTA# and 7570 * chip-internal interrupt pending events. 7571 * Writing non-zero to intr-mbox-0 additionally tells the 7572 * NIC to stop sending us irqs, engaging "in-intr-handler" 7573 * event coalescing. 7574 * 7575 * Flush the mailbox to de-assert the IRQ immediately to prevent 7576 * spurious interrupts.
The flush impacts performance but 7577 * excessive spurious interrupts can be worse in some cases. 7578 */ 7579 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); 7580 7581 /* 7582 * In a shared interrupt configuration, sometimes other devices' 7583 * interrupts will scream. We record the current status tag here 7584 * so that the above check can report that the screaming interrupts 7585 * are unhandled. Eventually they will be silenced. 7586 */ 7587 tnapi->last_irq_tag = sblk->status_tag; 7588 7589 if (tg3_irq_sync(tp)) 7590 goto out; 7591 7592 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); 7593 7594 napi_schedule(&tnapi->napi); 7595 7596 out: 7597 return IRQ_RETVAL(handled); 7598 } 7599 7600 /* ISR for interrupt test */ 7601 static irqreturn_t tg3_test_isr(int irq, void *dev_id) 7602 { 7603 struct tg3_napi *tnapi = dev_id; 7604 struct tg3 *tp = tnapi->tp; 7605 struct tg3_hw_status *sblk = tnapi->hw_status; 7606 7607 if ((sblk->status & SD_STATUS_UPDATED) || 7608 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { 7609 tg3_disable_ints(tp); 7610 return IRQ_RETVAL(1); 7611 } 7612 return IRQ_RETVAL(0); 7613 } 7614 7615 #ifdef CONFIG_NET_POLL_CONTROLLER 7616 static void tg3_poll_controller(struct net_device *dev) 7617 { 7618 int i; 7619 struct tg3 *tp = netdev_priv(dev); 7620 7621 if (tg3_irq_sync(tp)) 7622 return; 7623 7624 for (i = 0; i < tp->irq_cnt; i++) 7625 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]); 7626 } 7627 #endif 7628 7629 static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue) 7630 { 7631 struct tg3 *tp = netdev_priv(dev); 7632 7633 if (netif_msg_tx_err(tp)) { 7634 netdev_err(dev, "transmit timed out, resetting\n"); 7635 tg3_dump_state(tp); 7636 } 7637 7638 tg3_reset_task_schedule(tp); 7639 } 7640 7641 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */ 7642 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len) 7643 { 7644 u32 base = (u32) mapping & 0xffffffff; 7645 7646 return base + len + 8 < base; 7647 } 7648 7649 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes 7650 * of any 4GB boundaries: 4G, 8G, etc 7651 */ 7652 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping, 7653 u32 len, u32 mss) 7654 { 7655 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) { 7656 u32 base = (u32) mapping & 0xffffffff; 7657 7658 return ((base + len + (mss & 0x3fff)) < base); 7659 } 7660 return 0; 7661 } 7662 7663 /* Test for DMA addresses > 40-bit */ 7664 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping, 7665 int len) 7666 { 7667 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64) 7668 if (tg3_flag(tp, 40BIT_DMA_BUG)) 7669 return ((u64) mapping + len) > DMA_BIT_MASK(40); 7670 return 0; 7671 #else 7672 return 0; 7673 #endif 7674 } 7675 7676 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd, 7677 dma_addr_t mapping, u32 len, u32 flags, 7678 u32 mss, u32 vlan) 7679 { 7680 txbd->addr_hi = ((u64) mapping >> 32); 7681 txbd->addr_lo = ((u64) mapping & 0xffffffff); 7682 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff); 7683 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT); 7684 } 7685 7686 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget, 7687 dma_addr_t map, u32 len, u32 flags, 7688 u32 mss, u32 vlan) 7689 { 7690 struct tg3 *tp = tnapi->tp; 7691 bool hwbug = false; 7692 7693 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8) 7694 hwbug = true; 7695 7696 if (tg3_4g_overflow_test(map, 
len)) 7697 hwbug = true; 7698 7699 if (tg3_4g_tso_overflow_test(tp, map, len, mss)) 7700 hwbug = true; 7701 7702 if (tg3_40bit_overflow_test(tp, map, len)) 7703 hwbug = true; 7704 7705 if (tp->dma_limit) { 7706 u32 prvidx = *entry; 7707 u32 tmp_flag = flags & ~TXD_FLAG_END; 7708 while (len > tp->dma_limit && *budget) { 7709 u32 frag_len = tp->dma_limit; 7710 len -= tp->dma_limit; 7711 7712 /* Avoid the 8byte DMA problem */ 7713 if (len <= 8) { 7714 len += tp->dma_limit / 2; 7715 frag_len = tp->dma_limit / 2; 7716 } 7717 7718 tnapi->tx_buffers[*entry].fragmented = true; 7719 7720 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, 7721 frag_len, tmp_flag, mss, vlan); 7722 *budget -= 1; 7723 prvidx = *entry; 7724 *entry = NEXT_TX(*entry); 7725 7726 map += frag_len; 7727 } 7728 7729 if (len) { 7730 if (*budget) { 7731 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, 7732 len, flags, mss, vlan); 7733 *budget -= 1; 7734 *entry = NEXT_TX(*entry); 7735 } else { 7736 hwbug = true; 7737 tnapi->tx_buffers[prvidx].fragmented = false; 7738 } 7739 } 7740 } else { 7741 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, 7742 len, flags, mss, vlan); 7743 *entry = NEXT_TX(*entry); 7744 } 7745 7746 return hwbug; 7747 } 7748 7749 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last) 7750 { 7751 int i; 7752 struct sk_buff *skb; 7753 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry]; 7754 7755 skb = txb->skb; 7756 txb->skb = NULL; 7757 7758 dma_unmap_single(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping), 7759 skb_headlen(skb), DMA_TO_DEVICE); 7760 7761 while (txb->fragmented) { 7762 txb->fragmented = false; 7763 entry = NEXT_TX(entry); 7764 txb = &tnapi->tx_buffers[entry]; 7765 } 7766 7767 for (i = 0; i <= last; i++) { 7768 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 7769 7770 entry = NEXT_TX(entry); 7771 txb = &tnapi->tx_buffers[entry]; 7772 7773 dma_unmap_page(&tnapi->tp->pdev->dev, 7774 dma_unmap_addr(txb, mapping), 7775 skb_frag_size(frag), DMA_TO_DEVICE); 7776 7777 while (txb->fragmented) { 7778 txb->fragmented = false; 7779 entry = NEXT_TX(entry); 7780 txb = &tnapi->tx_buffers[entry]; 7781 } 7782 } 7783 } 7784 7785 /* Workaround 4GB and 40-bit hardware DMA bugs. */ 7786 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, 7787 struct sk_buff **pskb, 7788 u32 *entry, u32 *budget, 7789 u32 base_flags, u32 mss, u32 vlan) 7790 { 7791 struct tg3 *tp = tnapi->tp; 7792 struct sk_buff *new_skb, *skb = *pskb; 7793 dma_addr_t new_addr = 0; 7794 int ret = 0; 7795 7796 if (tg3_asic_rev(tp) != ASIC_REV_5701) 7797 new_skb = skb_copy(skb, GFP_ATOMIC); 7798 else { 7799 int more_headroom = 4 - ((unsigned long)skb->data & 3); 7800 7801 new_skb = skb_copy_expand(skb, 7802 skb_headroom(skb) + more_headroom, 7803 skb_tailroom(skb), GFP_ATOMIC); 7804 } 7805 7806 if (!new_skb) { 7807 ret = -1; 7808 } else { 7809 /* New SKB is guaranteed to be linear. 
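 * Both skb_copy() and skb_copy_expand() coalesce all paged
 * fragments into the new skb's linear data area, so the single
 * dma_map_single() of new_skb->data below covers the entire
 * packet.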
*/ 7810 new_addr = dma_map_single(&tp->pdev->dev, new_skb->data, 7811 new_skb->len, DMA_TO_DEVICE); 7812 /* Make sure the mapping succeeded */ 7813 if (dma_mapping_error(&tp->pdev->dev, new_addr)) { 7814 dev_kfree_skb_any(new_skb); 7815 ret = -1; 7816 } else { 7817 u32 save_entry = *entry; 7818
7819 base_flags |= TXD_FLAG_END; 7820
7821 tnapi->tx_buffers[*entry].skb = new_skb; 7822 dma_unmap_addr_set(&tnapi->tx_buffers[*entry], 7823 mapping, new_addr); 7824
7825 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr, 7826 new_skb->len, base_flags, 7827 mss, vlan)) { 7828 tg3_tx_skb_unmap(tnapi, save_entry, -1); 7829 dev_kfree_skb_any(new_skb); 7830 ret = -1; 7831 } 7832 } 7833 } 7834
7835 dev_consume_skb_any(skb); 7836 *pskb = new_skb; 7837 return ret; 7838 } 7839
7840 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb) 7841 { 7842 /* Check if we will never have enough descriptors, 7843 * as gso_segs can be more than the current ring size 7844 */ 7845 return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3; 7846 } 7847
7848 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *); 7849
7850 /* Use GSO to work around all TSO packets that meet HW bug conditions 7851 * indicated in tg3_tx_frag_set() 7852 */ 7853 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi, 7854 struct netdev_queue *txq, struct sk_buff *skb) 7855 { 7856 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3; 7857 struct sk_buff *segs, *seg, *next; 7858
7859 /* Estimate the number of fragments in the worst case */ 7860 if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) { 7861 netif_tx_stop_queue(txq); 7862
7863 /* netif_tx_stop_queue() must be done before checking 7864 * tx index in tg3_tx_avail() below, because in 7865 * tg3_tx(), we update tx index before checking for 7866 * netif_tx_queue_stopped(). 7867 */ 7868 smp_mb(); 7869 if (tg3_tx_avail(tnapi) <= frag_cnt_est) 7870 return NETDEV_TX_BUSY; 7871
7872 netif_tx_wake_queue(txq); 7873 } 7874
7875 segs = skb_gso_segment(skb, tp->dev->features & 7876 ~(NETIF_F_TSO | NETIF_F_TSO6)); 7877 if (IS_ERR(segs) || !segs) { 7878 tnapi->tx_dropped++; 7879 goto tg3_tso_bug_end; 7880 } 7881
7882 skb_list_walk_safe(segs, seg, next) { 7883 skb_mark_not_on_list(seg); 7884 tg3_start_xmit(seg, tp->dev); 7885 } 7886
7887 tg3_tso_bug_end: 7888 dev_consume_skb_any(skb); 7889
7890 return NETDEV_TX_OK; 7891 } 7892
7893 /* hard_start_xmit for all devices */ 7894 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) 7895 { 7896 struct tg3 *tp = netdev_priv(dev); 7897 u32 len, entry, base_flags, mss, vlan = 0; 7898 u32 budget; 7899 int i = -1, would_hit_hwbug; 7900 dma_addr_t mapping; 7901 struct tg3_napi *tnapi; 7902 struct netdev_queue *txq; 7903 unsigned int last; 7904 struct iphdr *iph = NULL; 7905 struct tcphdr *tcph = NULL; 7906 __sum16 tcp_csum = 0, ip_csum = 0; 7907 __be16 ip_tot_len = 0; 7908
7909 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); 7910 tnapi = &tp->napi[skb_get_queue_mapping(skb)]; 7911 if (tg3_flag(tp, ENABLE_TSS)) 7912 tnapi++; 7913
7914 budget = tg3_tx_avail(tnapi); 7915
7916 /* We are running in BH disabled context with netif_tx_lock 7917 * and TX reclaim runs via tp->napi.poll inside of a software 7918 * interrupt. Furthermore, IRQ processing runs lockless so we have 7919 * no IRQ context deadlocks to worry about either. Rejoice!
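 * (Producer and consumer touch disjoint ring state: this path
 * advances tx_prod under the tx queue lock, while tg3_tx()
 * advances tx_cons from NAPI context; the only cross-context
 * coordination required is the smp_mb() pairing used around
 * queue stop/wake.)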
7920 */ 7921 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) { 7922 if (!netif_tx_queue_stopped(txq)) { 7923 netif_tx_stop_queue(txq); 7924 7925 /* This is a hard error, log it. */ 7926 netdev_err(dev, 7927 "BUG! Tx Ring full when queue awake!\n"); 7928 } 7929 return NETDEV_TX_BUSY; 7930 } 7931 7932 entry = tnapi->tx_prod; 7933 base_flags = 0; 7934 7935 mss = skb_shinfo(skb)->gso_size; 7936 if (mss) { 7937 u32 tcp_opt_len, hdr_len; 7938 7939 if (skb_cow_head(skb, 0)) 7940 goto drop; 7941 7942 iph = ip_hdr(skb); 7943 tcp_opt_len = tcp_optlen(skb); 7944 7945 hdr_len = skb_tcp_all_headers(skb) - ETH_HLEN; 7946 7947 /* HW/FW can not correctly segment packets that have been 7948 * vlan encapsulated. 7949 */ 7950 if (skb->protocol == htons(ETH_P_8021Q) || 7951 skb->protocol == htons(ETH_P_8021AD)) { 7952 if (tg3_tso_bug_gso_check(tnapi, skb)) 7953 return tg3_tso_bug(tp, tnapi, txq, skb); 7954 goto drop; 7955 } 7956 7957 if (!skb_is_gso_v6(skb)) { 7958 if (unlikely((ETH_HLEN + hdr_len) > 80) && 7959 tg3_flag(tp, TSO_BUG)) { 7960 if (tg3_tso_bug_gso_check(tnapi, skb)) 7961 return tg3_tso_bug(tp, tnapi, txq, skb); 7962 goto drop; 7963 } 7964 ip_csum = iph->check; 7965 ip_tot_len = iph->tot_len; 7966 iph->check = 0; 7967 iph->tot_len = htons(mss + hdr_len); 7968 } 7969 7970 base_flags |= (TXD_FLAG_CPU_PRE_DMA | 7971 TXD_FLAG_CPU_POST_DMA); 7972 7973 tcph = tcp_hdr(skb); 7974 tcp_csum = tcph->check; 7975 7976 if (tg3_flag(tp, HW_TSO_1) || 7977 tg3_flag(tp, HW_TSO_2) || 7978 tg3_flag(tp, HW_TSO_3)) { 7979 tcph->check = 0; 7980 base_flags &= ~TXD_FLAG_TCPUDP_CSUM; 7981 } else { 7982 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 7983 0, IPPROTO_TCP, 0); 7984 } 7985 7986 if (tg3_flag(tp, HW_TSO_3)) { 7987 mss |= (hdr_len & 0xc) << 12; 7988 if (hdr_len & 0x10) 7989 base_flags |= 0x00000010; 7990 base_flags |= (hdr_len & 0x3e0) << 5; 7991 } else if (tg3_flag(tp, HW_TSO_2)) 7992 mss |= hdr_len << 9; 7993 else if (tg3_flag(tp, HW_TSO_1) || 7994 tg3_asic_rev(tp) == ASIC_REV_5705) { 7995 if (tcp_opt_len || iph->ihl > 5) { 7996 int tsflags; 7997 7998 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); 7999 mss |= (tsflags << 11); 8000 } 8001 } else { 8002 if (tcp_opt_len || iph->ihl > 5) { 8003 int tsflags; 8004 8005 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); 8006 base_flags |= tsflags << 12; 8007 } 8008 } 8009 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 8010 /* HW/FW can not correctly checksum packets that have been 8011 * vlan encapsulated. 
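 * For such frames we fall back to a software checksum via
 * skb_checksum_help() below rather than requesting
 * TXD_FLAG_TCPUDP_CSUM insertion from the NIC.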
8012 */ 8013 if (skb->protocol == htons(ETH_P_8021Q) || 8014 skb->protocol == htons(ETH_P_8021AD)) { 8015 if (skb_checksum_help(skb)) 8016 goto drop; 8017 } else { 8018 base_flags |= TXD_FLAG_TCPUDP_CSUM; 8019 } 8020 } 8021
8022 if (tg3_flag(tp, USE_JUMBO_BDFLAG) && 8023 !mss && skb->len > VLAN_ETH_FRAME_LEN) 8024 base_flags |= TXD_FLAG_JMB_PKT; 8025
8026 if (skb_vlan_tag_present(skb)) { 8027 base_flags |= TXD_FLAG_VLAN; 8028 vlan = skb_vlan_tag_get(skb); 8029 } 8030
8031 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) && 8032 tg3_flag(tp, TX_TSTAMP_EN)) { 8033 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 8034 base_flags |= TXD_FLAG_HWTSTAMP; 8035 } 8036
8037 len = skb_headlen(skb); 8038
8039 mapping = dma_map_single(&tp->pdev->dev, skb->data, len, 8040 DMA_TO_DEVICE); 8041 if (dma_mapping_error(&tp->pdev->dev, mapping)) 8042 goto drop; 8043
8044
8045 tnapi->tx_buffers[entry].skb = skb; 8046 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); 8047
8048 would_hit_hwbug = 0; 8049
8050 if (tg3_flag(tp, 5701_DMA_BUG)) 8051 would_hit_hwbug = 1; 8052
8053 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags | 8054 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0), 8055 mss, vlan)) { 8056 would_hit_hwbug = 1; 8057 } else if (skb_shinfo(skb)->nr_frags > 0) { 8058 u32 tmp_mss = mss; 8059
8060 if (!tg3_flag(tp, HW_TSO_1) && 8061 !tg3_flag(tp, HW_TSO_2) && 8062 !tg3_flag(tp, HW_TSO_3)) 8063 tmp_mss = 0; 8064
8065 /* Now loop through additional data 8066 * fragments, and queue them. 8067 */ 8068 last = skb_shinfo(skb)->nr_frags - 1; 8069 for (i = 0; i <= last; i++) { 8070 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 8071
8072 len = skb_frag_size(frag); 8073 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0, 8074 len, DMA_TO_DEVICE); 8075
8076 tnapi->tx_buffers[entry].skb = NULL; 8077 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, 8078 mapping); 8079 if (dma_mapping_error(&tp->pdev->dev, mapping)) 8080 goto dma_error; 8081
8082 if (!budget || 8083 tg3_tx_frag_set(tnapi, &entry, &budget, mapping, 8084 len, base_flags | 8085 ((i == last) ? TXD_FLAG_END : 0), 8086 tmp_mss, vlan)) { 8087 would_hit_hwbug = 1; 8088 break; 8089 } 8090 } 8091 } 8092
8093 if (would_hit_hwbug) { 8094 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i); 8095
8096 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) { 8097 /* If it's a TSO packet, do GSO instead of 8098 * allocating and copying to a large linear SKB 8099 */ 8100 if (ip_tot_len) { 8101 iph->check = ip_csum; 8102 iph->tot_len = ip_tot_len; 8103 } 8104 tcph->check = tcp_csum; 8105 return tg3_tso_bug(tp, tnapi, txq, skb); 8106 } 8107
8108 /* If the workaround fails due to memory/mapping 8109 * failure, silently drop this packet. 8110 */ 8111 entry = tnapi->tx_prod; 8112 budget = tg3_tx_avail(tnapi); 8113 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget, 8114 base_flags, mss, vlan)) 8115 goto drop_nofree; 8116 } 8117
8118 skb_tx_timestamp(skb); 8119 netdev_tx_sent_queue(txq, skb->len); 8120
8121 /* Sync BD data before updating mailbox */ 8122 wmb(); 8123
8124 tnapi->tx_prod = entry; 8125 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) { 8126 netif_tx_stop_queue(txq); 8127
8128 /* netif_tx_stop_queue() must be done before checking 8129 * tx index in tg3_tx_avail() below, because in 8130 * tg3_tx(), we update tx index before checking for 8131 * netif_tx_queue_stopped().
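 *
 * This is the producer-side half of the pairing with the smp_mb()
 * in tg3_tx(): stop the queue first, issue the barrier, then
 * re-test tg3_tx_avail(). Either this re-test observes the
 * consumer's tx_cons update, or tg3_tx() observes the stopped
 * queue and performs the wake-up.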
8132 */ 8133 smp_mb(); 8134 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)) 8135 netif_tx_wake_queue(txq); 8136 } 8137 8138 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) { 8139 /* Packets are ready, update Tx producer idx on card. */ 8140 tw32_tx_mbox(tnapi->prodmbox, entry); 8141 } 8142 8143 return NETDEV_TX_OK; 8144 8145 dma_error: 8146 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i); 8147 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL; 8148 drop: 8149 dev_kfree_skb_any(skb); 8150 drop_nofree: 8151 tnapi->tx_dropped++; 8152 return NETDEV_TX_OK; 8153 } 8154 8155 static void tg3_mac_loopback(struct tg3 *tp, bool enable) 8156 { 8157 if (enable) { 8158 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX | 8159 MAC_MODE_PORT_MODE_MASK); 8160 8161 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK; 8162 8163 if (!tg3_flag(tp, 5705_PLUS)) 8164 tp->mac_mode |= MAC_MODE_LINK_POLARITY; 8165 8166 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) 8167 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 8168 else 8169 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 8170 } else { 8171 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK; 8172 8173 if (tg3_flag(tp, 5705_PLUS) || 8174 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) || 8175 tg3_asic_rev(tp) == ASIC_REV_5700) 8176 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; 8177 } 8178 8179 tw32(MAC_MODE, tp->mac_mode); 8180 udelay(40); 8181 } 8182 8183 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk) 8184 { 8185 u32 val, bmcr, mac_mode, ptest = 0; 8186 8187 tg3_phy_toggle_apd(tp, false); 8188 tg3_phy_toggle_automdix(tp, false); 8189 8190 if (extlpbk && tg3_phy_set_extloopbk(tp)) 8191 return -EIO; 8192 8193 bmcr = BMCR_FULLDPLX; 8194 switch (speed) { 8195 case SPEED_10: 8196 break; 8197 case SPEED_100: 8198 bmcr |= BMCR_SPEED100; 8199 break; 8200 case SPEED_1000: 8201 default: 8202 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 8203 speed = SPEED_100; 8204 bmcr |= BMCR_SPEED100; 8205 } else { 8206 speed = SPEED_1000; 8207 bmcr |= BMCR_SPEED1000; 8208 } 8209 } 8210 8211 if (extlpbk) { 8212 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) { 8213 tg3_readphy(tp, MII_CTRL1000, &val); 8214 val |= CTL1000_AS_MASTER | 8215 CTL1000_ENABLE_MASTER; 8216 tg3_writephy(tp, MII_CTRL1000, val); 8217 } else { 8218 ptest = MII_TG3_FET_PTEST_TRIM_SEL | 8219 MII_TG3_FET_PTEST_TRIM_2; 8220 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest); 8221 } 8222 } else 8223 bmcr |= BMCR_LOOPBACK; 8224 8225 tg3_writephy(tp, MII_BMCR, bmcr); 8226 8227 /* The write needs to be flushed for the FETs */ 8228 if (tp->phy_flags & TG3_PHYFLG_IS_FET) 8229 tg3_readphy(tp, MII_BMCR, &bmcr); 8230 8231 udelay(40); 8232 8233 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) && 8234 tg3_asic_rev(tp) == ASIC_REV_5785) { 8235 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest | 8236 MII_TG3_FET_PTEST_FRC_TX_LINK | 8237 MII_TG3_FET_PTEST_FRC_TX_LOCK); 8238 8239 /* The write needs to be flushed for the AC131 */ 8240 tg3_readphy(tp, MII_TG3_FET_PTEST, &val); 8241 } 8242 8243 /* Reset to prevent losing 1st rx packet intermittently */ 8244 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && 8245 tg3_flag(tp, 5780_CLASS)) { 8246 tw32_f(MAC_RX_MODE, RX_MODE_RESET); 8247 udelay(10); 8248 tw32_f(MAC_RX_MODE, tp->rx_mode); 8249 } 8250 8251 mac_mode = tp->mac_mode & 8252 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); 8253 if (speed == SPEED_1000) 8254 mac_mode |= MAC_MODE_PORT_MODE_GMII; 8255 else 8256 mac_mode |= MAC_MODE_PORT_MODE_MII; 8257 8258 if (tg3_asic_rev(tp) == ASIC_REV_5700) { 8259 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK; 8260 8261 if (masked_phy_id == TG3_PHY_ID_BCM5401) 
8262 mac_mode &= ~MAC_MODE_LINK_POLARITY; 8263 else if (masked_phy_id == TG3_PHY_ID_BCM5411) 8264 mac_mode |= MAC_MODE_LINK_POLARITY; 8265 8266 tg3_writephy(tp, MII_TG3_EXT_CTRL, 8267 MII_TG3_EXT_CTRL_LNK3_LED_MODE); 8268 } 8269 8270 tw32(MAC_MODE, mac_mode); 8271 udelay(40); 8272 8273 return 0; 8274 } 8275 8276 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features) 8277 { 8278 struct tg3 *tp = netdev_priv(dev); 8279 8280 if (features & NETIF_F_LOOPBACK) { 8281 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK) 8282 return; 8283 8284 spin_lock_bh(&tp->lock); 8285 tg3_mac_loopback(tp, true); 8286 netif_carrier_on(tp->dev); 8287 spin_unlock_bh(&tp->lock); 8288 netdev_info(dev, "Internal MAC loopback mode enabled.\n"); 8289 } else { 8290 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)) 8291 return; 8292 8293 spin_lock_bh(&tp->lock); 8294 tg3_mac_loopback(tp, false); 8295 /* Force link status check */ 8296 tg3_setup_phy(tp, true); 8297 spin_unlock_bh(&tp->lock); 8298 netdev_info(dev, "Internal MAC loopback mode disabled.\n"); 8299 } 8300 } 8301 8302 static netdev_features_t tg3_fix_features(struct net_device *dev, 8303 netdev_features_t features) 8304 { 8305 struct tg3 *tp = netdev_priv(dev); 8306 8307 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS)) 8308 features &= ~NETIF_F_ALL_TSO; 8309 8310 return features; 8311 } 8312 8313 static int tg3_set_features(struct net_device *dev, netdev_features_t features) 8314 { 8315 netdev_features_t changed = dev->features ^ features; 8316 8317 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev)) 8318 tg3_set_loopback(dev, features); 8319 8320 return 0; 8321 } 8322 8323 static void tg3_rx_prodring_free(struct tg3 *tp, 8324 struct tg3_rx_prodring_set *tpr) 8325 { 8326 int i; 8327 8328 if (tpr != &tp->napi[0].prodring) { 8329 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx; 8330 i = (i + 1) & tp->rx_std_ring_mask) 8331 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i], 8332 tp->rx_pkt_map_sz); 8333 8334 if (tg3_flag(tp, JUMBO_CAPABLE)) { 8335 for (i = tpr->rx_jmb_cons_idx; 8336 i != tpr->rx_jmb_prod_idx; 8337 i = (i + 1) & tp->rx_jmb_ring_mask) { 8338 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i], 8339 TG3_RX_JMB_MAP_SZ); 8340 } 8341 } 8342 8343 return; 8344 } 8345 8346 for (i = 0; i <= tp->rx_std_ring_mask; i++) 8347 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i], 8348 tp->rx_pkt_map_sz); 8349 8350 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) { 8351 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) 8352 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i], 8353 TG3_RX_JMB_MAP_SZ); 8354 } 8355 } 8356 8357 /* Initialize rx rings for packet processing. 8358 * 8359 * The chip has been shut down and the driver detached from 8360 * the networking, so no interrupts or new tx packets will 8361 * end up in the driver. tp->{tx,}lock are held and thus 8362 * we may not sleep. 8363 */ 8364 static int tg3_rx_prodring_alloc(struct tg3 *tp, 8365 struct tg3_rx_prodring_set *tpr) 8366 { 8367 u32 i, rx_pkt_dma_sz; 8368 8369 tpr->rx_std_cons_idx = 0; 8370 tpr->rx_std_prod_idx = 0; 8371 tpr->rx_jmb_cons_idx = 0; 8372 tpr->rx_jmb_prod_idx = 0; 8373 8374 if (tpr != &tp->napi[0].prodring) { 8375 memset(&tpr->rx_std_buffers[0], 0, 8376 TG3_RX_STD_BUFF_RING_SIZE(tp)); 8377 if (tpr->rx_jmb_buffers) 8378 memset(&tpr->rx_jmb_buffers[0], 0, 8379 TG3_RX_JMB_BUFF_RING_SIZE(tp)); 8380 goto done; 8381 } 8382 8383 /* Zero out all descriptors. 
*/ 8384 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp)); 8385 8386 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ; 8387 if (tg3_flag(tp, 5780_CLASS) && 8388 tp->dev->mtu > ETH_DATA_LEN) 8389 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ; 8390 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz); 8391 8392 /* Initialize invariants of the rings, we only set this 8393 * stuff once. This works because the card does not 8394 * write into the rx buffer posting rings. 8395 */ 8396 for (i = 0; i <= tp->rx_std_ring_mask; i++) { 8397 struct tg3_rx_buffer_desc *rxd; 8398 8399 rxd = &tpr->rx_std[i]; 8400 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT; 8401 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT); 8402 rxd->opaque = (RXD_OPAQUE_RING_STD | 8403 (i << RXD_OPAQUE_INDEX_SHIFT)); 8404 } 8405 8406 /* Now allocate fresh SKBs for each rx ring. */ 8407 for (i = 0; i < tp->rx_pending; i++) { 8408 unsigned int frag_size; 8409 8410 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i, 8411 &frag_size) < 0) { 8412 netdev_warn(tp->dev, 8413 "Using a smaller RX standard ring. Only " 8414 "%d out of %d buffers were allocated " 8415 "successfully\n", i, tp->rx_pending); 8416 if (i == 0) 8417 goto initfail; 8418 tp->rx_pending = i; 8419 break; 8420 } 8421 } 8422 8423 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS)) 8424 goto done; 8425 8426 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp)); 8427 8428 if (!tg3_flag(tp, JUMBO_RING_ENABLE)) 8429 goto done; 8430 8431 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) { 8432 struct tg3_rx_buffer_desc *rxd; 8433 8434 rxd = &tpr->rx_jmb[i].std; 8435 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT; 8436 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) | 8437 RXD_FLAG_JUMBO; 8438 rxd->opaque = (RXD_OPAQUE_RING_JUMBO | 8439 (i << RXD_OPAQUE_INDEX_SHIFT)); 8440 } 8441 8442 for (i = 0; i < tp->rx_jumbo_pending; i++) { 8443 unsigned int frag_size; 8444 8445 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i, 8446 &frag_size) < 0) { 8447 netdev_warn(tp->dev, 8448 "Using a smaller RX jumbo ring. 
Only %d " 8449 "out of %d buffers were allocated " 8450 "successfully\n", i, tp->rx_jumbo_pending); 8451 if (i == 0) 8452 goto initfail; 8453 tp->rx_jumbo_pending = i; 8454 break; 8455 } 8456 } 8457 8458 done: 8459 return 0; 8460 8461 initfail: 8462 tg3_rx_prodring_free(tp, tpr); 8463 return -ENOMEM; 8464 } 8465 8466 static void tg3_rx_prodring_fini(struct tg3 *tp, 8467 struct tg3_rx_prodring_set *tpr) 8468 { 8469 kfree(tpr->rx_std_buffers); 8470 tpr->rx_std_buffers = NULL; 8471 kfree(tpr->rx_jmb_buffers); 8472 tpr->rx_jmb_buffers = NULL; 8473 if (tpr->rx_std) { 8474 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp), 8475 tpr->rx_std, tpr->rx_std_mapping); 8476 tpr->rx_std = NULL; 8477 } 8478 if (tpr->rx_jmb) { 8479 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp), 8480 tpr->rx_jmb, tpr->rx_jmb_mapping); 8481 tpr->rx_jmb = NULL; 8482 } 8483 } 8484 8485 static int tg3_rx_prodring_init(struct tg3 *tp, 8486 struct tg3_rx_prodring_set *tpr) 8487 { 8488 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp), 8489 GFP_KERNEL); 8490 if (!tpr->rx_std_buffers) 8491 return -ENOMEM; 8492 8493 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev, 8494 TG3_RX_STD_RING_BYTES(tp), 8495 &tpr->rx_std_mapping, 8496 GFP_KERNEL); 8497 if (!tpr->rx_std) 8498 goto err_out; 8499 8500 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) { 8501 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp), 8502 GFP_KERNEL); 8503 if (!tpr->rx_jmb_buffers) 8504 goto err_out; 8505 8506 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev, 8507 TG3_RX_JMB_RING_BYTES(tp), 8508 &tpr->rx_jmb_mapping, 8509 GFP_KERNEL); 8510 if (!tpr->rx_jmb) 8511 goto err_out; 8512 } 8513 8514 return 0; 8515 8516 err_out: 8517 tg3_rx_prodring_fini(tp, tpr); 8518 return -ENOMEM; 8519 } 8520 8521 /* Free up pending packets in all rx/tx rings. 8522 * 8523 * The chip has been shut down and the driver detached from 8524 * the networking, so no interrupts or new tx packets will 8525 * end up in the driver. tp->{tx,}lock is not held and we are not 8526 * in an interrupt context and thus may sleep. 8527 */ 8528 static void tg3_free_rings(struct tg3 *tp) 8529 { 8530 int i, j; 8531 8532 for (j = 0; j < tp->irq_cnt; j++) { 8533 struct tg3_napi *tnapi = &tp->napi[j]; 8534 8535 tg3_rx_prodring_free(tp, &tnapi->prodring); 8536 8537 if (!tnapi->tx_buffers) 8538 continue; 8539 8540 for (i = 0; i < TG3_TX_RING_SIZE; i++) { 8541 struct sk_buff *skb = tnapi->tx_buffers[i].skb; 8542 8543 if (!skb) 8544 continue; 8545 8546 tg3_tx_skb_unmap(tnapi, i, 8547 skb_shinfo(skb)->nr_frags - 1); 8548 8549 dev_consume_skb_any(skb); 8550 } 8551 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j)); 8552 } 8553 } 8554 8555 /* Initialize tx/rx rings for packet processing. 8556 * 8557 * The chip has been shut down and the driver detached from 8558 * the networking, so no interrupts or new tx packets will 8559 * end up in the driver. tp->{tx,}lock are held and thus 8560 * we may not sleep. 8561 */ 8562 static int tg3_init_rings(struct tg3 *tp) 8563 { 8564 int i; 8565 8566 /* Free up all the SKBs. 
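 * Anything still queued from the previous run is unmapped and
 * released first, so the memsets below start from an empty state.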
*/
8567 tg3_free_rings(tp);
8568 
8569 for (i = 0; i < tp->irq_cnt; i++) {
8570 struct tg3_napi *tnapi = &tp->napi[i];
8571 
8572 tnapi->last_tag = 0;
8573 tnapi->last_irq_tag = 0;
8574 tnapi->hw_status->status = 0;
8575 tnapi->hw_status->status_tag = 0;
8576 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8577 
8578 tnapi->tx_prod = 0;
8579 tnapi->tx_cons = 0;
8580 if (tnapi->tx_ring)
8581 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8582 
8583 tnapi->rx_rcb_ptr = 0;
8584 if (tnapi->rx_rcb)
8585 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8586 
8587 if (tnapi->prodring.rx_std &&
8588 tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8589 tg3_free_rings(tp);
8590 return -ENOMEM;
8591 }
8592 }
8593 
8594 return 0;
8595 }
8596 
8597 static void tg3_mem_tx_release(struct tg3 *tp)
8598 {
8599 int i;
8600 
8601 for (i = 0; i < tp->irq_max; i++) {
8602 struct tg3_napi *tnapi = &tp->napi[i];
8603 
8604 if (tnapi->tx_ring) {
8605 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8606 tnapi->tx_ring, tnapi->tx_desc_mapping);
8607 tnapi->tx_ring = NULL;
8608 }
8609 
8610 kfree(tnapi->tx_buffers);
8611 tnapi->tx_buffers = NULL;
8612 }
8613 }
8614 
8615 static int tg3_mem_tx_acquire(struct tg3 *tp)
8616 {
8617 int i;
8618 struct tg3_napi *tnapi = &tp->napi[0];
8619 
8620 /* If multivector TSS is enabled, vector 0 does not handle
8621 * tx interrupts. Don't allocate any resources for it.
8622 */
8623 if (tg3_flag(tp, ENABLE_TSS))
8624 tnapi++;
8625 
8626 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8627 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8628 sizeof(struct tg3_tx_ring_info),
8629 GFP_KERNEL);
8630 if (!tnapi->tx_buffers)
8631 goto err_out;
8632 
8633 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8634 TG3_TX_RING_BYTES,
8635 &tnapi->tx_desc_mapping,
8636 GFP_KERNEL);
8637 if (!tnapi->tx_ring)
8638 goto err_out;
8639 }
8640 
8641 return 0;
8642 
8643 err_out:
8644 tg3_mem_tx_release(tp);
8645 return -ENOMEM;
8646 }
8647 
8648 static void tg3_mem_rx_release(struct tg3 *tp)
8649 {
8650 int i;
8651 
8652 for (i = 0; i < tp->irq_max; i++) {
8653 struct tg3_napi *tnapi = &tp->napi[i];
8654 
8655 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8656 
8657 if (!tnapi->rx_rcb)
8658 continue;
8659 
8660 dma_free_coherent(&tp->pdev->dev,
8661 TG3_RX_RCB_RING_BYTES(tp),
8662 tnapi->rx_rcb,
8663 tnapi->rx_rcb_mapping);
8664 tnapi->rx_rcb = NULL;
8665 }
8666 }
8667 
8668 static int tg3_mem_rx_acquire(struct tg3 *tp)
8669 {
8670 unsigned int i, limit;
8671 
8672 limit = tp->rxq_cnt;
8673 
8674 /* If RSS is enabled, we need a (dummy) producer ring
8675 * set on vector zero. This is the true hw prodring.
8676 */
8677 if (tg3_flag(tp, ENABLE_RSS))
8678 limit++;
8679 
8680 for (i = 0; i < limit; i++) {
8681 struct tg3_napi *tnapi = &tp->napi[i];
8682 
8683 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8684 goto err_out;
8685 
8686 /* If multivector RSS is enabled, vector 0
8687 * does not handle rx or tx interrupts.
8688 * Don't allocate any resources for it.
8689 */
8690 if (!i && tg3_flag(tp, ENABLE_RSS))
8691 continue;
8692 
8693 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8694 TG3_RX_RCB_RING_BYTES(tp),
8695 &tnapi->rx_rcb_mapping,
8696 GFP_KERNEL);
8697 if (!tnapi->rx_rcb)
8698 goto err_out;
8699 }
8700 
8701 return 0;
8702 
8703 err_out:
8704 tg3_mem_rx_release(tp);
8705 return -ENOMEM;
8706 }
8707 
8708 /*
8709 * Must not be invoked with interrupt sources disabled and
8710 * the hardware shut down.
8711 */
8712 static void tg3_free_consistent(struct tg3 *tp)
8713 {
8714 int i;
8715 
8716 for (i = 0; i < tp->irq_cnt; i++) {
8717 struct tg3_napi *tnapi = &tp->napi[i];
8718 
8719 if (tnapi->hw_status) {
8720 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8721 tnapi->hw_status,
8722 tnapi->status_mapping);
8723 tnapi->hw_status = NULL;
8724 }
8725 }
8726 
8727 tg3_mem_rx_release(tp);
8728 tg3_mem_tx_release(tp);
8729 
8730 /* tp->hw_stats can be referenced safely:
8731 * 1. under rtnl_lock
8732 * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8733 */
8734 if (tp->hw_stats) {
8735 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8736 tp->hw_stats, tp->stats_mapping);
8737 tp->hw_stats = NULL;
8738 }
8739 }
8740 
8741 /*
8742 * Must not be invoked with interrupt sources disabled and
8743 * the hardware shut down. Can sleep.
8744 */
8745 static int tg3_alloc_consistent(struct tg3 *tp)
8746 {
8747 int i;
8748 
8749 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8750 sizeof(struct tg3_hw_stats),
8751 &tp->stats_mapping, GFP_KERNEL);
8752 if (!tp->hw_stats)
8753 goto err_out;
8754 
8755 for (i = 0; i < tp->irq_cnt; i++) {
8756 struct tg3_napi *tnapi = &tp->napi[i];
8757 struct tg3_hw_status *sblk;
8758 
8759 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8760 TG3_HW_STATUS_SIZE,
8761 &tnapi->status_mapping,
8762 GFP_KERNEL);
8763 if (!tnapi->hw_status)
8764 goto err_out;
8765 
8766 sblk = tnapi->hw_status;
8767 
8768 if (tg3_flag(tp, ENABLE_RSS)) {
8769 u16 *prodptr = NULL;
8770 
8771 /*
8772 * When RSS is enabled, the status block format changes
8773 * slightly. The "rx_jumbo_consumer", "reserved",
8774 * and "rx_mini_consumer" members get mapped to the
8775 * other three rx return ring producer indexes.
8776 */
8777 switch (i) {
8778 case 1:
8779 prodptr = &sblk->idx[0].rx_producer;
8780 break;
8781 case 2:
8782 prodptr = &sblk->rx_jumbo_consumer;
8783 break;
8784 case 3:
8785 prodptr = &sblk->reserved;
8786 break;
8787 case 4:
8788 prodptr = &sblk->rx_mini_consumer;
8789 break;
8790 }
8791 tnapi->rx_rcb_prod_idx = prodptr;
8792 } else {
8793 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8794 }
8795 }
8796 
8797 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8798 goto err_out;
8799 
8800 return 0;
8801 
8802 err_out:
8803 tg3_free_consistent(tp);
8804 return -ENOMEM;
8805 }
8806 
8807 #define MAX_WAIT_CNT 1000
8808 
8809 /* To stop a block, clear the enable bit and poll till it
8810 * clears. tp->lock is held.
8811 */
8812 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8813 {
8814 unsigned int i;
8815 u32 val;
8816 
8817 if (tg3_flag(tp, 5705_PLUS)) {
8818 switch (ofs) {
8819 case RCVLSC_MODE:
8820 case DMAC_MODE:
8821 case MBFREE_MODE:
8822 case BUFMGR_MODE:
8823 case MEMARB_MODE:
8824 /* We can't enable/disable these bits of the
8825 * 5705/5750, just say success.
8826 */ 8827 return 0; 8828 8829 default: 8830 break; 8831 } 8832 } 8833 8834 val = tr32(ofs); 8835 val &= ~enable_bit; 8836 tw32_f(ofs, val); 8837 8838 for (i = 0; i < MAX_WAIT_CNT; i++) { 8839 if (pci_channel_offline(tp->pdev)) { 8840 dev_err(&tp->pdev->dev, 8841 "tg3_stop_block device offline, " 8842 "ofs=%lx enable_bit=%x\n", 8843 ofs, enable_bit); 8844 return -ENODEV; 8845 } 8846 8847 udelay(100); 8848 val = tr32(ofs); 8849 if ((val & enable_bit) == 0) 8850 break; 8851 } 8852 8853 if (i == MAX_WAIT_CNT && !silent) { 8854 dev_err(&tp->pdev->dev, 8855 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n", 8856 ofs, enable_bit); 8857 return -ENODEV; 8858 } 8859 8860 return 0; 8861 } 8862 8863 /* tp->lock is held. */ 8864 static int tg3_abort_hw(struct tg3 *tp, bool silent) 8865 { 8866 int i, err; 8867 8868 tg3_disable_ints(tp); 8869 8870 if (pci_channel_offline(tp->pdev)) { 8871 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE); 8872 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE; 8873 err = -ENODEV; 8874 goto err_no_dev; 8875 } 8876 8877 tp->rx_mode &= ~RX_MODE_ENABLE; 8878 tw32_f(MAC_RX_MODE, tp->rx_mode); 8879 udelay(10); 8880 8881 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent); 8882 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent); 8883 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent); 8884 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent); 8885 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent); 8886 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent); 8887 8888 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent); 8889 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent); 8890 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent); 8891 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent); 8892 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent); 8893 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent); 8894 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent); 8895 8896 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE; 8897 tw32_f(MAC_MODE, tp->mac_mode); 8898 udelay(40); 8899 8900 tp->tx_mode &= ~TX_MODE_ENABLE; 8901 tw32_f(MAC_TX_MODE, tp->tx_mode); 8902 8903 for (i = 0; i < MAX_WAIT_CNT; i++) { 8904 udelay(100); 8905 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE)) 8906 break; 8907 } 8908 if (i >= MAX_WAIT_CNT) { 8909 dev_err(&tp->pdev->dev, 8910 "%s timed out, TX_MODE_ENABLE will not clear " 8911 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE)); 8912 err |= -ENODEV; 8913 } 8914 8915 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent); 8916 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent); 8917 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent); 8918 8919 tw32(FTQ_RESET, 0xffffffff); 8920 tw32(FTQ_RESET, 0x00000000); 8921 8922 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent); 8923 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent); 8924 8925 err_no_dev: 8926 for (i = 0; i < tp->irq_cnt; i++) { 8927 struct tg3_napi *tnapi = &tp->napi[i]; 8928 if (tnapi->hw_status) 8929 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); 8930 } 8931 8932 return err; 8933 } 8934 8935 /* Save PCI command register before chip reset */ 8936 static void tg3_save_pci_state(struct tg3 *tp) 8937 { 8938 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd); 8939 } 8940 8941 /* Restore PCI state after chip reset */ 8942 static void 
tg3_restore_pci_state(struct tg3 *tp) 8943 { 8944 u32 val; 8945 8946 /* Re-enable indirect register accesses. */ 8947 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, 8948 tp->misc_host_ctrl); 8949 8950 /* Set MAX PCI retry to zero. */ 8951 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE); 8952 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 && 8953 tg3_flag(tp, PCIX_MODE)) 8954 val |= PCISTATE_RETRY_SAME_DMA; 8955 /* Allow reads and writes to the APE register and memory space. */ 8956 if (tg3_flag(tp, ENABLE_APE)) 8957 val |= PCISTATE_ALLOW_APE_CTLSPC_WR | 8958 PCISTATE_ALLOW_APE_SHMEM_WR | 8959 PCISTATE_ALLOW_APE_PSPACE_WR; 8960 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val); 8961 8962 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd); 8963 8964 if (!tg3_flag(tp, PCI_EXPRESS)) { 8965 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, 8966 tp->pci_cacheline_sz); 8967 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, 8968 tp->pci_lat_timer); 8969 } 8970 8971 /* Make sure PCI-X relaxed ordering bit is clear. */ 8972 if (tg3_flag(tp, PCIX_MODE)) { 8973 u16 pcix_cmd; 8974 8975 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 8976 &pcix_cmd); 8977 pcix_cmd &= ~PCI_X_CMD_ERO; 8978 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 8979 pcix_cmd); 8980 } 8981 8982 if (tg3_flag(tp, 5780_CLASS)) { 8983 8984 /* Chip reset on 5780 will reset MSI enable bit, 8985 * so need to restore it. 8986 */ 8987 if (tg3_flag(tp, USING_MSI)) { 8988 u16 ctrl; 8989 8990 pci_read_config_word(tp->pdev, 8991 tp->msi_cap + PCI_MSI_FLAGS, 8992 &ctrl); 8993 pci_write_config_word(tp->pdev, 8994 tp->msi_cap + PCI_MSI_FLAGS, 8995 ctrl | PCI_MSI_FLAGS_ENABLE); 8996 val = tr32(MSGINT_MODE); 8997 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE); 8998 } 8999 } 9000 } 9001 9002 static void tg3_override_clk(struct tg3 *tp) 9003 { 9004 u32 val; 9005 9006 switch (tg3_asic_rev(tp)) { 9007 case ASIC_REV_5717: 9008 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE); 9009 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val | 9010 TG3_CPMU_MAC_ORIDE_ENABLE); 9011 break; 9012 9013 case ASIC_REV_5719: 9014 case ASIC_REV_5720: 9015 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN); 9016 break; 9017 9018 default: 9019 return; 9020 } 9021 } 9022 9023 static void tg3_restore_clk(struct tg3 *tp) 9024 { 9025 u32 val; 9026 9027 switch (tg3_asic_rev(tp)) { 9028 case ASIC_REV_5717: 9029 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE); 9030 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, 9031 val & ~TG3_CPMU_MAC_ORIDE_ENABLE); 9032 break; 9033 9034 case ASIC_REV_5719: 9035 case ASIC_REV_5720: 9036 val = tr32(TG3_CPMU_CLCK_ORIDE); 9037 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN); 9038 break; 9039 9040 default: 9041 return; 9042 } 9043 } 9044 9045 /* tp->lock is held. */ 9046 static int tg3_chip_reset(struct tg3 *tp) 9047 __releases(tp->lock) 9048 __acquires(tp->lock) 9049 { 9050 u32 val; 9051 void (*write_op)(struct tg3 *, u32, u32); 9052 int i, err; 9053 9054 if (!pci_device_is_present(tp->pdev)) 9055 return -ENODEV; 9056 9057 tg3_nvram_lock(tp); 9058 9059 tg3_ape_lock(tp, TG3_APE_LOCK_GRC); 9060 9061 /* No matching tg3_nvram_unlock() after this because 9062 * chip reset below will undo the nvram lock. 9063 */ 9064 tp->nvram_lock_cnt = 0; 9065 9066 /* GRC_MISC_CFG core clock reset will clear the memory 9067 * enable bit in PCI register 4 and the MSI enable bit 9068 * on some chips, so we save relevant registers here. 
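 * Only PCI_COMMAND is captured in tg3_save_pci_state(); everything
 * else (misc host control, cache line size, latency timer, PCI-X and
 * MSI state, APE access bits) is rebuilt from the values cached in
 * *tp by tg3_restore_pci_state().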
9069 */
9070 tg3_save_pci_state(tp);
9071 
9072 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9073 tg3_flag(tp, 5755_PLUS))
9074 tw32(GRC_FASTBOOT_PC, 0);
9075 
9076 /*
9077 * We must avoid the readl() that normally takes place.
9078 * It locks machines, causes machine checks, and other
9079 * fun things. So, temporarily disable the 5701
9080 * hardware workaround while we do the reset.
9081 */
9082 write_op = tp->write32;
9083 if (write_op == tg3_write_flush_reg32)
9084 tp->write32 = tg3_write32;
9085 
9086 /* Prevent the irq handler from reading or writing PCI registers
9087 * during chip reset when the memory enable bit in the PCI command
9088 * register may be cleared. The chip does not generate interrupt
9089 * at this time, but the irq handler may still be called due to irq
9090 * sharing or irqpoll.
9091 */
9092 tg3_flag_set(tp, CHIP_RESETTING);
9093 for (i = 0; i < tp->irq_cnt; i++) {
9094 struct tg3_napi *tnapi = &tp->napi[i];
9095 if (tnapi->hw_status) {
9096 tnapi->hw_status->status = 0;
9097 tnapi->hw_status->status_tag = 0;
9098 }
9099 tnapi->last_tag = 0;
9100 tnapi->last_irq_tag = 0;
9101 }
9102 smp_mb();
9103 
9104 tg3_full_unlock(tp);
9105 
9106 for (i = 0; i < tp->irq_cnt; i++)
9107 synchronize_irq(tp->napi[i].irq_vec);
9108 
9109 tg3_full_lock(tp, 0);
9110 
9111 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9112 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9113 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9114 }
9115 
9116 /* do the reset */
9117 val = GRC_MISC_CFG_CORECLK_RESET;
9118 
9119 if (tg3_flag(tp, PCI_EXPRESS)) {
9120 /* Force PCIe 1.0a mode */
9121 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9122 !tg3_flag(tp, 57765_PLUS) &&
9123 tr32(TG3_PCIE_PHY_TSTCTL) ==
9124 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9125 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9126 
9127 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9128 tw32(GRC_MISC_CFG, (1 << 29));
9129 val |= (1 << 29);
9130 }
9131 }
9132 
9133 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9134 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9135 tw32(GRC_VCPU_EXT_CTRL,
9136 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9137 }
9138 
9139 /* Set the clock to the highest frequency to avoid timeouts. With link
9140 * aware mode, the clock speed could be slow and bootcode does not
9141 * complete within the expected time. Override the clock to allow the
9142 * bootcode to finish sooner and then restore it.
9143 */
9144 tg3_override_clk(tp);
9145 
9146 /* Manage gphy power for all CPMU-absent PCIe devices. */
9147 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9148 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9149 
9150 tw32(GRC_MISC_CFG, val);
9151 
9152 /* restore 5701 hardware bug workaround write method */
9153 tp->write32 = write_op;
9154 
9155 /* Unfortunately, we have to delay before the PCI read back.
9156 * Some 575X chips will not even respond to a PCI cfg access
9157 * when the reset command is given to the chip.
9158 *
9159 * How do these hardware designers expect things to work
9160 * properly if the PCI write is posted for a long period
9161 * of time? It is always necessary to have some method by
9162 * which a register read back can occur to push the write
9163 * out which does the reset.
9164 *
9165 * For most tg3 variants the trick below was working.
9166 * Ho hum...
9167 */
9168 udelay(120);
9169 
9170 /* Flush PCI posted writes.
The normal MMIO registers
9171 * are inaccessible at this time so this is the only
9172 * way to do this reliably (actually, this is no longer
9173 * the case, see above). I tried to use indirect
9174 * register read/write but this upset some 5701 variants.
9175 */
9176 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9177 
9178 udelay(120);
9179 
9180 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9181 u16 val16;
9182 
9183 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9184 int j;
9185 u32 cfg_val;
9186 
9187 /* Wait for link training to complete. */
9188 for (j = 0; j < 5000; j++)
9189 udelay(100);
9190 
9191 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9192 pci_write_config_dword(tp->pdev, 0xc4,
9193 cfg_val | (1 << 15));
9194 }
9195 
9196 /* Clear the "no snoop" and "relaxed ordering" bits. */
9197 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9198 /*
9199 * Older PCIe devices only support the 128-byte
9200 * MPS setting. Enforce the restriction.
9201 */
9202 if (!tg3_flag(tp, CPMU_PRESENT))
9203 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9204 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9205 
9206 /* Clear error status */
9207 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9208 PCI_EXP_DEVSTA_CED |
9209 PCI_EXP_DEVSTA_NFED |
9210 PCI_EXP_DEVSTA_FED |
9211 PCI_EXP_DEVSTA_URD);
9212 }
9213 
9214 tg3_restore_pci_state(tp);
9215 
9216 tg3_flag_clear(tp, CHIP_RESETTING);
9217 tg3_flag_clear(tp, ERROR_PROCESSED);
9218 
9219 val = 0;
9220 if (tg3_flag(tp, 5780_CLASS))
9221 val = tr32(MEMARB_MODE);
9222 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9223 
9224 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9225 tg3_stop_fw(tp);
9226 tw32(0x5000, 0x400);
9227 }
9228 
9229 if (tg3_flag(tp, IS_SSB_CORE)) {
9230 /*
9231 * BCM4785: In order to avoid repercussions from using
9232 * potentially defective internal ROM, stop the Rx RISC CPU,
9233 * which is not required.
9234 */
9235 tg3_stop_fw(tp);
9236 tg3_halt_cpu(tp, RX_CPU_BASE);
9237 }
9238 
9239 err = tg3_poll_fw(tp);
9240 if (err)
9241 return err;
9242 
9243 tw32(GRC_MODE, tp->grc_mode);
9244 
9245 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9246 val = tr32(0xc4);
9247 
9248 tw32(0xc4, val | (1 << 15));
9249 }
9250 
9251 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9252 tg3_asic_rev(tp) == ASIC_REV_5705) {
9253 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9254 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9255 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9256 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9257 }
9258 
9259 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9260 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9261 val = tp->mac_mode;
9262 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9263 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9264 val = tp->mac_mode;
9265 } else
9266 val = 0;
9267 
9268 tw32_f(MAC_MODE, val);
9269 udelay(40);
9270 
9271 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9272 
9273 tg3_mdio_start(tp);
9274 
9275 if (tg3_flag(tp, PCI_EXPRESS) &&
9276 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9277 tg3_asic_rev(tp) != ASIC_REV_5785 &&
9278 !tg3_flag(tp, 57765_PLUS)) {
9279 val = tr32(0x7c00);
9280 
9281 tw32(0x7c00, val | (1 << 25));
9282 }
9283 
9284 tg3_restore_clk(tp);
9285 
9286 /* Increase the core clock speed to fix tx timeout issue for 5762
9287 * with 100Mbps link speed.
9288 */ 9289 if (tg3_asic_rev(tp) == ASIC_REV_5762) { 9290 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE); 9291 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val | 9292 TG3_CPMU_MAC_ORIDE_ENABLE); 9293 } 9294 9295 /* Reprobe ASF enable state. */ 9296 tg3_flag_clear(tp, ENABLE_ASF); 9297 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK | 9298 TG3_PHYFLG_KEEP_LINK_ON_PWRDN); 9299 9300 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE); 9301 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); 9302 if (val == NIC_SRAM_DATA_SIG_MAGIC) { 9303 u32 nic_cfg; 9304 9305 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); 9306 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { 9307 tg3_flag_set(tp, ENABLE_ASF); 9308 tp->last_event_jiffies = jiffies; 9309 if (tg3_flag(tp, 5750_PLUS)) 9310 tg3_flag_set(tp, ASF_NEW_HANDSHAKE); 9311 9312 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg); 9313 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK) 9314 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK; 9315 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID) 9316 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN; 9317 } 9318 } 9319 9320 return 0; 9321 } 9322 9323 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *); 9324 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *); 9325 static void __tg3_set_rx_mode(struct net_device *); 9326 9327 /* tp->lock is held. */ 9328 static int tg3_halt(struct tg3 *tp, int kind, bool silent) 9329 { 9330 int err, i; 9331 9332 tg3_stop_fw(tp); 9333 9334 tg3_write_sig_pre_reset(tp, kind); 9335 9336 tg3_abort_hw(tp, silent); 9337 err = tg3_chip_reset(tp); 9338 9339 __tg3_set_mac_addr(tp, false); 9340 9341 tg3_write_sig_legacy(tp, kind); 9342 tg3_write_sig_post_reset(tp, kind); 9343 9344 if (tp->hw_stats) { 9345 /* Save the stats across chip resets... */ 9346 tg3_get_nstats(tp, &tp->net_stats_prev); 9347 tg3_get_estats(tp, &tp->estats_prev); 9348 9349 /* And make sure the next sample is new data */ 9350 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); 9351 9352 for (i = 0; i < TG3_IRQ_MAX_VECS; ++i) { 9353 struct tg3_napi *tnapi = &tp->napi[i]; 9354 9355 tnapi->rx_dropped = 0; 9356 tnapi->tx_dropped = 0; 9357 } 9358 } 9359 9360 return err; 9361 } 9362 9363 static int tg3_set_mac_addr(struct net_device *dev, void *p) 9364 { 9365 struct tg3 *tp = netdev_priv(dev); 9366 struct sockaddr *addr = p; 9367 int err = 0; 9368 bool skip_mac_1 = false; 9369 9370 if (!is_valid_ether_addr(addr->sa_data)) 9371 return -EADDRNOTAVAIL; 9372 9373 eth_hw_addr_set(dev, addr->sa_data); 9374 9375 if (!netif_running(dev)) 9376 return 0; 9377 9378 if (tg3_flag(tp, ENABLE_ASF)) { 9379 u32 addr0_high, addr0_low, addr1_high, addr1_low; 9380 9381 addr0_high = tr32(MAC_ADDR_0_HIGH); 9382 addr0_low = tr32(MAC_ADDR_0_LOW); 9383 addr1_high = tr32(MAC_ADDR_1_HIGH); 9384 addr1_low = tr32(MAC_ADDR_1_LOW); 9385 9386 /* Skip MAC addr 1 if ASF is using it. */ 9387 if ((addr0_high != addr1_high || addr0_low != addr1_low) && 9388 !(addr1_high == 0 && addr1_low == 0)) 9389 skip_mac_1 = true; 9390 } 9391 spin_lock_bh(&tp->lock); 9392 __tg3_set_mac_addr(tp, skip_mac_1); 9393 __tg3_set_rx_mode(dev); 9394 spin_unlock_bh(&tp->lock); 9395 9396 return err; 9397 } 9398 9399 /* tp->lock is held. 
*/ 9400 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr, 9401 dma_addr_t mapping, u32 maxlen_flags, 9402 u32 nic_addr) 9403 { 9404 tg3_write_mem(tp, 9405 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH), 9406 ((u64) mapping >> 32)); 9407 tg3_write_mem(tp, 9408 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW), 9409 ((u64) mapping & 0xffffffff)); 9410 tg3_write_mem(tp, 9411 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS), 9412 maxlen_flags); 9413 9414 if (!tg3_flag(tp, 5705_PLUS)) 9415 tg3_write_mem(tp, 9416 (bdinfo_addr + TG3_BDINFO_NIC_ADDR), 9417 nic_addr); 9418 } 9419 9420 9421 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec) 9422 { 9423 int i = 0; 9424 9425 if (!tg3_flag(tp, ENABLE_TSS)) { 9426 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs); 9427 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames); 9428 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq); 9429 } else { 9430 tw32(HOSTCC_TXCOL_TICKS, 0); 9431 tw32(HOSTCC_TXMAX_FRAMES, 0); 9432 tw32(HOSTCC_TXCOAL_MAXF_INT, 0); 9433 9434 for (; i < tp->txq_cnt; i++) { 9435 u32 reg; 9436 9437 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18; 9438 tw32(reg, ec->tx_coalesce_usecs); 9439 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18; 9440 tw32(reg, ec->tx_max_coalesced_frames); 9441 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18; 9442 tw32(reg, ec->tx_max_coalesced_frames_irq); 9443 } 9444 } 9445 9446 for (; i < tp->irq_max - 1; i++) { 9447 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0); 9448 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0); 9449 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); 9450 } 9451 } 9452 9453 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec) 9454 { 9455 int i = 0; 9456 u32 limit = tp->rxq_cnt; 9457 9458 if (!tg3_flag(tp, ENABLE_RSS)) { 9459 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs); 9460 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames); 9461 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq); 9462 limit--; 9463 } else { 9464 tw32(HOSTCC_RXCOL_TICKS, 0); 9465 tw32(HOSTCC_RXMAX_FRAMES, 0); 9466 tw32(HOSTCC_RXCOAL_MAXF_INT, 0); 9467 } 9468 9469 for (; i < limit; i++) { 9470 u32 reg; 9471 9472 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18; 9473 tw32(reg, ec->rx_coalesce_usecs); 9474 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18; 9475 tw32(reg, ec->rx_max_coalesced_frames); 9476 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18; 9477 tw32(reg, ec->rx_max_coalesced_frames_irq); 9478 } 9479 9480 for (; i < tp->irq_max - 1; i++) { 9481 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0); 9482 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0); 9483 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); 9484 } 9485 } 9486 9487 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) 9488 { 9489 tg3_coal_tx_init(tp, ec); 9490 tg3_coal_rx_init(tp, ec); 9491 9492 if (!tg3_flag(tp, 5705_PLUS)) { 9493 u32 val = ec->stats_block_coalesce_usecs; 9494 9495 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq); 9496 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq); 9497 9498 if (!tp->link_up) 9499 val = 0; 9500 9501 tw32(HOSTCC_STAT_COAL_TICKS, val); 9502 } 9503 } 9504 9505 /* tp->lock is held. */ 9506 static void tg3_tx_rcbs_disable(struct tg3 *tp) 9507 { 9508 u32 txrcb, limit; 9509 9510 /* Disable all transmit rings but the first. 
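 * How many send-ring control blocks exist in SRAM depends on the
 * chip (16, 4, 2 or 1); each is TG3_BDINFO_SIZE bytes, and every
 * block past the first is stamped BDINFO_FLAGS_DISABLED.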
*/ 9511 if (!tg3_flag(tp, 5705_PLUS)) 9512 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16; 9513 else if (tg3_flag(tp, 5717_PLUS)) 9514 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4; 9515 else if (tg3_flag(tp, 57765_CLASS) || 9516 tg3_asic_rev(tp) == ASIC_REV_5762) 9517 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2; 9518 else 9519 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; 9520 9521 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; 9522 txrcb < limit; txrcb += TG3_BDINFO_SIZE) 9523 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS, 9524 BDINFO_FLAGS_DISABLED); 9525 } 9526 9527 /* tp->lock is held. */ 9528 static void tg3_tx_rcbs_init(struct tg3 *tp) 9529 { 9530 int i = 0; 9531 u32 txrcb = NIC_SRAM_SEND_RCB; 9532 9533 if (tg3_flag(tp, ENABLE_TSS)) 9534 i++; 9535 9536 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) { 9537 struct tg3_napi *tnapi = &tp->napi[i]; 9538 9539 if (!tnapi->tx_ring) 9540 continue; 9541 9542 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, 9543 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT), 9544 NIC_SRAM_TX_BUFFER_DESC); 9545 } 9546 } 9547 9548 /* tp->lock is held. */ 9549 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp) 9550 { 9551 u32 rxrcb, limit; 9552 9553 /* Disable all receive return rings but the first. */ 9554 if (tg3_flag(tp, 5717_PLUS)) 9555 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17; 9556 else if (!tg3_flag(tp, 5705_PLUS)) 9557 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16; 9558 else if (tg3_asic_rev(tp) == ASIC_REV_5755 || 9559 tg3_asic_rev(tp) == ASIC_REV_5762 || 9560 tg3_flag(tp, 57765_CLASS)) 9561 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4; 9562 else 9563 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; 9564 9565 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; 9566 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE) 9567 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS, 9568 BDINFO_FLAGS_DISABLED); 9569 } 9570 9571 /* tp->lock is held. */ 9572 static void tg3_rx_ret_rcbs_init(struct tg3 *tp) 9573 { 9574 int i = 0; 9575 u32 rxrcb = NIC_SRAM_RCV_RET_RCB; 9576 9577 if (tg3_flag(tp, ENABLE_RSS)) 9578 i++; 9579 9580 for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) { 9581 struct tg3_napi *tnapi = &tp->napi[i]; 9582 9583 if (!tnapi->rx_rcb) 9584 continue; 9585 9586 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, 9587 (tp->rx_ret_ring_mask + 1) << 9588 BDINFO_FLAGS_MAXLEN_SHIFT, 0); 9589 } 9590 } 9591 9592 /* tp->lock is held. */ 9593 static void tg3_rings_reset(struct tg3 *tp) 9594 { 9595 int i; 9596 u32 stblk; 9597 struct tg3_napi *tnapi = &tp->napi[0]; 9598 9599 tg3_tx_rcbs_disable(tp); 9600 9601 tg3_rx_ret_rcbs_disable(tp); 9602 9603 /* Disable interrupts */ 9604 tw32_mailbox_f(tp->napi[0].int_mbox, 1); 9605 tp->napi[0].chk_msi_cnt = 0; 9606 tp->napi[0].last_rx_cons = 0; 9607 tp->napi[0].last_tx_cons = 0; 9608 9609 /* Zero mailbox registers. 
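 * Each vector has its own producer and consumer mailboxes; writing
 * 1 to a vector's int_mbox keeps its interrupts masked until the
 * rings are fully reprogrammed.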
*/ 9610 if (tg3_flag(tp, SUPPORT_MSIX)) { 9611 for (i = 1; i < tp->irq_max; i++) { 9612 tp->napi[i].tx_prod = 0; 9613 tp->napi[i].tx_cons = 0; 9614 if (tg3_flag(tp, ENABLE_TSS)) 9615 tw32_mailbox(tp->napi[i].prodmbox, 0); 9616 tw32_rx_mbox(tp->napi[i].consmbox, 0); 9617 tw32_mailbox_f(tp->napi[i].int_mbox, 1); 9618 tp->napi[i].chk_msi_cnt = 0; 9619 tp->napi[i].last_rx_cons = 0; 9620 tp->napi[i].last_tx_cons = 0; 9621 } 9622 if (!tg3_flag(tp, ENABLE_TSS)) 9623 tw32_mailbox(tp->napi[0].prodmbox, 0); 9624 } else { 9625 tp->napi[0].tx_prod = 0; 9626 tp->napi[0].tx_cons = 0; 9627 tw32_mailbox(tp->napi[0].prodmbox, 0); 9628 tw32_rx_mbox(tp->napi[0].consmbox, 0); 9629 } 9630 9631 /* Make sure the NIC-based send BD rings are disabled. */ 9632 if (!tg3_flag(tp, 5705_PLUS)) { 9633 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW; 9634 for (i = 0; i < 16; i++) 9635 tw32_tx_mbox(mbox + i * 8, 0); 9636 } 9637 9638 /* Clear status block in ram. */ 9639 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); 9640 9641 /* Set status block DMA address */ 9642 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, 9643 ((u64) tnapi->status_mapping >> 32)); 9644 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, 9645 ((u64) tnapi->status_mapping & 0xffffffff)); 9646 9647 stblk = HOSTCC_STATBLCK_RING1; 9648 9649 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) { 9650 u64 mapping = (u64)tnapi->status_mapping; 9651 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32); 9652 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff); 9653 stblk += 8; 9654 9655 /* Clear status block in ram. */ 9656 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); 9657 } 9658 9659 tg3_tx_rcbs_init(tp); 9660 tg3_rx_ret_rcbs_init(tp); 9661 } 9662 9663 static void tg3_setup_rxbd_thresholds(struct tg3 *tp) 9664 { 9665 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh; 9666 9667 if (!tg3_flag(tp, 5750_PLUS) || 9668 tg3_flag(tp, 5780_CLASS) || 9669 tg3_asic_rev(tp) == ASIC_REV_5750 || 9670 tg3_asic_rev(tp) == ASIC_REV_5752 || 9671 tg3_flag(tp, 57765_PLUS)) 9672 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700; 9673 else if (tg3_asic_rev(tp) == ASIC_REV_5755 || 9674 tg3_asic_rev(tp) == ASIC_REV_5787) 9675 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755; 9676 else 9677 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906; 9678 9679 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post); 9680 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1); 9681 9682 val = min(nic_rep_thresh, host_rep_thresh); 9683 tw32(RCVBDI_STD_THRESH, val); 9684 9685 if (tg3_flag(tp, 57765_PLUS)) 9686 tw32(STD_REPLENISH_LWM, bdcache_maxcnt); 9687 9688 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS)) 9689 return; 9690 9691 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700; 9692 9693 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1); 9694 9695 val = min(bdcache_maxcnt / 2, host_rep_thresh); 9696 tw32(RCVBDI_JUMBO_THRESH, val); 9697 9698 if (tg3_flag(tp, 57765_PLUS)) 9699 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt); 9700 } 9701 9702 static inline u32 calc_crc(unsigned char *buf, int len) 9703 { 9704 u32 reg; 9705 u32 tmp; 9706 int j, k; 9707 9708 reg = 0xffffffff; 9709 9710 for (j = 0; j < len; j++) { 9711 reg ^= buf[j]; 9712 9713 for (k = 0; k < 8; k++) { 9714 tmp = reg & 0x01; 9715 9716 reg >>= 1; 9717 9718 if (tmp) 9719 reg ^= CRC32_POLY_LE; 9720 } 9721 } 9722 9723 return ~reg; 9724 } 9725 9726 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all) 9727 { 9728 /* accept or reject all multicast frames */ 9729 
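/* The MAC reduces each multicast address to a 7-bit hash
 * (bit = ~crc32(addr) & 0x7f) and tests the corresponding bit in
 * these four 32-bit registers; see __tg3_set_rx_mode() below.
 * All-ones accepts every group, all-zeroes rejects them all.
 */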
tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9730 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9731 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9732 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9733 }
9734 
9735 static void __tg3_set_rx_mode(struct net_device *dev)
9736 {
9737 struct tg3 *tp = netdev_priv(dev);
9738 u32 rx_mode;
9739 
9740 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9741 RX_MODE_KEEP_VLAN_TAG);
9742 
9743 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9744 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9745 * flag clear.
9746 */
9747 if (!tg3_flag(tp, ENABLE_ASF))
9748 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9749 #endif
9750 
9751 if (dev->flags & IFF_PROMISC) {
9752 /* Promiscuous mode. */
9753 rx_mode |= RX_MODE_PROMISC;
9754 } else if (dev->flags & IFF_ALLMULTI) {
9755 /* Accept all multicast. */
9756 tg3_set_multi(tp, 1);
9757 } else if (netdev_mc_empty(dev)) {
9758 /* Reject all multicast. */
9759 tg3_set_multi(tp, 0);
9760 } else {
9761 /* Accept one or more multicast(s). */
9762 struct netdev_hw_addr *ha;
9763 u32 mc_filter[4] = { 0, };
9764 u32 regidx;
9765 u32 bit;
9766 u32 crc;
9767 
9768 netdev_for_each_mc_addr(ha, dev) {
9769 crc = calc_crc(ha->addr, ETH_ALEN);
9770 bit = ~crc & 0x7f;
9771 regidx = (bit & 0x60) >> 5;
9772 bit &= 0x1f;
9773 mc_filter[regidx] |= (1 << bit);
9774 }
9775 
9776 tw32(MAC_HASH_REG_0, mc_filter[0]);
9777 tw32(MAC_HASH_REG_1, mc_filter[1]);
9778 tw32(MAC_HASH_REG_2, mc_filter[2]);
9779 tw32(MAC_HASH_REG_3, mc_filter[3]);
9780 }
9781 
9782 if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9783 rx_mode |= RX_MODE_PROMISC;
9784 } else if (!(dev->flags & IFF_PROMISC)) {
9785 /* Add all entries into the mac addr filter list */
9786 int i = 0;
9787 struct netdev_hw_addr *ha;
9788 
9789 netdev_for_each_uc_addr(ha, dev) {
9790 __tg3_set_one_mac_addr(tp, ha->addr,
9791 i + TG3_UCAST_ADDR_IDX(tp));
9792 i++;
9793 }
9794 }
9795 
9796 if (rx_mode != tp->rx_mode) {
9797 tp->rx_mode = rx_mode;
9798 tw32_f(MAC_RX_MODE, rx_mode);
9799 udelay(10);
9800 }
9801 }
9802 
9803 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9804 {
9805 int i;
9806 
9807 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9808 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9809 }
9810 
9811 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9812 {
9813 int i;
9814 
9815 if (!tg3_flag(tp, SUPPORT_MSIX))
9816 return;
9817 
9818 if (tp->rxq_cnt == 1) {
9819 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9820 return;
9821 }
9822 
9823 /* Validate table against current IRQ count */
9824 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9825 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9826 break;
9827 }
9828 
9829 if (i != TG3_RSS_INDIR_TBL_SIZE)
9830 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9831 }
9832 
9833 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9834 {
9835 int i = 0;
9836 u32 reg = MAC_RSS_INDIR_TBL_0;
9837 
9838 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9839 u32 val = tp->rss_ind_tbl[i];
9840 i++;
9841 for (; i % 8; i++) {
9842 val <<= 4;
9843 val |= tp->rss_ind_tbl[i];
9844 }
9845 tw32(reg, val);
9846 reg += 4;
9847 }
9848 }
9849 
9850 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9851 {
9852 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9853 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9854 else
9855 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9856 }
9857 
9858 /* tp->lock is held.
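 * tg3_reset_hw() performs the full bring-up sequence: chip reset,
 * ring and buffer manager initialization, DMA engine setup and MAC
 * configuration.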
*/ 9859 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy) 9860 { 9861 u32 val, rdmac_mode; 9862 int i, err, limit; 9863 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring; 9864 9865 tg3_disable_ints(tp); 9866 9867 tg3_stop_fw(tp); 9868 9869 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT); 9870 9871 if (tg3_flag(tp, INIT_COMPLETE)) 9872 tg3_abort_hw(tp, 1); 9873 9874 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && 9875 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) { 9876 tg3_phy_pull_config(tp); 9877 tg3_eee_pull_config(tp, NULL); 9878 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; 9879 } 9880 9881 /* Enable MAC control of LPI */ 9882 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) 9883 tg3_setup_eee(tp); 9884 9885 if (reset_phy) 9886 tg3_phy_reset(tp); 9887 9888 err = tg3_chip_reset(tp); 9889 if (err) 9890 return err; 9891 9892 tg3_write_sig_legacy(tp, RESET_KIND_INIT); 9893 9894 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) { 9895 val = tr32(TG3_CPMU_CTRL); 9896 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE); 9897 tw32(TG3_CPMU_CTRL, val); 9898 9899 val = tr32(TG3_CPMU_LSPD_10MB_CLK); 9900 val &= ~CPMU_LSPD_10MB_MACCLK_MASK; 9901 val |= CPMU_LSPD_10MB_MACCLK_6_25; 9902 tw32(TG3_CPMU_LSPD_10MB_CLK, val); 9903 9904 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD); 9905 val &= ~CPMU_LNK_AWARE_MACCLK_MASK; 9906 val |= CPMU_LNK_AWARE_MACCLK_6_25; 9907 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val); 9908 9909 val = tr32(TG3_CPMU_HST_ACC); 9910 val &= ~CPMU_HST_ACC_MACCLK_MASK; 9911 val |= CPMU_HST_ACC_MACCLK_6_25; 9912 tw32(TG3_CPMU_HST_ACC, val); 9913 } 9914 9915 if (tg3_asic_rev(tp) == ASIC_REV_57780) { 9916 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK; 9917 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN | 9918 PCIE_PWR_MGMT_L1_THRESH_4MS; 9919 tw32(PCIE_PWR_MGMT_THRESH, val); 9920 9921 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK; 9922 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS); 9923 9924 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR); 9925 9926 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; 9927 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); 9928 } 9929 9930 if (tg3_flag(tp, L1PLLPD_EN)) { 9931 u32 grc_mode = tr32(GRC_MODE); 9932 9933 /* Access the lower 1K of PL PCIE block registers. */ 9934 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; 9935 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL); 9936 9937 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1); 9938 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1, 9939 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN); 9940 9941 tw32(GRC_MODE, grc_mode); 9942 } 9943 9944 if (tg3_flag(tp, 57765_CLASS)) { 9945 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) { 9946 u32 grc_mode = tr32(GRC_MODE); 9947 9948 /* Access the lower 1K of PL PCIE block registers. */ 9949 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; 9950 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL); 9951 9952 val = tr32(TG3_PCIE_TLDLPL_PORT + 9953 TG3_PCIE_PL_LO_PHYCTL5); 9954 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5, 9955 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ); 9956 9957 tw32(GRC_MODE, grc_mode); 9958 } 9959 9960 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) { 9961 u32 grc_mode; 9962 9963 /* Fix transmit hangs */ 9964 val = tr32(TG3_CPMU_PADRNG_CTL); 9965 val |= TG3_CPMU_PADRNG_CTL_RDIV2; 9966 tw32(TG3_CPMU_PADRNG_CTL, val); 9967 9968 grc_mode = tr32(GRC_MODE); 9969 9970 /* Access the lower 1K of DL PCIE block registers. 
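 * As with the PL block above, GRC_MODE_PCIE_PORT_MASK selects which
 * window is visible at TG3_PCIE_TLDLPL_PORT; the saved grc_mode is
 * written back once FTSMAX has been updated.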
*/
9971 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9972 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9973 
9974 val = tr32(TG3_PCIE_TLDLPL_PORT +
9975 TG3_PCIE_DL_LO_FTSMAX);
9976 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9977 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9978 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9979 
9980 tw32(GRC_MODE, grc_mode);
9981 }
9982 
9983 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9984 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9985 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9986 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9987 }
9988 
9989 /* This works around an issue with Athlon chipsets on
9990 * B3 tigon3 silicon. This bit has no effect on any
9991 * other revision. But do not set this on PCI Express
9992 * chips and don't even touch the clocks if the CPMU is present.
9993 */
9994 if (!tg3_flag(tp, CPMU_PRESENT)) {
9995 if (!tg3_flag(tp, PCI_EXPRESS))
9996 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9997 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9998 }
9999 
10000 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
10001 tg3_flag(tp, PCIX_MODE)) {
10002 val = tr32(TG3PCI_PCISTATE);
10003 val |= PCISTATE_RETRY_SAME_DMA;
10004 tw32(TG3PCI_PCISTATE, val);
10005 }
10006 
10007 if (tg3_flag(tp, ENABLE_APE)) {
10008 /* Allow reads and writes to the
10009 * APE register and memory space.
10010 */
10011 val = tr32(TG3PCI_PCISTATE);
10012 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10013 PCISTATE_ALLOW_APE_SHMEM_WR |
10014 PCISTATE_ALLOW_APE_PSPACE_WR;
10015 tw32(TG3PCI_PCISTATE, val);
10016 }
10017 
10018 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10019 /* Enable some hw fixes. */
10020 val = tr32(TG3PCI_MSI_DATA);
10021 val |= (1 << 26) | (1 << 28) | (1 << 29);
10022 tw32(TG3PCI_MSI_DATA, val);
10023 }
10024 
10025 /* Descriptor ring init may make accesses to the
10026 * NIC SRAM area to setup the TX descriptors, so we
10027 * can only do this after the hardware has been
10028 * successfully reset.
10029 */
10030 err = tg3_init_rings(tp);
10031 if (err)
10032 return err;
10033 
10034 if (tg3_flag(tp, 57765_PLUS)) {
10035 val = tr32(TG3PCI_DMA_RW_CTRL) &
10036 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10037 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10038 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10039 if (!tg3_flag(tp, 57765_CLASS) &&
10040 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10041 tg3_asic_rev(tp) != ASIC_REV_5762)
10042 val |= DMA_RWCTRL_TAGGED_STAT_WA;
10043 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10044 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10045 tg3_asic_rev(tp) != ASIC_REV_5761) {
10046 /* This value is determined during the probe-time DMA
10047 * engine test, tg3_test_dma.
10048 */
10049 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10050 }
10051 
10052 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10053 GRC_MODE_4X_NIC_SEND_RINGS |
10054 GRC_MODE_NO_TX_PHDR_CSUM |
10055 GRC_MODE_NO_RX_PHDR_CSUM);
10056 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10057 
10058 /* Pseudo-header checksum is done by hardware logic and not
10059 * the offload processors, so make the chip do the pseudo-
10060 * header checksums on receive. For transmit it is more
10061 * convenient to do the pseudo-header checksum in software
10062 * as Linux does that on transmit for us in all cases.
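 * (With CHECKSUM_PARTIAL, the stack has already seeded the TCP/UDP
 * checksum field with the pseudo-header sum by the time the skb
 * reaches the driver.)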
10063 */
10064 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10065 
10066 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10067 if (tp->rxptpctl)
10068 tw32(TG3_RX_PTP_CTL,
10069 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10070 
10071 if (tg3_flag(tp, PTP_CAPABLE))
10072 val |= GRC_MODE_TIME_SYNC_ENABLE;
10073 
10074 tw32(GRC_MODE, tp->grc_mode | val);
10075 
10076 /* On one of the AMD platforms, MRRS is restricted to 4000 because of
10077 * a south bridge limitation. As a workaround, the driver sets MRRS
10078 * to 2048 instead of the default 4096.
10079 */
10080 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10081 tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10082 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10083 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10084 }
10085 
10086 /* Set up the timer prescaler register. The clock is always 66 MHz. */
10087 val = tr32(GRC_MISC_CFG);
10088 val &= ~0xff;
10089 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10090 tw32(GRC_MISC_CFG, val);
10091 
10092 /* Initialize MBUF/DESC pool. */
10093 if (tg3_flag(tp, 5750_PLUS)) {
10094 /* Do nothing. */
10095 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10096 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10097 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10098 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10099 else
10100 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10101 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10102 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10103 } else if (tg3_flag(tp, TSO_CAPABLE)) {
10104 int fw_len;
10105 
10106 fw_len = tp->fw_len;
10107 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10108 tw32(BUFMGR_MB_POOL_ADDR,
10109 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10110 tw32(BUFMGR_MB_POOL_SIZE,
10111 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10112 }
10113 
10114 if (tp->dev->mtu <= ETH_DATA_LEN) {
10115 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10116 tp->bufmgr_config.mbuf_read_dma_low_water);
10117 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10118 tp->bufmgr_config.mbuf_mac_rx_low_water);
10119 tw32(BUFMGR_MB_HIGH_WATER,
10120 tp->bufmgr_config.mbuf_high_water);
10121 } else {
10122 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10123 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10124 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10125 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10126 tw32(BUFMGR_MB_HIGH_WATER,
10127 tp->bufmgr_config.mbuf_high_water_jumbo);
10128 }
10129 tw32(BUFMGR_DMA_LOW_WATER,
10130 tp->bufmgr_config.dma_low_water);
10131 tw32(BUFMGR_DMA_HIGH_WATER,
10132 tp->bufmgr_config.dma_high_water);
10133 
10134 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10135 if (tg3_asic_rev(tp) == ASIC_REV_5719)
10136 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10137 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10138 tg3_asic_rev(tp) == ASIC_REV_5762 ||
10139 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10140 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10141 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10142 tw32(BUFMGR_MODE, val);
10143 for (i = 0; i < 2000; i++) {
10144 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10145 break;
10146 udelay(10);
10147 }
10148 if (i >= 2000) {
10149 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10150 return -ENODEV;
10151 }
10152 
10153 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10154 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10155 
10156 tg3_setup_rxbd_thresholds(tp);
10157 
10158 /* Initialize TG3_BDINFO's at:
10159 * RCVDBDI_STD_BD: standard eth size rx ring
10160 *
RCVDBDI_JUMBO_BD: jumbo frame rx ring 10161 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work) 10162 * 10163 * like so: 10164 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring 10165 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) | 10166 * ring attribute flags 10167 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM 10168 * 10169 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries. 10170 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries. 10171 * 10172 * The size of each ring is fixed in the firmware, but the location is 10173 * configurable. 10174 */ 10175 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, 10176 ((u64) tpr->rx_std_mapping >> 32)); 10177 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, 10178 ((u64) tpr->rx_std_mapping & 0xffffffff)); 10179 if (!tg3_flag(tp, 5717_PLUS)) 10180 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, 10181 NIC_SRAM_RX_BUFFER_DESC); 10182 10183 /* Disable the mini ring */ 10184 if (!tg3_flag(tp, 5705_PLUS)) 10185 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS, 10186 BDINFO_FLAGS_DISABLED); 10187 10188 /* Program the jumbo buffer descriptor ring control 10189 * blocks on those devices that have them. 10190 */ 10191 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || 10192 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) { 10193 10194 if (tg3_flag(tp, JUMBO_RING_ENABLE)) { 10195 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, 10196 ((u64) tpr->rx_jmb_mapping >> 32)); 10197 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, 10198 ((u64) tpr->rx_jmb_mapping & 0xffffffff)); 10199 val = TG3_RX_JMB_RING_SIZE(tp) << 10200 BDINFO_FLAGS_MAXLEN_SHIFT; 10201 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, 10202 val | BDINFO_FLAGS_USE_EXT_RECV); 10203 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) || 10204 tg3_flag(tp, 57765_CLASS) || 10205 tg3_asic_rev(tp) == ASIC_REV_5762) 10206 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, 10207 NIC_SRAM_RX_JUMBO_BUFFER_DESC); 10208 } else { 10209 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, 10210 BDINFO_FLAGS_DISABLED); 10211 } 10212 10213 if (tg3_flag(tp, 57765_PLUS)) { 10214 val = TG3_RX_STD_RING_SIZE(tp); 10215 val <<= BDINFO_FLAGS_MAXLEN_SHIFT; 10216 val |= (TG3_RX_STD_DMA_SZ << 2); 10217 } else 10218 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT; 10219 } else 10220 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT; 10221 10222 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val); 10223 10224 tpr->rx_std_prod_idx = tp->rx_pending; 10225 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx); 10226 10227 tpr->rx_jmb_prod_idx = 10228 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0; 10229 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx); 10230 10231 tg3_rings_reset(tp); 10232 10233 /* Initialize MAC address and backoff seed. */ 10234 __tg3_set_mac_addr(tp, false); 10235 10236 /* MTU + ethernet header + FCS + optional VLAN tag */ 10237 tw32(MAC_RX_MTU_SIZE, 10238 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); 10239 10240 /* The slot time is changed by tg3_setup_phy if we 10241 * run at gigabit with half duplex. 
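	 *
	 * (Worked example for the MAC_RX_MTU_SIZE write above: with the
	 * default 1500-byte MTU the chip is programmed with
	 * 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) + VLAN_HLEN (4) = 1522
	 * bytes, the classic maximum VLAN-tagged frame size.)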
	 */
	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT) |
	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	tw32(MAC_TX_LENGTHS, val);

	/* Receive rules. */
	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
	tw32(RCVLPC_CONFIG, 0x0181);

	/* Calculate RDMAC_MODE setting early, we need it to determine
	 * the RCVLPC_STATS_ENABLE mask.
	 */
	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
		      RDMAC_MODE_LNGREAD_ENAB);

	if (tg3_asic_rev(tp) == ASIC_REV_5717)
		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;

	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE)) {
			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
		}
	}

	if (tg3_flag(tp, PCI_EXPRESS))
		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;

	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		tp->dma_limit = 0;
		if (tp->dev->mtu <= ETH_DATA_LEN) {
			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
		}
	}

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;

	if (tg3_flag(tp, 57765_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;

	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS)) {
		u32 tgtreg;

		if (tg3_asic_rev(tp) == ASIC_REV_5762)
			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
		else
			tgtreg = TG3_RDMA_RSRVCTRL_REG;

		val = tr32(tgtreg);
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
		    tg3_asic_rev(tp) == ASIC_REV_5762) {
			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
		}
		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 tgtreg;

		if (tg3_asic_rev(tp) == ASIC_REV_5762)
			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
		else
			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
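
		/* Read-modify-write: tr32() below preserves whatever the
		 * other bits of the control register already hold; only the
		 * 4K BD and LSO read-DMA burst limits are OR'ed in.
		 */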
10347 10348 val = tr32(tgtreg); 10349 tw32(tgtreg, val | 10350 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K | 10351 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K); 10352 } 10353 10354 /* Receive/send statistics. */ 10355 if (tg3_flag(tp, 5750_PLUS)) { 10356 val = tr32(RCVLPC_STATS_ENABLE); 10357 val &= ~RCVLPC_STATSENAB_DACK_FIX; 10358 tw32(RCVLPC_STATS_ENABLE, val); 10359 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) && 10360 tg3_flag(tp, TSO_CAPABLE)) { 10361 val = tr32(RCVLPC_STATS_ENABLE); 10362 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX; 10363 tw32(RCVLPC_STATS_ENABLE, val); 10364 } else { 10365 tw32(RCVLPC_STATS_ENABLE, 0xffffff); 10366 } 10367 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE); 10368 tw32(SNDDATAI_STATSENAB, 0xffffff); 10369 tw32(SNDDATAI_STATSCTRL, 10370 (SNDDATAI_SCTRL_ENABLE | 10371 SNDDATAI_SCTRL_FASTUPD)); 10372 10373 /* Setup host coalescing engine. */ 10374 tw32(HOSTCC_MODE, 0); 10375 for (i = 0; i < 2000; i++) { 10376 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE)) 10377 break; 10378 udelay(10); 10379 } 10380 10381 __tg3_set_coalesce(tp, &tp->coal); 10382 10383 if (!tg3_flag(tp, 5705_PLUS)) { 10384 /* Status/statistics block address. See tg3_timer, 10385 * the tg3_periodic_fetch_stats call there, and 10386 * tg3_get_stats to see how this works for 5705/5750 chips. 10387 */ 10388 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, 10389 ((u64) tp->stats_mapping >> 32)); 10390 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, 10391 ((u64) tp->stats_mapping & 0xffffffff)); 10392 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK); 10393 10394 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK); 10395 10396 /* Clear statistics and status block memory areas */ 10397 for (i = NIC_SRAM_STATS_BLK; 10398 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE; 10399 i += sizeof(u32)) { 10400 tg3_write_mem(tp, i, 0); 10401 udelay(40); 10402 } 10403 } 10404 10405 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode); 10406 10407 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE); 10408 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE); 10409 if (!tg3_flag(tp, 5705_PLUS)) 10410 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE); 10411 10412 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { 10413 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 10414 /* reset to prevent losing 1st rx packet intermittently */ 10415 tw32_f(MAC_RX_MODE, RX_MODE_RESET); 10416 udelay(10); 10417 } 10418 10419 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | 10420 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | 10421 MAC_MODE_FHDE_ENABLE; 10422 if (tg3_flag(tp, ENABLE_APE)) 10423 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; 10424 if (!tg3_flag(tp, 5705_PLUS) && 10425 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 10426 tg3_asic_rev(tp) != ASIC_REV_5700) 10427 tp->mac_mode |= MAC_MODE_LINK_POLARITY; 10428 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR); 10429 udelay(40); 10430 10431 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants(). 10432 * If TG3_FLAG_IS_NIC is zero, we should read the 10433 * register to preserve the GPIO settings for LOMs. The GPIOs, 10434 * whether used as inputs or outputs, are set by boot code after 10435 * reset. 
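	 * The read-modify-write sequence below (clear gpio_mask from the
	 * cached value, then OR in tr32(GRC_LOCAL_CTRL) & gpio_mask) folds
	 * those firmware-chosen GPIO settings into tp->grc_local_ctrl
	 * before it is written back to the chip.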
10436 */ 10437 if (!tg3_flag(tp, IS_NIC)) { 10438 u32 gpio_mask; 10439 10440 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 | 10441 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 | 10442 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2; 10443 10444 if (tg3_asic_rev(tp) == ASIC_REV_5752) 10445 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 | 10446 GRC_LCLCTRL_GPIO_OUTPUT3; 10447 10448 if (tg3_asic_rev(tp) == ASIC_REV_5755) 10449 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL; 10450 10451 tp->grc_local_ctrl &= ~gpio_mask; 10452 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask; 10453 10454 /* GPIO1 must be driven high for eeprom write protect */ 10455 if (tg3_flag(tp, EEPROM_WRITE_PROT)) 10456 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | 10457 GRC_LCLCTRL_GPIO_OUTPUT1); 10458 } 10459 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 10460 udelay(100); 10461 10462 if (tg3_flag(tp, USING_MSIX)) { 10463 val = tr32(MSGINT_MODE); 10464 val |= MSGINT_MODE_ENABLE; 10465 if (tp->irq_cnt > 1) 10466 val |= MSGINT_MODE_MULTIVEC_EN; 10467 if (!tg3_flag(tp, 1SHOT_MSI)) 10468 val |= MSGINT_MODE_ONE_SHOT_DISABLE; 10469 tw32(MSGINT_MODE, val); 10470 } 10471 10472 if (!tg3_flag(tp, 5705_PLUS)) { 10473 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE); 10474 udelay(40); 10475 } 10476 10477 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB | 10478 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB | 10479 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB | 10480 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB | 10481 WDMAC_MODE_LNGREAD_ENAB); 10482 10483 if (tg3_asic_rev(tp) == ASIC_REV_5705 && 10484 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) { 10485 if (tg3_flag(tp, TSO_CAPABLE) && 10486 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 || 10487 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) { 10488 /* nothing */ 10489 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && 10490 !tg3_flag(tp, IS_5788)) { 10491 val |= WDMAC_MODE_RX_ACCEL; 10492 } 10493 } 10494 10495 /* Enable host coalescing bug fix */ 10496 if (tg3_flag(tp, 5755_PLUS)) 10497 val |= WDMAC_MODE_STATUS_TAG_FIX; 10498 10499 if (tg3_asic_rev(tp) == ASIC_REV_5785) 10500 val |= WDMAC_MODE_BURST_ALL_DATA; 10501 10502 tw32_f(WDMAC_MODE, val); 10503 udelay(40); 10504 10505 if (tg3_flag(tp, PCIX_MODE)) { 10506 u16 pcix_cmd; 10507 10508 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 10509 &pcix_cmd); 10510 if (tg3_asic_rev(tp) == ASIC_REV_5703) { 10511 pcix_cmd &= ~PCI_X_CMD_MAX_READ; 10512 pcix_cmd |= PCI_X_CMD_READ_2K; 10513 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) { 10514 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ); 10515 pcix_cmd |= PCI_X_CMD_READ_2K; 10516 } 10517 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 10518 pcix_cmd); 10519 } 10520 10521 tw32_f(RDMAC_MODE, rdmac_mode); 10522 udelay(40); 10523 10524 if (tg3_asic_rev(tp) == ASIC_REV_5719 || 10525 tg3_asic_rev(tp) == ASIC_REV_5720) { 10526 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) { 10527 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp)) 10528 break; 10529 } 10530 if (i < TG3_NUM_RDMA_CHANNELS) { 10531 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); 10532 val |= tg3_lso_rd_dma_workaround_bit(tp); 10533 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val); 10534 tg3_flag_set(tp, 5719_5720_RDMA_BUG); 10535 } 10536 } 10537 10538 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE); 10539 if (!tg3_flag(tp, 5705_PLUS)) 10540 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE); 10541 10542 if (tg3_asic_rev(tp) == ASIC_REV_5761) 10543 tw32(SNDDATAC_MODE, 10544 SNDDATAC_MODE_ENABLE | 
SNDDATAC_MODE_CDELAY); 10545 else 10546 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE); 10547 10548 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE); 10549 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB); 10550 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ; 10551 if (tg3_flag(tp, LRG_PROD_RING_CAP)) 10552 val |= RCVDBDI_MODE_LRG_RING_SZ; 10553 tw32(RCVDBDI_MODE, val); 10554 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE); 10555 if (tg3_flag(tp, HW_TSO_1) || 10556 tg3_flag(tp, HW_TSO_2) || 10557 tg3_flag(tp, HW_TSO_3)) 10558 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8); 10559 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE; 10560 if (tg3_flag(tp, ENABLE_TSS)) 10561 val |= SNDBDI_MODE_MULTI_TXQ_EN; 10562 tw32(SNDBDI_MODE, val); 10563 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE); 10564 10565 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) { 10566 err = tg3_load_5701_a0_firmware_fix(tp); 10567 if (err) 10568 return err; 10569 } 10570 10571 if (tg3_asic_rev(tp) == ASIC_REV_57766) { 10572 /* Ignore any errors for the firmware download. If download 10573 * fails, the device will operate with EEE disabled 10574 */ 10575 tg3_load_57766_firmware(tp); 10576 } 10577 10578 if (tg3_flag(tp, TSO_CAPABLE)) { 10579 err = tg3_load_tso_firmware(tp); 10580 if (err) 10581 return err; 10582 } 10583 10584 tp->tx_mode = TX_MODE_ENABLE; 10585 10586 if (tg3_flag(tp, 5755_PLUS) || 10587 tg3_asic_rev(tp) == ASIC_REV_5906) 10588 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX; 10589 10590 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 10591 tg3_asic_rev(tp) == ASIC_REV_5762) { 10592 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE; 10593 tp->tx_mode &= ~val; 10594 tp->tx_mode |= tr32(MAC_TX_MODE) & val; 10595 } 10596 10597 tw32_f(MAC_TX_MODE, tp->tx_mode); 10598 udelay(100); 10599 10600 if (tg3_flag(tp, ENABLE_RSS)) { 10601 u32 rss_key[10]; 10602 10603 tg3_rss_write_indir_tbl(tp); 10604 10605 netdev_rss_key_fill(rss_key, 10 * sizeof(u32)); 10606 10607 for (i = 0; i < 10 ; i++) 10608 tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]); 10609 } 10610 10611 tp->rx_mode = RX_MODE_ENABLE; 10612 if (tg3_flag(tp, 5755_PLUS)) 10613 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE; 10614 10615 if (tg3_asic_rev(tp) == ASIC_REV_5762) 10616 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX; 10617 10618 if (tg3_flag(tp, ENABLE_RSS)) 10619 tp->rx_mode |= RX_MODE_RSS_ENABLE | 10620 RX_MODE_RSS_ITBL_HASH_BITS_7 | 10621 RX_MODE_RSS_IPV6_HASH_EN | 10622 RX_MODE_RSS_TCP_IPV6_HASH_EN | 10623 RX_MODE_RSS_IPV4_HASH_EN | 10624 RX_MODE_RSS_TCP_IPV4_HASH_EN; 10625 10626 tw32_f(MAC_RX_MODE, tp->rx_mode); 10627 udelay(10); 10628 10629 tw32(MAC_LED_CTRL, tp->led_ctrl); 10630 10631 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); 10632 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 10633 tw32_f(MAC_RX_MODE, RX_MODE_RESET); 10634 udelay(10); 10635 } 10636 tw32_f(MAC_RX_MODE, tp->rx_mode); 10637 udelay(10); 10638 10639 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 10640 if ((tg3_asic_rev(tp) == ASIC_REV_5704) && 10641 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) { 10642 /* Set drive transmission level to 1.2V */ 10643 /* only if the signal pre-emphasis bit is not set */ 10644 val = tr32(MAC_SERDES_CFG); 10645 val &= 0xfffff000; 10646 val |= 0x880; 10647 tw32(MAC_SERDES_CFG, val); 10648 } 10649 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) 10650 tw32(MAC_SERDES_CFG, 0x616000); 10651 } 10652 10653 /* Prevent chip from dropping frames when flow control 10654 * is enabled. 
10655 */ 10656 if (tg3_flag(tp, 57765_CLASS)) 10657 val = 1; 10658 else 10659 val = 2; 10660 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val); 10661 10662 if (tg3_asic_rev(tp) == ASIC_REV_5704 && 10663 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { 10664 /* Use hardware link auto-negotiation */ 10665 tg3_flag_set(tp, HW_AUTONEG); 10666 } 10667 10668 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && 10669 tg3_asic_rev(tp) == ASIC_REV_5714) { 10670 u32 tmp; 10671 10672 tmp = tr32(SERDES_RX_CTRL); 10673 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT); 10674 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT; 10675 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT; 10676 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 10677 } 10678 10679 if (!tg3_flag(tp, USE_PHYLIB)) { 10680 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 10681 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; 10682 10683 err = tg3_setup_phy(tp, false); 10684 if (err) 10685 return err; 10686 10687 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 10688 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) { 10689 u32 tmp; 10690 10691 /* Clear CRC stats. */ 10692 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) { 10693 tg3_writephy(tp, MII_TG3_TEST1, 10694 tmp | MII_TG3_TEST1_CRC_EN); 10695 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp); 10696 } 10697 } 10698 } 10699 10700 __tg3_set_rx_mode(tp->dev); 10701 10702 /* Initialize receive rules. */ 10703 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK); 10704 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK); 10705 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK); 10706 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK); 10707 10708 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) 10709 limit = 8; 10710 else 10711 limit = 16; 10712 if (tg3_flag(tp, ENABLE_ASF)) 10713 limit -= 4; 10714 switch (limit) { 10715 case 16: 10716 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0); 10717 fallthrough; 10718 case 15: 10719 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0); 10720 fallthrough; 10721 case 14: 10722 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0); 10723 fallthrough; 10724 case 13: 10725 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0); 10726 fallthrough; 10727 case 12: 10728 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0); 10729 fallthrough; 10730 case 11: 10731 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0); 10732 fallthrough; 10733 case 10: 10734 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0); 10735 fallthrough; 10736 case 9: 10737 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0); 10738 fallthrough; 10739 case 8: 10740 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0); 10741 fallthrough; 10742 case 7: 10743 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0); 10744 fallthrough; 10745 case 6: 10746 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0); 10747 fallthrough; 10748 case 5: 10749 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0); 10750 fallthrough; 10751 case 4: 10752 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */ 10753 case 3: 10754 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */ 10755 case 2: 10756 case 1: 10757 10758 default: 10759 break; 10760 } 10761 10762 if (tg3_flag(tp, ENABLE_APE)) 10763 /* Write our heartbeat update interval to APE. */ 10764 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS, 10765 APE_HOST_HEARTBEAT_INT_5SEC); 10766 10767 tg3_write_sig_post_reset(tp, RESET_KIND_INIT); 10768 10769 return 0; 10770 } 10771 10772 /* Called at device open time to get the chip ready for 10773 * packet processing. Invoked with tp->lock held. 
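 *
 * A minimal sketch of the expected calling pattern (illustrative only;
 * it mirrors how tg3_start() and tg3_test_msi() use this function):
 *
 *	tg3_full_lock(tp, 0);
 *	err = tg3_init_hw(tp, reset_phy);
 *	tg3_full_unlock(tp);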
10774 */ 10775 static int tg3_init_hw(struct tg3 *tp, bool reset_phy) 10776 { 10777 /* Chip may have been just powered on. If so, the boot code may still 10778 * be running initialization. Wait for it to finish to avoid races in 10779 * accessing the hardware. 10780 */ 10781 tg3_enable_register_access(tp); 10782 tg3_poll_fw(tp); 10783 10784 tg3_switch_clocks(tp); 10785 10786 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); 10787 10788 return tg3_reset_hw(tp, reset_phy); 10789 } 10790 10791 #ifdef CONFIG_TIGON3_HWMON 10792 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir) 10793 { 10794 u32 off, len = TG3_OCIR_LEN; 10795 int i; 10796 10797 for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) { 10798 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len); 10799 10800 if (ocir->signature != TG3_OCIR_SIG_MAGIC || 10801 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE)) 10802 memset(ocir, 0, len); 10803 } 10804 } 10805 10806 /* sysfs attributes for hwmon */ 10807 static ssize_t tg3_show_temp(struct device *dev, 10808 struct device_attribute *devattr, char *buf) 10809 { 10810 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 10811 struct tg3 *tp = dev_get_drvdata(dev); 10812 u32 temperature; 10813 10814 spin_lock_bh(&tp->lock); 10815 tg3_ape_scratchpad_read(tp, &temperature, attr->index, 10816 sizeof(temperature)); 10817 spin_unlock_bh(&tp->lock); 10818 return sprintf(buf, "%u\n", temperature * 1000); 10819 } 10820 10821 10822 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL, 10823 TG3_TEMP_SENSOR_OFFSET); 10824 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL, 10825 TG3_TEMP_CAUTION_OFFSET); 10826 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL, 10827 TG3_TEMP_MAX_OFFSET); 10828 10829 static struct attribute *tg3_attrs[] = { 10830 &sensor_dev_attr_temp1_input.dev_attr.attr, 10831 &sensor_dev_attr_temp1_crit.dev_attr.attr, 10832 &sensor_dev_attr_temp1_max.dev_attr.attr, 10833 NULL 10834 }; 10835 ATTRIBUTE_GROUPS(tg3); 10836 10837 static void tg3_hwmon_close(struct tg3 *tp) 10838 { 10839 if (tp->hwmon_dev) { 10840 hwmon_device_unregister(tp->hwmon_dev); 10841 tp->hwmon_dev = NULL; 10842 } 10843 } 10844 10845 static void tg3_hwmon_open(struct tg3 *tp) 10846 { 10847 int i; 10848 u32 size = 0; 10849 struct pci_dev *pdev = tp->pdev; 10850 struct tg3_ocir ocirs[TG3_SD_NUM_RECS]; 10851 10852 tg3_sd_scan_scratchpad(tp, ocirs); 10853 10854 for (i = 0; i < TG3_SD_NUM_RECS; i++) { 10855 if (!ocirs[i].src_data_length) 10856 continue; 10857 10858 size += ocirs[i].src_hdr_length; 10859 size += ocirs[i].src_data_length; 10860 } 10861 10862 if (!size) 10863 return; 10864 10865 tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3", 10866 tp, tg3_groups); 10867 if (IS_ERR(tp->hwmon_dev)) { 10868 tp->hwmon_dev = NULL; 10869 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n"); 10870 } 10871 } 10872 #else 10873 static inline void tg3_hwmon_close(struct tg3 *tp) { } 10874 static inline void tg3_hwmon_open(struct tg3 *tp) { } 10875 #endif /* CONFIG_TIGON3_HWMON */ 10876 10877 10878 #define TG3_STAT_ADD32(PSTAT, REG) \ 10879 do { u32 __val = tr32(REG); \ 10880 (PSTAT)->low += __val; \ 10881 if ((PSTAT)->low < __val) \ 10882 (PSTAT)->high += 1; \ 10883 } while (0) 10884 10885 static void tg3_periodic_fetch_stats(struct tg3 *tp) 10886 { 10887 struct tg3_hw_stats *sp = tp->hw_stats; 10888 10889 if (!tp->link_up) 10890 return; 10891 10892 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS); 10893 
TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS); 10894 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT); 10895 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT); 10896 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS); 10897 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS); 10898 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS); 10899 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED); 10900 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL); 10901 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL); 10902 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST); 10903 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST); 10904 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST); 10905 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) && 10906 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low + 10907 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) { 10908 u32 val; 10909 10910 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); 10911 val &= ~tg3_lso_rd_dma_workaround_bit(tp); 10912 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val); 10913 tg3_flag_clear(tp, 5719_5720_RDMA_BUG); 10914 } 10915 10916 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS); 10917 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS); 10918 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST); 10919 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST); 10920 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST); 10921 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS); 10922 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS); 10923 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD); 10924 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD); 10925 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD); 10926 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED); 10927 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG); 10928 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS); 10929 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE); 10930 10931 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT); 10932 if (tg3_asic_rev(tp) != ASIC_REV_5717 && 10933 tg3_asic_rev(tp) != ASIC_REV_5762 && 10934 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 && 10935 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) { 10936 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT); 10937 } else { 10938 u32 val = tr32(HOSTCC_FLOW_ATTN); 10939 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 
1 : 0; 10940 if (val) { 10941 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM); 10942 sp->rx_discards.low += val; 10943 if (sp->rx_discards.low < val) 10944 sp->rx_discards.high += 1; 10945 } 10946 sp->mbuf_lwm_thresh_hit = sp->rx_discards; 10947 } 10948 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT); 10949 } 10950 10951 static void tg3_chk_missed_msi(struct tg3 *tp) 10952 { 10953 u32 i; 10954 10955 for (i = 0; i < tp->irq_cnt; i++) { 10956 struct tg3_napi *tnapi = &tp->napi[i]; 10957 10958 if (tg3_has_work(tnapi)) { 10959 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr && 10960 tnapi->last_tx_cons == tnapi->tx_cons) { 10961 if (tnapi->chk_msi_cnt < 1) { 10962 tnapi->chk_msi_cnt++; 10963 return; 10964 } 10965 tg3_msi(0, tnapi); 10966 } 10967 } 10968 tnapi->chk_msi_cnt = 0; 10969 tnapi->last_rx_cons = tnapi->rx_rcb_ptr; 10970 tnapi->last_tx_cons = tnapi->tx_cons; 10971 } 10972 } 10973 10974 static void tg3_timer(struct timer_list *t) 10975 { 10976 struct tg3 *tp = from_timer(tp, t, timer); 10977 10978 spin_lock(&tp->lock); 10979 10980 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) { 10981 spin_unlock(&tp->lock); 10982 goto restart_timer; 10983 } 10984 10985 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 10986 tg3_flag(tp, 57765_CLASS)) 10987 tg3_chk_missed_msi(tp); 10988 10989 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) { 10990 /* BCM4785: Flush posted writes from GbE to host memory. */ 10991 tr32(HOSTCC_MODE); 10992 } 10993 10994 if (!tg3_flag(tp, TAGGED_STATUS)) { 10995 /* All of this garbage is because when using non-tagged 10996 * IRQ status the mailbox/status_block protocol the chip 10997 * uses with the cpu is race prone. 10998 */ 10999 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) { 11000 tw32(GRC_LOCAL_CTRL, 11001 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); 11002 } else { 11003 tw32(HOSTCC_MODE, tp->coalesce_mode | 11004 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW); 11005 } 11006 11007 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { 11008 spin_unlock(&tp->lock); 11009 tg3_reset_task_schedule(tp); 11010 goto restart_timer; 11011 } 11012 } 11013 11014 /* This part only runs once per second. 
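	 * The cadence comes from tg3_timer_init() below: timer_multiplier is
	 * HZ / timer_offset, so with e.g. timer_offset = HZ / 10 the
	 * countdown reaches zero on every 10th tick, i.e. 10 x 100 ms = 1 s.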
	 */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, false);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (tp->link_up &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!tp->link_up &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					       (tp->mac_mode &
						~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, false);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		} else if (tg3_flag(tp, POLL_CPMU_LINK)) {
			u32 cpmu = tr32(TG3_CPMU_STATUS);
			bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
					 TG3_CPMU_STATUS_LINK_MASK);

			if (link_up != tp->link_up)
				tg3_setup_phy(tp, false);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * the same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
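	 *
	 * The 2-second cadence follows from tg3_timer_init():
	 * asf_multiplier = (HZ / timer_offset) * TG3_FW_UPDATE_FREQ_SEC,
	 * so the asf_counter countdown below expires once every
	 * TG3_FW_UPDATE_FREQ_SEC seconds (presumably 2, per the text above)
	 * no matter which tick rate was chosen for the timer.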
11092 */ 11093 if (!--tp->asf_counter) { 11094 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) { 11095 tg3_wait_for_event_ack(tp); 11096 11097 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, 11098 FWCMD_NICDRV_ALIVE3); 11099 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4); 11100 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 11101 TG3_FW_UPDATE_TIMEOUT_SEC); 11102 11103 tg3_generate_fw_event(tp); 11104 } 11105 tp->asf_counter = tp->asf_multiplier; 11106 } 11107 11108 /* Update the APE heartbeat every 5 seconds.*/ 11109 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL); 11110 11111 spin_unlock(&tp->lock); 11112 11113 restart_timer: 11114 tp->timer.expires = jiffies + tp->timer_offset; 11115 add_timer(&tp->timer); 11116 } 11117 11118 static void tg3_timer_init(struct tg3 *tp) 11119 { 11120 if (tg3_flag(tp, TAGGED_STATUS) && 11121 tg3_asic_rev(tp) != ASIC_REV_5717 && 11122 !tg3_flag(tp, 57765_CLASS)) 11123 tp->timer_offset = HZ; 11124 else 11125 tp->timer_offset = HZ / 10; 11126 11127 BUG_ON(tp->timer_offset > HZ); 11128 11129 tp->timer_multiplier = (HZ / tp->timer_offset); 11130 tp->asf_multiplier = (HZ / tp->timer_offset) * 11131 TG3_FW_UPDATE_FREQ_SEC; 11132 11133 timer_setup(&tp->timer, tg3_timer, 0); 11134 } 11135 11136 static void tg3_timer_start(struct tg3 *tp) 11137 { 11138 tp->asf_counter = tp->asf_multiplier; 11139 tp->timer_counter = tp->timer_multiplier; 11140 11141 tp->timer.expires = jiffies + tp->timer_offset; 11142 add_timer(&tp->timer); 11143 } 11144 11145 static void tg3_timer_stop(struct tg3 *tp) 11146 { 11147 del_timer_sync(&tp->timer); 11148 } 11149 11150 /* Restart hardware after configuration changes, self-test, etc. 11151 * Invoked with tp->lock held. 11152 */ 11153 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy) 11154 __releases(tp->lock) 11155 __acquires(tp->lock) 11156 { 11157 int err; 11158 11159 err = tg3_init_hw(tp, reset_phy); 11160 if (err) { 11161 netdev_err(tp->dev, 11162 "Failed to re-initialize device, aborting\n"); 11163 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11164 tg3_full_unlock(tp); 11165 tg3_timer_stop(tp); 11166 tp->irq_sync = 0; 11167 tg3_napi_enable(tp); 11168 dev_close(tp->dev); 11169 tg3_full_lock(tp, 0); 11170 } 11171 return err; 11172 } 11173 11174 static void tg3_reset_task(struct work_struct *work) 11175 { 11176 struct tg3 *tp = container_of(work, struct tg3, reset_task); 11177 int err; 11178 11179 rtnl_lock(); 11180 tg3_full_lock(tp, 0); 11181 11182 if (tp->pcierr_recovery || !netif_running(tp->dev)) { 11183 tg3_flag_clear(tp, RESET_TASK_PENDING); 11184 tg3_full_unlock(tp); 11185 rtnl_unlock(); 11186 return; 11187 } 11188 11189 tg3_full_unlock(tp); 11190 11191 tg3_phy_stop(tp); 11192 11193 tg3_netif_stop(tp); 11194 11195 tg3_full_lock(tp, 1); 11196 11197 if (tg3_flag(tp, TX_RECOVERY_PENDING)) { 11198 tp->write32_tx_mbox = tg3_write32_tx_mbox; 11199 tp->write32_rx_mbox = tg3_write_flush_reg32; 11200 tg3_flag_set(tp, MBOX_WRITE_REORDER); 11201 tg3_flag_clear(tp, TX_RECOVERY_PENDING); 11202 } 11203 11204 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); 11205 err = tg3_init_hw(tp, true); 11206 if (err) { 11207 tg3_full_unlock(tp); 11208 tp->irq_sync = 0; 11209 tg3_napi_enable(tp); 11210 /* Clear this flag so that tg3_reset_task_cancel() will not 11211 * call cancel_work_sync() and wait forever. 
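		 *
		 * (Without this, the dev_close() below could recurse into
		 * tg3_stop() -> tg3_reset_task_cancel(), whose
		 * cancel_work_sync() would then wait on the very work item
		 * that is currently executing.)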
11212 */ 11213 tg3_flag_clear(tp, RESET_TASK_PENDING); 11214 dev_close(tp->dev); 11215 goto out; 11216 } 11217 11218 tg3_netif_start(tp); 11219 tg3_full_unlock(tp); 11220 tg3_phy_start(tp); 11221 tg3_flag_clear(tp, RESET_TASK_PENDING); 11222 out: 11223 rtnl_unlock(); 11224 } 11225 11226 static int tg3_request_irq(struct tg3 *tp, int irq_num) 11227 { 11228 irq_handler_t fn; 11229 unsigned long flags; 11230 char *name; 11231 struct tg3_napi *tnapi = &tp->napi[irq_num]; 11232 11233 if (tp->irq_cnt == 1) 11234 name = tp->dev->name; 11235 else { 11236 name = &tnapi->irq_lbl[0]; 11237 if (tnapi->tx_buffers && tnapi->rx_rcb) 11238 snprintf(name, IFNAMSIZ, 11239 "%s-txrx-%d", tp->dev->name, irq_num); 11240 else if (tnapi->tx_buffers) 11241 snprintf(name, IFNAMSIZ, 11242 "%s-tx-%d", tp->dev->name, irq_num); 11243 else if (tnapi->rx_rcb) 11244 snprintf(name, IFNAMSIZ, 11245 "%s-rx-%d", tp->dev->name, irq_num); 11246 else 11247 snprintf(name, IFNAMSIZ, 11248 "%s-%d", tp->dev->name, irq_num); 11249 name[IFNAMSIZ-1] = 0; 11250 } 11251 11252 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) { 11253 fn = tg3_msi; 11254 if (tg3_flag(tp, 1SHOT_MSI)) 11255 fn = tg3_msi_1shot; 11256 flags = 0; 11257 } else { 11258 fn = tg3_interrupt; 11259 if (tg3_flag(tp, TAGGED_STATUS)) 11260 fn = tg3_interrupt_tagged; 11261 flags = IRQF_SHARED; 11262 } 11263 11264 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi); 11265 } 11266 11267 static int tg3_test_interrupt(struct tg3 *tp) 11268 { 11269 struct tg3_napi *tnapi = &tp->napi[0]; 11270 struct net_device *dev = tp->dev; 11271 int err, i, intr_ok = 0; 11272 u32 val; 11273 11274 if (!netif_running(dev)) 11275 return -ENODEV; 11276 11277 tg3_disable_ints(tp); 11278 11279 free_irq(tnapi->irq_vec, tnapi); 11280 11281 /* 11282 * Turn off MSI one shot mode. Otherwise this test has no 11283 * observable way to know whether the interrupt was delivered. 11284 */ 11285 if (tg3_flag(tp, 57765_PLUS)) { 11286 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE; 11287 tw32(MSGINT_MODE, val); 11288 } 11289 11290 err = request_irq(tnapi->irq_vec, tg3_test_isr, 11291 IRQF_SHARED, dev->name, tnapi); 11292 if (err) 11293 return err; 11294 11295 tnapi->hw_status->status &= ~SD_STATUS_UPDATED; 11296 tg3_enable_ints(tp); 11297 11298 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 11299 tnapi->coal_now); 11300 11301 for (i = 0; i < 5; i++) { 11302 u32 int_mbox, misc_host_ctrl; 11303 11304 int_mbox = tr32_mailbox(tnapi->int_mbox); 11305 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); 11306 11307 if ((int_mbox != 0) || 11308 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) { 11309 intr_ok = 1; 11310 break; 11311 } 11312 11313 if (tg3_flag(tp, 57765_PLUS) && 11314 tnapi->hw_status->status_tag != tnapi->last_tag) 11315 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); 11316 11317 msleep(10); 11318 } 11319 11320 tg3_disable_ints(tp); 11321 11322 free_irq(tnapi->irq_vec, tnapi); 11323 11324 err = tg3_request_irq(tp, 0); 11325 11326 if (err) 11327 return err; 11328 11329 if (intr_ok) { 11330 /* Reenable MSI one shot mode. 
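		 * (It was turned off at the top of this function so that the
		 * test interrupt remained observable; it is restored only on
		 * chips that actually support one shot mode.)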
		 */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}

/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, true);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}

static int tg3_request_firmware(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;

	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
			   tp->fw_needed);
		return -ENOENT;
	}

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* The firmware blob starts with version numbers, followed by
	 * the start address and the _full_ length including BSS sections
	 * (which must be longer than the actual data, of course).
	 */

	tp->fw_len = be32_to_cpu(fw_hdr->len);	/* includes bss */
	if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
			   tp->fw_len, tp->fw_needed);
		release_firmware(tp->fw);
		tp->fw = NULL;
		return -EINVAL;
	}

	/* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
	return 0;
}

static u32 tg3_irq_count(struct tg3 *tp)
{
	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);

	if (irq_cnt > 1) {
		/* We want as many rx rings enabled as there are cpus.
		 * In multiqueue MSI-X mode, the first MSI-X vector
		 * only deals with link interrupts, etc, so we add
		 * one to the number of vectors we are requesting.
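		 *
		 * e.g. rxq_cnt = 4 and txq_cnt = 1 yields
		 * max(4, 1) + 1 = 5 vectors requested (capped at tp->irq_max
		 * by the min_t() below), with vector 0 handling link and
		 * other miscellaneous events.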
11442 */ 11443 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max); 11444 } 11445 11446 return irq_cnt; 11447 } 11448 11449 static bool tg3_enable_msix(struct tg3 *tp) 11450 { 11451 int i, rc; 11452 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS]; 11453 11454 tp->txq_cnt = tp->txq_req; 11455 tp->rxq_cnt = tp->rxq_req; 11456 if (!tp->rxq_cnt) 11457 tp->rxq_cnt = netif_get_num_default_rss_queues(); 11458 if (tp->rxq_cnt > tp->rxq_max) 11459 tp->rxq_cnt = tp->rxq_max; 11460 11461 /* Disable multiple TX rings by default. Simple round-robin hardware 11462 * scheduling of the TX rings can cause starvation of rings with 11463 * small packets when other rings have TSO or jumbo packets. 11464 */ 11465 if (!tp->txq_req) 11466 tp->txq_cnt = 1; 11467 11468 tp->irq_cnt = tg3_irq_count(tp); 11469 11470 for (i = 0; i < tp->irq_max; i++) { 11471 msix_ent[i].entry = i; 11472 msix_ent[i].vector = 0; 11473 } 11474 11475 rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt); 11476 if (rc < 0) { 11477 return false; 11478 } else if (rc < tp->irq_cnt) { 11479 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n", 11480 tp->irq_cnt, rc); 11481 tp->irq_cnt = rc; 11482 tp->rxq_cnt = max(rc - 1, 1); 11483 if (tp->txq_cnt) 11484 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max); 11485 } 11486 11487 for (i = 0; i < tp->irq_max; i++) 11488 tp->napi[i].irq_vec = msix_ent[i].vector; 11489 11490 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) { 11491 pci_disable_msix(tp->pdev); 11492 return false; 11493 } 11494 11495 if (tp->irq_cnt == 1) 11496 return true; 11497 11498 tg3_flag_set(tp, ENABLE_RSS); 11499 11500 if (tp->txq_cnt > 1) 11501 tg3_flag_set(tp, ENABLE_TSS); 11502 11503 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt); 11504 11505 return true; 11506 } 11507 11508 static void tg3_ints_init(struct tg3 *tp) 11509 { 11510 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) && 11511 !tg3_flag(tp, TAGGED_STATUS)) { 11512 /* All MSI supporting chips should support tagged 11513 * status. Assert that this is the case. 11514 */ 11515 netdev_warn(tp->dev, 11516 "MSI without TAGGED_STATUS? 
Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
	}

	if (tp->irq_cnt == 1) {
		tp->txq_cnt = 1;
		tp->rxq_cnt = 1;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}

static void tg3_ints_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, USING_MSIX))
		pci_disable_msix(tp->pdev);
	else if (tg3_flag(tp, USING_MSI))
		pci_disable_msi(tp->pdev);
	tg3_flag_clear(tp, USING_MSI);
	tg3_flag_clear(tp, USING_MSIX);
	tg3_flag_clear(tp, ENABLE_RSS);
	tg3_flag_clear(tp, ENABLE_TSS);
}

static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
		     bool init)
{
	struct net_device *dev = tp->dev;
	int i, err;

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto out_ints_fini;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		err = tg3_request_irq(tp, i);
		if (err) {
			for (i--; i >= 0; i--) {
				struct tg3_napi *tnapi = &tp->napi[i];

				free_irq(tnapi->irq_vec, tnapi);
			}
			goto out_napi_fini;
		}
	}

	tg3_full_lock(tp, 0);

	if (init)
		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	}

	tg3_full_unlock(tp);

	if (err)
		goto out_free_irq;

	if (test_irq && tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto out_napi_fini;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_hwmon_open(tp);

	tg3_full_lock(tp, 0);

	tg3_timer_start(tp);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	tg3_ptp_resume(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset the loopback feature if it was turned on while the device
	 * was down; make sure that it is installed properly now.
11651 */ 11652 if (dev->features & NETIF_F_LOOPBACK) 11653 tg3_set_loopback(dev, dev->features); 11654 11655 return 0; 11656 11657 out_free_irq: 11658 for (i = tp->irq_cnt - 1; i >= 0; i--) { 11659 struct tg3_napi *tnapi = &tp->napi[i]; 11660 free_irq(tnapi->irq_vec, tnapi); 11661 } 11662 11663 out_napi_fini: 11664 tg3_napi_disable(tp); 11665 tg3_napi_fini(tp); 11666 tg3_free_consistent(tp); 11667 11668 out_ints_fini: 11669 tg3_ints_fini(tp); 11670 11671 return err; 11672 } 11673 11674 static void tg3_stop(struct tg3 *tp) 11675 { 11676 int i; 11677 11678 tg3_reset_task_cancel(tp); 11679 tg3_netif_stop(tp); 11680 11681 tg3_timer_stop(tp); 11682 11683 tg3_hwmon_close(tp); 11684 11685 tg3_phy_stop(tp); 11686 11687 tg3_full_lock(tp, 1); 11688 11689 tg3_disable_ints(tp); 11690 11691 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11692 tg3_free_rings(tp); 11693 tg3_flag_clear(tp, INIT_COMPLETE); 11694 11695 tg3_full_unlock(tp); 11696 11697 for (i = tp->irq_cnt - 1; i >= 0; i--) { 11698 struct tg3_napi *tnapi = &tp->napi[i]; 11699 free_irq(tnapi->irq_vec, tnapi); 11700 } 11701 11702 tg3_ints_fini(tp); 11703 11704 tg3_napi_fini(tp); 11705 11706 tg3_free_consistent(tp); 11707 } 11708 11709 static int tg3_open(struct net_device *dev) 11710 { 11711 struct tg3 *tp = netdev_priv(dev); 11712 int err; 11713 11714 if (tp->pcierr_recovery) { 11715 netdev_err(dev, "Failed to open device. PCI error recovery " 11716 "in progress\n"); 11717 return -EAGAIN; 11718 } 11719 11720 if (tp->fw_needed) { 11721 err = tg3_request_firmware(tp); 11722 if (tg3_asic_rev(tp) == ASIC_REV_57766) { 11723 if (err) { 11724 netdev_warn(tp->dev, "EEE capability disabled\n"); 11725 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP; 11726 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { 11727 netdev_warn(tp->dev, "EEE capability restored\n"); 11728 tp->phy_flags |= TG3_PHYFLG_EEE_CAP; 11729 } 11730 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) { 11731 if (err) 11732 return err; 11733 } else if (err) { 11734 netdev_warn(tp->dev, "TSO capability disabled\n"); 11735 tg3_flag_clear(tp, TSO_CAPABLE); 11736 } else if (!tg3_flag(tp, TSO_CAPABLE)) { 11737 netdev_notice(tp->dev, "TSO capability restored\n"); 11738 tg3_flag_set(tp, TSO_CAPABLE); 11739 } 11740 } 11741 11742 tg3_carrier_off(tp); 11743 11744 err = tg3_power_up(tp); 11745 if (err) 11746 return err; 11747 11748 tg3_full_lock(tp, 0); 11749 11750 tg3_disable_ints(tp); 11751 tg3_flag_clear(tp, INIT_COMPLETE); 11752 11753 tg3_full_unlock(tp); 11754 11755 err = tg3_start(tp, 11756 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN), 11757 true, true); 11758 if (err) { 11759 tg3_frob_aux_power(tp, false); 11760 pci_set_power_state(tp->pdev, PCI_D3hot); 11761 } 11762 11763 return err; 11764 } 11765 11766 static int tg3_close(struct net_device *dev) 11767 { 11768 struct tg3 *tp = netdev_priv(dev); 11769 11770 if (tp->pcierr_recovery) { 11771 netdev_err(dev, "Failed to close device. 
PCI error recovery " 11772 "in progress\n"); 11773 return -EAGAIN; 11774 } 11775 11776 tg3_stop(tp); 11777 11778 if (pci_device_is_present(tp->pdev)) { 11779 tg3_power_down_prepare(tp); 11780 11781 tg3_carrier_off(tp); 11782 } 11783 return 0; 11784 } 11785 11786 static inline u64 get_stat64(tg3_stat64_t *val) 11787 { 11788 return ((u64)val->high << 32) | ((u64)val->low); 11789 } 11790 11791 static u64 tg3_calc_crc_errors(struct tg3 *tp) 11792 { 11793 struct tg3_hw_stats *hw_stats = tp->hw_stats; 11794 11795 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 11796 (tg3_asic_rev(tp) == ASIC_REV_5700 || 11797 tg3_asic_rev(tp) == ASIC_REV_5701)) { 11798 u32 val; 11799 11800 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) { 11801 tg3_writephy(tp, MII_TG3_TEST1, 11802 val | MII_TG3_TEST1_CRC_EN); 11803 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val); 11804 } else 11805 val = 0; 11806 11807 tp->phy_crc_errors += val; 11808 11809 return tp->phy_crc_errors; 11810 } 11811 11812 return get_stat64(&hw_stats->rx_fcs_errors); 11813 } 11814 11815 #define ESTAT_ADD(member) \ 11816 estats->member = old_estats->member + \ 11817 get_stat64(&hw_stats->member) 11818 11819 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats) 11820 { 11821 struct tg3_ethtool_stats *old_estats = &tp->estats_prev; 11822 struct tg3_hw_stats *hw_stats = tp->hw_stats; 11823 11824 ESTAT_ADD(rx_octets); 11825 ESTAT_ADD(rx_fragments); 11826 ESTAT_ADD(rx_ucast_packets); 11827 ESTAT_ADD(rx_mcast_packets); 11828 ESTAT_ADD(rx_bcast_packets); 11829 ESTAT_ADD(rx_fcs_errors); 11830 ESTAT_ADD(rx_align_errors); 11831 ESTAT_ADD(rx_xon_pause_rcvd); 11832 ESTAT_ADD(rx_xoff_pause_rcvd); 11833 ESTAT_ADD(rx_mac_ctrl_rcvd); 11834 ESTAT_ADD(rx_xoff_entered); 11835 ESTAT_ADD(rx_frame_too_long_errors); 11836 ESTAT_ADD(rx_jabbers); 11837 ESTAT_ADD(rx_undersize_packets); 11838 ESTAT_ADD(rx_in_length_errors); 11839 ESTAT_ADD(rx_out_length_errors); 11840 ESTAT_ADD(rx_64_or_less_octet_packets); 11841 ESTAT_ADD(rx_65_to_127_octet_packets); 11842 ESTAT_ADD(rx_128_to_255_octet_packets); 11843 ESTAT_ADD(rx_256_to_511_octet_packets); 11844 ESTAT_ADD(rx_512_to_1023_octet_packets); 11845 ESTAT_ADD(rx_1024_to_1522_octet_packets); 11846 ESTAT_ADD(rx_1523_to_2047_octet_packets); 11847 ESTAT_ADD(rx_2048_to_4095_octet_packets); 11848 ESTAT_ADD(rx_4096_to_8191_octet_packets); 11849 ESTAT_ADD(rx_8192_to_9022_octet_packets); 11850 11851 ESTAT_ADD(tx_octets); 11852 ESTAT_ADD(tx_collisions); 11853 ESTAT_ADD(tx_xon_sent); 11854 ESTAT_ADD(tx_xoff_sent); 11855 ESTAT_ADD(tx_flow_control); 11856 ESTAT_ADD(tx_mac_errors); 11857 ESTAT_ADD(tx_single_collisions); 11858 ESTAT_ADD(tx_mult_collisions); 11859 ESTAT_ADD(tx_deferred); 11860 ESTAT_ADD(tx_excessive_collisions); 11861 ESTAT_ADD(tx_late_collisions); 11862 ESTAT_ADD(tx_collide_2times); 11863 ESTAT_ADD(tx_collide_3times); 11864 ESTAT_ADD(tx_collide_4times); 11865 ESTAT_ADD(tx_collide_5times); 11866 ESTAT_ADD(tx_collide_6times); 11867 ESTAT_ADD(tx_collide_7times); 11868 ESTAT_ADD(tx_collide_8times); 11869 ESTAT_ADD(tx_collide_9times); 11870 ESTAT_ADD(tx_collide_10times); 11871 ESTAT_ADD(tx_collide_11times); 11872 ESTAT_ADD(tx_collide_12times); 11873 ESTAT_ADD(tx_collide_13times); 11874 ESTAT_ADD(tx_collide_14times); 11875 ESTAT_ADD(tx_collide_15times); 11876 ESTAT_ADD(tx_ucast_packets); 11877 ESTAT_ADD(tx_mcast_packets); 11878 ESTAT_ADD(tx_bcast_packets); 11879 ESTAT_ADD(tx_carrier_sense_errors); 11880 ESTAT_ADD(tx_discards); 11881 ESTAT_ADD(tx_errors); 11882 11883 ESTAT_ADD(dma_writeq_full); 11884 
ESTAT_ADD(dma_write_prioq_full); 11885 ESTAT_ADD(rxbds_empty); 11886 ESTAT_ADD(rx_discards); 11887 ESTAT_ADD(rx_errors); 11888 ESTAT_ADD(rx_threshold_hit); 11889 11890 ESTAT_ADD(dma_readq_full); 11891 ESTAT_ADD(dma_read_prioq_full); 11892 ESTAT_ADD(tx_comp_queue_full); 11893 11894 ESTAT_ADD(ring_set_send_prod_index); 11895 ESTAT_ADD(ring_status_update); 11896 ESTAT_ADD(nic_irqs); 11897 ESTAT_ADD(nic_avoided_irqs); 11898 ESTAT_ADD(nic_tx_threshold_hit); 11899 11900 ESTAT_ADD(mbuf_lwm_thresh_hit); 11901 } 11902 11903 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats) 11904 { 11905 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev; 11906 struct tg3_hw_stats *hw_stats = tp->hw_stats; 11907 unsigned long rx_dropped; 11908 unsigned long tx_dropped; 11909 int i; 11910 11911 stats->rx_packets = old_stats->rx_packets + 11912 get_stat64(&hw_stats->rx_ucast_packets) + 11913 get_stat64(&hw_stats->rx_mcast_packets) + 11914 get_stat64(&hw_stats->rx_bcast_packets); 11915 11916 stats->tx_packets = old_stats->tx_packets + 11917 get_stat64(&hw_stats->tx_ucast_packets) + 11918 get_stat64(&hw_stats->tx_mcast_packets) + 11919 get_stat64(&hw_stats->tx_bcast_packets); 11920 11921 stats->rx_bytes = old_stats->rx_bytes + 11922 get_stat64(&hw_stats->rx_octets); 11923 stats->tx_bytes = old_stats->tx_bytes + 11924 get_stat64(&hw_stats->tx_octets); 11925 11926 stats->rx_errors = old_stats->rx_errors + 11927 get_stat64(&hw_stats->rx_errors); 11928 stats->tx_errors = old_stats->tx_errors + 11929 get_stat64(&hw_stats->tx_errors) + 11930 get_stat64(&hw_stats->tx_mac_errors) + 11931 get_stat64(&hw_stats->tx_carrier_sense_errors) + 11932 get_stat64(&hw_stats->tx_discards); 11933 11934 stats->multicast = old_stats->multicast + 11935 get_stat64(&hw_stats->rx_mcast_packets); 11936 stats->collisions = old_stats->collisions + 11937 get_stat64(&hw_stats->tx_collisions); 11938 11939 stats->rx_length_errors = old_stats->rx_length_errors + 11940 get_stat64(&hw_stats->rx_frame_too_long_errors) + 11941 get_stat64(&hw_stats->rx_undersize_packets); 11942 11943 stats->rx_frame_errors = old_stats->rx_frame_errors + 11944 get_stat64(&hw_stats->rx_align_errors); 11945 stats->tx_aborted_errors = old_stats->tx_aborted_errors + 11946 get_stat64(&hw_stats->tx_discards); 11947 stats->tx_carrier_errors = old_stats->tx_carrier_errors + 11948 get_stat64(&hw_stats->tx_carrier_sense_errors); 11949 11950 stats->rx_crc_errors = old_stats->rx_crc_errors + 11951 tg3_calc_crc_errors(tp); 11952 11953 stats->rx_missed_errors = old_stats->rx_missed_errors + 11954 get_stat64(&hw_stats->rx_discards); 11955 11956 /* Aggregate per-queue counters. The per-queue counters are updated 11957 * by a single writer, race-free. The result computed by this loop 11958 * might not be 100% accurate (counters can be updated in the middle of 11959 * the loop) but the next tg3_get_nstats() will recompute the current 11960 * value so it is acceptable. 11961 * 11962 * Note that these counters wrap around at 4G on 32bit machines. 
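	 * ("4G" because unsigned long is 32 bits wide there, so the
	 * rx_dropped/tx_dropped sums below are effectively taken
	 * modulo 2^32.)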
11963 */ 11964 rx_dropped = (unsigned long)(old_stats->rx_dropped); 11965 tx_dropped = (unsigned long)(old_stats->tx_dropped); 11966 11967 for (i = 0; i < tp->irq_cnt; i++) { 11968 struct tg3_napi *tnapi = &tp->napi[i]; 11969 11970 rx_dropped += tnapi->rx_dropped; 11971 tx_dropped += tnapi->tx_dropped; 11972 } 11973 11974 stats->rx_dropped = rx_dropped; 11975 stats->tx_dropped = tx_dropped; 11976 } 11977 11978 static int tg3_get_regs_len(struct net_device *dev) 11979 { 11980 return TG3_REG_BLK_SIZE; 11981 } 11982 11983 static void tg3_get_regs(struct net_device *dev, 11984 struct ethtool_regs *regs, void *_p) 11985 { 11986 struct tg3 *tp = netdev_priv(dev); 11987 11988 regs->version = 0; 11989 11990 memset(_p, 0, TG3_REG_BLK_SIZE); 11991 11992 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 11993 return; 11994 11995 tg3_full_lock(tp, 0); 11996 11997 tg3_dump_legacy_regs(tp, (u32 *)_p); 11998 11999 tg3_full_unlock(tp); 12000 } 12001 12002 static int tg3_get_eeprom_len(struct net_device *dev) 12003 { 12004 struct tg3 *tp = netdev_priv(dev); 12005 12006 return tp->nvram_size; 12007 } 12008 12009 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) 12010 { 12011 struct tg3 *tp = netdev_priv(dev); 12012 int ret, cpmu_restore = 0; 12013 u8 *pd; 12014 u32 i, offset, len, b_offset, b_count, cpmu_val = 0; 12015 __be32 val; 12016 12017 if (tg3_flag(tp, NO_NVRAM)) 12018 return -EINVAL; 12019 12020 offset = eeprom->offset; 12021 len = eeprom->len; 12022 eeprom->len = 0; 12023 12024 eeprom->magic = TG3_EEPROM_MAGIC; 12025 12026 /* Override clock, link aware and link idle modes */ 12027 if (tg3_flag(tp, CPMU_PRESENT)) { 12028 cpmu_val = tr32(TG3_CPMU_CTRL); 12029 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE | 12030 CPMU_CTRL_LINK_IDLE_MODE)) { 12031 tw32(TG3_CPMU_CTRL, cpmu_val & 12032 ~(CPMU_CTRL_LINK_AWARE_MODE | 12033 CPMU_CTRL_LINK_IDLE_MODE)); 12034 cpmu_restore = 1; 12035 } 12036 } 12037 tg3_override_clk(tp); 12038 12039 if (offset & 3) { 12040 /* adjustments to start on required 4 byte boundary */ 12041 b_offset = offset & 3; 12042 b_count = 4 - b_offset; 12043 if (b_count > len) { 12044 /* i.e. 
offset=1 len=2 */ 12045 b_count = len; 12046 } 12047 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val); 12048 if (ret) 12049 goto eeprom_done; 12050 memcpy(data, ((char *)&val) + b_offset, b_count); 12051 len -= b_count; 12052 offset += b_count; 12053 eeprom->len += b_count; 12054 } 12055 12056 /* read bytes up to the last 4 byte boundary */ 12057 pd = &data[eeprom->len]; 12058 for (i = 0; i < (len - (len & 3)); i += 4) { 12059 ret = tg3_nvram_read_be32(tp, offset + i, &val); 12060 if (ret) { 12061 if (i) 12062 i -= 4; 12063 eeprom->len += i; 12064 goto eeprom_done; 12065 } 12066 memcpy(pd + i, &val, 4); 12067 if (need_resched()) { 12068 if (signal_pending(current)) { 12069 eeprom->len += i; 12070 ret = -EINTR; 12071 goto eeprom_done; 12072 } 12073 cond_resched(); 12074 } 12075 } 12076 eeprom->len += i; 12077 12078 if (len & 3) { 12079 /* read last bytes not ending on 4 byte boundary */ 12080 pd = &data[eeprom->len]; 12081 b_count = len & 3; 12082 b_offset = offset + len - b_count; 12083 ret = tg3_nvram_read_be32(tp, b_offset, &val); 12084 if (ret) 12085 goto eeprom_done; 12086 memcpy(pd, &val, b_count); 12087 eeprom->len += b_count; 12088 } 12089 ret = 0; 12090 12091 eeprom_done: 12092 /* Restore clock, link aware and link idle modes */ 12093 tg3_restore_clk(tp); 12094 if (cpmu_restore) 12095 tw32(TG3_CPMU_CTRL, cpmu_val); 12096 12097 return ret; 12098 } 12099 12100 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) 12101 { 12102 struct tg3 *tp = netdev_priv(dev); 12103 int ret; 12104 u32 offset, len, b_offset, odd_len; 12105 u8 *buf; 12106 __be32 start = 0, end; 12107 12108 if (tg3_flag(tp, NO_NVRAM) || 12109 eeprom->magic != TG3_EEPROM_MAGIC) 12110 return -EINVAL; 12111 12112 offset = eeprom->offset; 12113 len = eeprom->len; 12114 12115 if ((b_offset = (offset & 3))) { 12116 /* adjustments to start on required 4 byte boundary */ 12117 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start); 12118 if (ret) 12119 return ret; 12120 len += b_offset; 12121 offset &= ~3; 12122 if (len < 4) 12123 len = 4; 12124 } 12125 12126 odd_len = 0; 12127 if (len & 3) { 12128 /* adjustments to end on required 4 byte boundary */ 12129 odd_len = 1; 12130 len = (len + 3) & ~3; 12131 ret = tg3_nvram_read_be32(tp, offset+len-4, &end); 12132 if (ret) 12133 return ret; 12134 } 12135 12136 buf = data; 12137 if (b_offset || odd_len) { 12138 buf = kmalloc(len, GFP_KERNEL); 12139 if (!buf) 12140 return -ENOMEM; 12141 if (b_offset) 12142 memcpy(buf, &start, 4); 12143 if (odd_len) 12144 memcpy(buf+len-4, &end, 4); 12145 memcpy(buf + b_offset, data, eeprom->len); 12146 } 12147 12148 ret = tg3_nvram_write_block(tp, offset, len, buf); 12149 12150 if (buf != data) 12151 kfree(buf); 12152 12153 return ret; 12154 } 12155 12156 static int tg3_get_link_ksettings(struct net_device *dev, 12157 struct ethtool_link_ksettings *cmd) 12158 { 12159 struct tg3 *tp = netdev_priv(dev); 12160 u32 supported, advertising; 12161 12162 if (tg3_flag(tp, USE_PHYLIB)) { 12163 struct phy_device *phydev; 12164 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 12165 return -EAGAIN; 12166 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 12167 phy_ethtool_ksettings_get(phydev, cmd); 12168 12169 return 0; 12170 } 12171 12172 supported = (SUPPORTED_Autoneg); 12173 12174 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) 12175 supported |= (SUPPORTED_1000baseT_Half | 12176 SUPPORTED_1000baseT_Full); 12177 12178 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { 12179 supported |= (SUPPORTED_100baseT_Half | 12180 
SUPPORTED_100baseT_Full | 12181 SUPPORTED_10baseT_Half | 12182 SUPPORTED_10baseT_Full | 12183 SUPPORTED_TP); 12184 cmd->base.port = PORT_TP; 12185 } else { 12186 supported |= SUPPORTED_FIBRE; 12187 cmd->base.port = PORT_FIBRE; 12188 } 12189 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, 12190 supported); 12191 12192 advertising = tp->link_config.advertising; 12193 if (tg3_flag(tp, PAUSE_AUTONEG)) { 12194 if (tp->link_config.flowctrl & FLOW_CTRL_RX) { 12195 if (tp->link_config.flowctrl & FLOW_CTRL_TX) { 12196 advertising |= ADVERTISED_Pause; 12197 } else { 12198 advertising |= ADVERTISED_Pause | 12199 ADVERTISED_Asym_Pause; 12200 } 12201 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) { 12202 advertising |= ADVERTISED_Asym_Pause; 12203 } 12204 } 12205 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, 12206 advertising); 12207 12208 if (netif_running(dev) && tp->link_up) { 12209 cmd->base.speed = tp->link_config.active_speed; 12210 cmd->base.duplex = tp->link_config.active_duplex; 12211 ethtool_convert_legacy_u32_to_link_mode( 12212 cmd->link_modes.lp_advertising, 12213 tp->link_config.rmt_adv); 12214 12215 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { 12216 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE) 12217 cmd->base.eth_tp_mdix = ETH_TP_MDI_X; 12218 else 12219 cmd->base.eth_tp_mdix = ETH_TP_MDI; 12220 } 12221 } else { 12222 cmd->base.speed = SPEED_UNKNOWN; 12223 cmd->base.duplex = DUPLEX_UNKNOWN; 12224 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; 12225 } 12226 cmd->base.phy_address = tp->phy_addr; 12227 cmd->base.autoneg = tp->link_config.autoneg; 12228 return 0; 12229 } 12230 12231 static int tg3_set_link_ksettings(struct net_device *dev, 12232 const struct ethtool_link_ksettings *cmd) 12233 { 12234 struct tg3 *tp = netdev_priv(dev); 12235 u32 speed = cmd->base.speed; 12236 u32 advertising; 12237 12238 if (tg3_flag(tp, USE_PHYLIB)) { 12239 struct phy_device *phydev; 12240 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 12241 return -EAGAIN; 12242 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 12243 return phy_ethtool_ksettings_set(phydev, cmd); 12244 } 12245 12246 if (cmd->base.autoneg != AUTONEG_ENABLE && 12247 cmd->base.autoneg != AUTONEG_DISABLE) 12248 return -EINVAL; 12249 12250 if (cmd->base.autoneg == AUTONEG_DISABLE && 12251 cmd->base.duplex != DUPLEX_FULL && 12252 cmd->base.duplex != DUPLEX_HALF) 12253 return -EINVAL; 12254 12255 ethtool_convert_link_mode_to_legacy_u32(&advertising, 12256 cmd->link_modes.advertising); 12257 12258 if (cmd->base.autoneg == AUTONEG_ENABLE) { 12259 u32 mask = ADVERTISED_Autoneg | 12260 ADVERTISED_Pause | 12261 ADVERTISED_Asym_Pause; 12262 12263 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) 12264 mask |= ADVERTISED_1000baseT_Half | 12265 ADVERTISED_1000baseT_Full; 12266 12267 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 12268 mask |= ADVERTISED_100baseT_Half | 12269 ADVERTISED_100baseT_Full | 12270 ADVERTISED_10baseT_Half | 12271 ADVERTISED_10baseT_Full | 12272 ADVERTISED_TP; 12273 else 12274 mask |= ADVERTISED_FIBRE; 12275 12276 if (advertising & ~mask) 12277 return -EINVAL; 12278 12279 mask &= (ADVERTISED_1000baseT_Half | 12280 ADVERTISED_1000baseT_Full | 12281 ADVERTISED_100baseT_Half | 12282 ADVERTISED_100baseT_Full | 12283 ADVERTISED_10baseT_Half | 12284 ADVERTISED_10baseT_Full); 12285 12286 advertising &= mask; 12287 } else { 12288 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) { 12289 if (speed != SPEED_1000) 12290 return -EINVAL; 12291 12292 if (cmd->base.duplex != DUPLEX_FULL) 12293 return 
-EINVAL; 12294 } else { 12295 if (speed != SPEED_100 && 12296 speed != SPEED_10) 12297 return -EINVAL; 12298 } 12299 } 12300 12301 tg3_full_lock(tp, 0); 12302 12303 tp->link_config.autoneg = cmd->base.autoneg; 12304 if (cmd->base.autoneg == AUTONEG_ENABLE) { 12305 tp->link_config.advertising = (advertising | 12306 ADVERTISED_Autoneg); 12307 tp->link_config.speed = SPEED_UNKNOWN; 12308 tp->link_config.duplex = DUPLEX_UNKNOWN; 12309 } else { 12310 tp->link_config.advertising = 0; 12311 tp->link_config.speed = speed; 12312 tp->link_config.duplex = cmd->base.duplex; 12313 } 12314 12315 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; 12316 12317 tg3_warn_mgmt_link_flap(tp); 12318 12319 if (netif_running(dev)) 12320 tg3_setup_phy(tp, true); 12321 12322 tg3_full_unlock(tp); 12323 12324 return 0; 12325 } 12326 12327 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 12328 { 12329 struct tg3 *tp = netdev_priv(dev); 12330 12331 strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); 12332 strscpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version)); 12333 strscpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info)); 12334 } 12335 12336 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 12337 { 12338 struct tg3 *tp = netdev_priv(dev); 12339 12340 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev)) 12341 wol->supported = WAKE_MAGIC; 12342 else 12343 wol->supported = 0; 12344 wol->wolopts = 0; 12345 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev)) 12346 wol->wolopts = WAKE_MAGIC; 12347 memset(&wol->sopass, 0, sizeof(wol->sopass)); 12348 } 12349 12350 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 12351 { 12352 struct tg3 *tp = netdev_priv(dev); 12353 struct device *dp = &tp->pdev->dev; 12354 12355 if (wol->wolopts & ~WAKE_MAGIC) 12356 return -EINVAL; 12357 if ((wol->wolopts & WAKE_MAGIC) && 12358 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp))) 12359 return -EINVAL; 12360 12361 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC); 12362 12363 if (device_may_wakeup(dp)) 12364 tg3_flag_set(tp, WOL_ENABLE); 12365 else 12366 tg3_flag_clear(tp, WOL_ENABLE); 12367 12368 return 0; 12369 } 12370 12371 static u32 tg3_get_msglevel(struct net_device *dev) 12372 { 12373 struct tg3 *tp = netdev_priv(dev); 12374 return tp->msg_enable; 12375 } 12376 12377 static void tg3_set_msglevel(struct net_device *dev, u32 value) 12378 { 12379 struct tg3 *tp = netdev_priv(dev); 12380 tp->msg_enable = value; 12381 } 12382 12383 static int tg3_nway_reset(struct net_device *dev) 12384 { 12385 struct tg3 *tp = netdev_priv(dev); 12386 int r; 12387 12388 if (!netif_running(dev)) 12389 return -EAGAIN; 12390 12391 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 12392 return -EINVAL; 12393 12394 tg3_warn_mgmt_link_flap(tp); 12395 12396 if (tg3_flag(tp, USE_PHYLIB)) { 12397 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 12398 return -EAGAIN; 12399 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); 12400 } else { 12401 u32 bmcr; 12402 12403 spin_lock_bh(&tp->lock); 12404 r = -EINVAL; 12405 tg3_readphy(tp, MII_BMCR, &bmcr); 12406 if (!tg3_readphy(tp, MII_BMCR, &bmcr) && 12407 ((bmcr & BMCR_ANENABLE) || 12408 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) { 12409 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART | 12410 BMCR_ANENABLE); 12411 r = 0; 12412 } 12413 spin_unlock_bh(&tp->lock); 12414 } 12415 12416 return r; 12417 } 12418 12419 static void tg3_get_ringparam(struct net_device *dev, 12420 
struct ethtool_ringparam *ering, 12421 struct kernel_ethtool_ringparam *kernel_ering, 12422 struct netlink_ext_ack *extack) 12423 { 12424 struct tg3 *tp = netdev_priv(dev); 12425 12426 ering->rx_max_pending = tp->rx_std_ring_mask; 12427 if (tg3_flag(tp, JUMBO_RING_ENABLE)) 12428 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask; 12429 else 12430 ering->rx_jumbo_max_pending = 0; 12431 12432 ering->tx_max_pending = TG3_TX_RING_SIZE - 1; 12433 12434 ering->rx_pending = tp->rx_pending; 12435 if (tg3_flag(tp, JUMBO_RING_ENABLE)) 12436 ering->rx_jumbo_pending = tp->rx_jumbo_pending; 12437 else 12438 ering->rx_jumbo_pending = 0; 12439 12440 ering->tx_pending = tp->napi[0].tx_pending; 12441 } 12442 12443 static int tg3_set_ringparam(struct net_device *dev, 12444 struct ethtool_ringparam *ering, 12445 struct kernel_ethtool_ringparam *kernel_ering, 12446 struct netlink_ext_ack *extack) 12447 { 12448 struct tg3 *tp = netdev_priv(dev); 12449 int i, irq_sync = 0, err = 0; 12450 bool reset_phy = false; 12451 12452 if ((ering->rx_pending > tp->rx_std_ring_mask) || 12453 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) || 12454 (ering->tx_pending > TG3_TX_RING_SIZE - 1) || 12455 (ering->tx_pending <= MAX_SKB_FRAGS) || 12456 (tg3_flag(tp, TSO_BUG) && 12457 (ering->tx_pending <= (MAX_SKB_FRAGS * 3)))) 12458 return -EINVAL; 12459 12460 if (netif_running(dev)) { 12461 tg3_phy_stop(tp); 12462 tg3_netif_stop(tp); 12463 irq_sync = 1; 12464 } 12465 12466 tg3_full_lock(tp, irq_sync); 12467 12468 tp->rx_pending = ering->rx_pending; 12469 12470 if (tg3_flag(tp, MAX_RXPEND_64) && 12471 tp->rx_pending > 63) 12472 tp->rx_pending = 63; 12473 12474 if (tg3_flag(tp, JUMBO_RING_ENABLE)) 12475 tp->rx_jumbo_pending = ering->rx_jumbo_pending; 12476 12477 for (i = 0; i < tp->irq_max; i++) 12478 tp->napi[i].tx_pending = ering->tx_pending; 12479 12480 if (netif_running(dev)) { 12481 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 12482 /* Reset PHY to avoid PHY lock up */ 12483 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 12484 tg3_asic_rev(tp) == ASIC_REV_5719 || 12485 tg3_asic_rev(tp) == ASIC_REV_5720) 12486 reset_phy = true; 12487 12488 err = tg3_restart_hw(tp, reset_phy); 12489 if (!err) 12490 tg3_netif_start(tp); 12491 } 12492 12493 tg3_full_unlock(tp); 12494 12495 if (irq_sync && !err) 12496 tg3_phy_start(tp); 12497 12498 return err; 12499 } 12500 12501 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) 12502 { 12503 struct tg3 *tp = netdev_priv(dev); 12504 12505 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG); 12506 12507 if (tp->link_config.flowctrl & FLOW_CTRL_RX) 12508 epause->rx_pause = 1; 12509 else 12510 epause->rx_pause = 0; 12511 12512 if (tp->link_config.flowctrl & FLOW_CTRL_TX) 12513 epause->tx_pause = 1; 12514 else 12515 epause->tx_pause = 0; 12516 } 12517 12518 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) 12519 { 12520 struct tg3 *tp = netdev_priv(dev); 12521 int err = 0; 12522 bool reset_phy = false; 12523 12524 if (tp->link_config.autoneg == AUTONEG_ENABLE) 12525 tg3_warn_mgmt_link_flap(tp); 12526 12527 if (tg3_flag(tp, USE_PHYLIB)) { 12528 struct phy_device *phydev; 12529 12530 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 12531 12532 if (!phy_validate_pause(phydev, epause)) 12533 return -EINVAL; 12534 12535 tp->link_config.flowctrl = 0; 12536 phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause); 12537 if (epause->rx_pause) { 12538 tp->link_config.flowctrl |= FLOW_CTRL_RX; 12539 12540 if (epause->tx_pause) { 12541 
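/* Both directions were requested, so record symmetric flow control locally; the pause advertisement itself was already handled by phy_set_asym_pause() above. */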
tp->link_config.flowctrl |= FLOW_CTRL_TX; 12542 } 12543 } else if (epause->tx_pause) { 12544 tp->link_config.flowctrl |= FLOW_CTRL_TX; 12545 } 12546 12547 if (epause->autoneg) 12548 tg3_flag_set(tp, PAUSE_AUTONEG); 12549 else 12550 tg3_flag_clear(tp, PAUSE_AUTONEG); 12551 12552 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { 12553 if (phydev->autoneg) { 12554 /* phy_set_asym_pause() will 12555 * renegotiate the link to inform our 12556 * link partner of our flow control 12557 * settings, even if the flow control 12558 * is forced. Let tg3_adjust_link() 12559 * do the final flow control setup. 12560 */ 12561 return 0; 12562 } 12563 12564 if (!epause->autoneg) 12565 tg3_setup_flow_control(tp, 0, 0); 12566 } 12567 } else { 12568 int irq_sync = 0; 12569 12570 if (netif_running(dev)) { 12571 tg3_netif_stop(tp); 12572 irq_sync = 1; 12573 } 12574 12575 tg3_full_lock(tp, irq_sync); 12576 12577 if (epause->autoneg) 12578 tg3_flag_set(tp, PAUSE_AUTONEG); 12579 else 12580 tg3_flag_clear(tp, PAUSE_AUTONEG); 12581 if (epause->rx_pause) 12582 tp->link_config.flowctrl |= FLOW_CTRL_RX; 12583 else 12584 tp->link_config.flowctrl &= ~FLOW_CTRL_RX; 12585 if (epause->tx_pause) 12586 tp->link_config.flowctrl |= FLOW_CTRL_TX; 12587 else 12588 tp->link_config.flowctrl &= ~FLOW_CTRL_TX; 12589 12590 if (netif_running(dev)) { 12591 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 12592 /* Reset PHY to avoid PHY lock up */ 12593 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 12594 tg3_asic_rev(tp) == ASIC_REV_5719 || 12595 tg3_asic_rev(tp) == ASIC_REV_5720) 12596 reset_phy = true; 12597 12598 err = tg3_restart_hw(tp, reset_phy); 12599 if (!err) 12600 tg3_netif_start(tp); 12601 } 12602 12603 tg3_full_unlock(tp); 12604 } 12605 12606 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; 12607 12608 return err; 12609 } 12610 12611 static int tg3_get_sset_count(struct net_device *dev, int sset) 12612 { 12613 switch (sset) { 12614 case ETH_SS_TEST: 12615 return TG3_NUM_TEST; 12616 case ETH_SS_STATS: 12617 return TG3_NUM_STATS; 12618 default: 12619 return -EOPNOTSUPP; 12620 } 12621 } 12622 12623 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, 12624 u32 *rules __always_unused) 12625 { 12626 struct tg3 *tp = netdev_priv(dev); 12627 12628 if (!tg3_flag(tp, SUPPORT_MSIX)) 12629 return -EOPNOTSUPP; 12630 12631 switch (info->cmd) { 12632 case ETHTOOL_GRXRINGS: 12633 if (netif_running(tp->dev)) 12634 info->data = tp->rxq_cnt; 12635 else { 12636 info->data = num_online_cpus(); 12637 if (info->data > TG3_RSS_MAX_NUM_QS) 12638 info->data = TG3_RSS_MAX_NUM_QS; 12639 } 12640 12641 return 0; 12642 12643 default: 12644 return -EOPNOTSUPP; 12645 } 12646 } 12647 12648 static u32 tg3_get_rxfh_indir_size(struct net_device *dev) 12649 { 12650 u32 size = 0; 12651 struct tg3 *tp = netdev_priv(dev); 12652 12653 if (tg3_flag(tp, SUPPORT_MSIX)) 12654 size = TG3_RSS_INDIR_TBL_SIZE; 12655 12656 return size; 12657 } 12658 12659 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc) 12660 { 12661 struct tg3 *tp = netdev_priv(dev); 12662 int i; 12663 12664 if (hfunc) 12665 *hfunc = ETH_RSS_HASH_TOP; 12666 if (!indir) 12667 return 0; 12668 12669 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) 12670 indir[i] = tp->rss_ind_tbl[i]; 12671 12672 return 0; 12673 } 12674 12675 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, 12676 const u8 hfunc) 12677 { 12678 struct tg3 *tp = netdev_priv(dev); 12679 size_t i; 12680 12681 /* We require at least one supported parameter to be changed and no 12682 * change 
in any of the unsupported parameters 12683 */ 12684 if (key || 12685 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) 12686 return -EOPNOTSUPP; 12687 12688 if (!indir) 12689 return 0; 12690 12691 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) 12692 tp->rss_ind_tbl[i] = indir[i]; 12693 12694 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS)) 12695 return 0; 12696 12697 /* It is legal to write the indirection 12698 * table while the device is running. 12699 */ 12700 tg3_full_lock(tp, 0); 12701 tg3_rss_write_indir_tbl(tp); 12702 tg3_full_unlock(tp); 12703 12704 return 0; 12705 } 12706 12707 static void tg3_get_channels(struct net_device *dev, 12708 struct ethtool_channels *channel) 12709 { 12710 struct tg3 *tp = netdev_priv(dev); 12711 u32 deflt_qs = netif_get_num_default_rss_queues(); 12712 12713 channel->max_rx = tp->rxq_max; 12714 channel->max_tx = tp->txq_max; 12715 12716 if (netif_running(dev)) { 12717 channel->rx_count = tp->rxq_cnt; 12718 channel->tx_count = tp->txq_cnt; 12719 } else { 12720 if (tp->rxq_req) 12721 channel->rx_count = tp->rxq_req; 12722 else 12723 channel->rx_count = min(deflt_qs, tp->rxq_max); 12724 12725 if (tp->txq_req) 12726 channel->tx_count = tp->txq_req; 12727 else 12728 channel->tx_count = min(deflt_qs, tp->txq_max); 12729 } 12730 } 12731 12732 static int tg3_set_channels(struct net_device *dev, 12733 struct ethtool_channels *channel) 12734 { 12735 struct tg3 *tp = netdev_priv(dev); 12736 12737 if (!tg3_flag(tp, SUPPORT_MSIX)) 12738 return -EOPNOTSUPP; 12739 12740 if (channel->rx_count > tp->rxq_max || 12741 channel->tx_count > tp->txq_max) 12742 return -EINVAL; 12743 12744 tp->rxq_req = channel->rx_count; 12745 tp->txq_req = channel->tx_count; 12746 12747 if (!netif_running(dev)) 12748 return 0; 12749 12750 tg3_stop(tp); 12751 12752 tg3_carrier_off(tp); 12753 12754 tg3_start(tp, true, false, false); 12755 12756 return 0; 12757 } 12758 12759 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf) 12760 { 12761 switch (stringset) { 12762 case ETH_SS_STATS: 12763 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys)); 12764 break; 12765 case ETH_SS_TEST: 12766 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys)); 12767 break; 12768 default: 12769 WARN_ON(1); /* we need a WARN() */ 12770 break; 12771 } 12772 } 12773 12774 static int tg3_set_phys_id(struct net_device *dev, 12775 enum ethtool_phys_id_state state) 12776 { 12777 struct tg3 *tp = netdev_priv(dev); 12778 12779 switch (state) { 12780 case ETHTOOL_ID_ACTIVE: 12781 return 1; /* cycle on/off once per second */ 12782 12783 case ETHTOOL_ID_ON: 12784 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | 12785 LED_CTRL_1000MBPS_ON | 12786 LED_CTRL_100MBPS_ON | 12787 LED_CTRL_10MBPS_ON | 12788 LED_CTRL_TRAFFIC_OVERRIDE | 12789 LED_CTRL_TRAFFIC_BLINK | 12790 LED_CTRL_TRAFFIC_LED); 12791 break; 12792 12793 case ETHTOOL_ID_OFF: 12794 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | 12795 LED_CTRL_TRAFFIC_OVERRIDE); 12796 break; 12797 12798 case ETHTOOL_ID_INACTIVE: 12799 tw32(MAC_LED_CTRL, tp->led_ctrl); 12800 break; 12801 } 12802 12803 return 0; 12804 } 12805 12806 static void tg3_get_ethtool_stats(struct net_device *dev, 12807 struct ethtool_stats *estats, u64 *tmp_stats) 12808 { 12809 struct tg3 *tp = netdev_priv(dev); 12810 12811 if (tp->hw_stats) 12812 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats); 12813 else 12814 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats)); 12815 } 12816 12817 static __be32 *tg3_vpd_readblock(struct tg3 *tp, unsigned int *vpdlen) 12818 {
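/* Locate the VPD image: for EEPROM-style NVRAM, scan the directory for an extended-VPD entry and fall back to the fixed TG3_NVM_VPD_OFF window; for other NVRAM magics, read the VPD through the PCI VPD capability instead. */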
12819 int i; 12820 __be32 *buf; 12821 u32 offset = 0, len = 0; 12822 u32 magic, val; 12823 12824 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic)) 12825 return NULL; 12826 12827 if (magic == TG3_EEPROM_MAGIC) { 12828 for (offset = TG3_NVM_DIR_START; 12829 offset < TG3_NVM_DIR_END; 12830 offset += TG3_NVM_DIRENT_SIZE) { 12831 if (tg3_nvram_read(tp, offset, &val)) 12832 return NULL; 12833 12834 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == 12835 TG3_NVM_DIRTYPE_EXTVPD) 12836 break; 12837 } 12838 12839 if (offset != TG3_NVM_DIR_END) { 12840 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4; 12841 if (tg3_nvram_read(tp, offset + 4, &offset)) 12842 return NULL; 12843 12844 offset = tg3_nvram_logical_addr(tp, offset); 12845 } 12846 12847 if (!offset || !len) { 12848 offset = TG3_NVM_VPD_OFF; 12849 len = TG3_NVM_VPD_LEN; 12850 } 12851 12852 buf = kmalloc(len, GFP_KERNEL); 12853 if (!buf) 12854 return NULL; 12855 12856 for (i = 0; i < len; i += 4) { 12857 /* The data is in little-endian format in NVRAM. 12858 * Use the big-endian read routines to preserve 12859 * the byte order as it exists in NVRAM. 12860 */ 12861 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4])) 12862 goto error; 12863 } 12864 *vpdlen = len; 12865 } else { 12866 buf = pci_vpd_alloc(tp->pdev, vpdlen); 12867 if (IS_ERR(buf)) 12868 return NULL; 12869 } 12870 12871 return buf; 12872 12873 error: 12874 kfree(buf); 12875 return NULL; 12876 } 12877 12878 #define NVRAM_TEST_SIZE 0x100 12879 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14 12880 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18 12881 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c 12882 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20 12883 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24 12884 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50 12885 #define NVRAM_SELFBOOT_HW_SIZE 0x20 12886 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c 12887 12888 static int tg3_test_nvram(struct tg3 *tp) 12889 { 12890 u32 csum, magic; 12891 __be32 *buf; 12892 int i, j, k, err = 0, size; 12893 unsigned int len; 12894 12895 if (tg3_flag(tp, NO_NVRAM)) 12896 return 0; 12897 12898 if (tg3_nvram_read(tp, 0, &magic) != 0) 12899 return -EIO; 12900 12901 if (magic == TG3_EEPROM_MAGIC) 12902 size = NVRAM_TEST_SIZE; 12903 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) { 12904 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) == 12905 TG3_EEPROM_SB_FORMAT_1) { 12906 switch (magic & TG3_EEPROM_SB_REVISION_MASK) { 12907 case TG3_EEPROM_SB_REVISION_0: 12908 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE; 12909 break; 12910 case TG3_EEPROM_SB_REVISION_2: 12911 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE; 12912 break; 12913 case TG3_EEPROM_SB_REVISION_3: 12914 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE; 12915 break; 12916 case TG3_EEPROM_SB_REVISION_4: 12917 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE; 12918 break; 12919 case TG3_EEPROM_SB_REVISION_5: 12920 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE; 12921 break; 12922 case TG3_EEPROM_SB_REVISION_6: 12923 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE; 12924 break; 12925 default: 12926 return -EIO; 12927 } 12928 } else 12929 return 0; 12930 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW) 12931 size = NVRAM_SELFBOOT_HW_SIZE; 12932 else 12933 return -EIO; 12934 12935 buf = kmalloc(size, GFP_KERNEL); 12936 if (buf == NULL) 12937 return -ENOMEM; 12938 12939 err = -EIO; 12940 for (i = 0, j = 0; i < size; i += 4, j++) { 12941 err = tg3_nvram_read_be32(tp, i, &buf[j]); 12942 if (err) 12943 break; 12944 } 12945 if (i < size) 12946 goto out; 12947 12948 /* Selfboot format */ 12949 magic = be32_to_cpu(buf[0]); 12950 if 
((magic & TG3_EEPROM_MAGIC_FW_MSK) == 12951 TG3_EEPROM_MAGIC_FW) { 12952 u8 *buf8 = (u8 *) buf, csum8 = 0; 12953 12954 if ((magic & TG3_EEPROM_SB_REVISION_MASK) == 12955 TG3_EEPROM_SB_REVISION_2) { 12956 /* For rev 2, the csum doesn't include the MBA. */ 12957 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++) 12958 csum8 += buf8[i]; 12959 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++) 12960 csum8 += buf8[i]; 12961 } else { 12962 for (i = 0; i < size; i++) 12963 csum8 += buf8[i]; 12964 } 12965 12966 if (csum8 == 0) { 12967 err = 0; 12968 goto out; 12969 } 12970 12971 err = -EIO; 12972 goto out; 12973 } 12974 12975 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == 12976 TG3_EEPROM_MAGIC_HW) { 12977 u8 data[NVRAM_SELFBOOT_DATA_SIZE]; 12978 u8 parity[NVRAM_SELFBOOT_DATA_SIZE]; 12979 u8 *buf8 = (u8 *) buf; 12980 12981 /* Separate the parity bits and the data bytes. */ 12982 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) { 12983 if ((i == 0) || (i == 8)) { 12984 int l; 12985 u8 msk; 12986 12987 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1) 12988 parity[k++] = buf8[i] & msk; 12989 i++; 12990 } else if (i == 16) { 12991 int l; 12992 u8 msk; 12993 12994 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1) 12995 parity[k++] = buf8[i] & msk; 12996 i++; 12997 12998 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1) 12999 parity[k++] = buf8[i] & msk; 13000 i++; 13001 } 13002 data[j++] = buf8[i]; 13003 } 13004 13005 err = -EIO; 13006 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) { 13007 u8 hw8 = hweight8(data[i]); 13008 13009 if ((hw8 & 0x1) && parity[i]) 13010 goto out; 13011 else if (!(hw8 & 0x1) && !parity[i]) 13012 goto out; 13013 } 13014 err = 0; 13015 goto out; 13016 } 13017 13018 err = -EIO; 13019 13020 /* Bootstrap checksum at offset 0x10 */ 13021 csum = calc_crc((unsigned char *) buf, 0x10); 13022 if (csum != le32_to_cpu(buf[0x10/4])) 13023 goto out; 13024 13025 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */ 13026 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88); 13027 if (csum != le32_to_cpu(buf[0xfc/4])) 13028 goto out; 13029 13030 kfree(buf); 13031 13032 buf = tg3_vpd_readblock(tp, &len); 13033 if (!buf) 13034 return -ENOMEM; 13035 13036 err = pci_vpd_check_csum(buf, len); 13037 /* go on if no checksum found */ 13038 if (err == 1) 13039 err = 0; 13040 out: 13041 kfree(buf); 13042 return err; 13043 } 13044 13045 #define TG3_SERDES_TIMEOUT_SEC 2 13046 #define TG3_COPPER_TIMEOUT_SEC 6 13047 13048 static int tg3_test_link(struct tg3 *tp) 13049 { 13050 int i, max; 13051 13052 if (!netif_running(tp->dev)) 13053 return -ENODEV; 13054 13055 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) 13056 max = TG3_SERDES_TIMEOUT_SEC; 13057 else 13058 max = TG3_COPPER_TIMEOUT_SEC; 13059 13060 for (i = 0; i < max; i++) { 13061 if (tp->link_up) 13062 return 0; 13063 13064 if (msleep_interruptible(1000)) 13065 break; 13066 } 13067 13068 return -EIO; 13069 } 13070 13071 /* Only test the commonly used registers */ 13072 static int tg3_test_registers(struct tg3 *tp) 13073 { 13074 int i, is_5705, is_5750; 13075 u32 offset, read_mask, write_mask, val, save_val, read_val; 13076 static struct { 13077 u16 offset; 13078 u16 flags; 13079 #define TG3_FL_5705 0x1 13080 #define TG3_FL_NOT_5705 0x2 13081 #define TG3_FL_NOT_5788 0x4 13082 #define TG3_FL_NOT_5750 0x8 13083 u32 read_mask; 13084 u32 write_mask; 13085 } reg_tbl[] = { 13086 /* MAC Control Registers */ 13087 { MAC_MODE, TG3_FL_NOT_5705, 13088 0x00000000, 0x00ef6f8c }, 13089 { MAC_MODE, TG3_FL_5705, 13090 0x00000000, 0x01ef6b8c }, 13091 { 
MAC_STATUS, TG3_FL_NOT_5705, 13092 0x03800107, 0x00000000 }, 13093 { MAC_STATUS, TG3_FL_5705, 13094 0x03800100, 0x00000000 }, 13095 { MAC_ADDR_0_HIGH, 0x0000, 13096 0x00000000, 0x0000ffff }, 13097 { MAC_ADDR_0_LOW, 0x0000, 13098 0x00000000, 0xffffffff }, 13099 { MAC_RX_MTU_SIZE, 0x0000, 13100 0x00000000, 0x0000ffff }, 13101 { MAC_TX_MODE, 0x0000, 13102 0x00000000, 0x00000070 }, 13103 { MAC_TX_LENGTHS, 0x0000, 13104 0x00000000, 0x00003fff }, 13105 { MAC_RX_MODE, TG3_FL_NOT_5705, 13106 0x00000000, 0x000007fc }, 13107 { MAC_RX_MODE, TG3_FL_5705, 13108 0x00000000, 0x000007dc }, 13109 { MAC_HASH_REG_0, 0x0000, 13110 0x00000000, 0xffffffff }, 13111 { MAC_HASH_REG_1, 0x0000, 13112 0x00000000, 0xffffffff }, 13113 { MAC_HASH_REG_2, 0x0000, 13114 0x00000000, 0xffffffff }, 13115 { MAC_HASH_REG_3, 0x0000, 13116 0x00000000, 0xffffffff }, 13117 13118 /* Receive Data and Receive BD Initiator Control Registers. */ 13119 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705, 13120 0x00000000, 0xffffffff }, 13121 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705, 13122 0x00000000, 0xffffffff }, 13123 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705, 13124 0x00000000, 0x00000003 }, 13125 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705, 13126 0x00000000, 0xffffffff }, 13127 { RCVDBDI_STD_BD+0, 0x0000, 13128 0x00000000, 0xffffffff }, 13129 { RCVDBDI_STD_BD+4, 0x0000, 13130 0x00000000, 0xffffffff }, 13131 { RCVDBDI_STD_BD+8, 0x0000, 13132 0x00000000, 0xffff0002 }, 13133 { RCVDBDI_STD_BD+0xc, 0x0000, 13134 0x00000000, 0xffffffff }, 13135 13136 /* Receive BD Initiator Control Registers. */ 13137 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705, 13138 0x00000000, 0xffffffff }, 13139 { RCVBDI_STD_THRESH, TG3_FL_5705, 13140 0x00000000, 0x000003ff }, 13141 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705, 13142 0x00000000, 0xffffffff }, 13143 13144 /* Host Coalescing Control Registers. 
*/ 13145 { HOSTCC_MODE, TG3_FL_NOT_5705, 13146 0x00000000, 0x00000004 }, 13147 { HOSTCC_MODE, TG3_FL_5705, 13148 0x00000000, 0x000000f6 }, 13149 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705, 13150 0x00000000, 0xffffffff }, 13151 { HOSTCC_RXCOL_TICKS, TG3_FL_5705, 13152 0x00000000, 0x000003ff }, 13153 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705, 13154 0x00000000, 0xffffffff }, 13155 { HOSTCC_TXCOL_TICKS, TG3_FL_5705, 13156 0x00000000, 0x000003ff }, 13157 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705, 13158 0x00000000, 0xffffffff }, 13159 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, 13160 0x00000000, 0x000000ff }, 13161 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705, 13162 0x00000000, 0xffffffff }, 13163 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, 13164 0x00000000, 0x000000ff }, 13165 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705, 13166 0x00000000, 0xffffffff }, 13167 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705, 13168 0x00000000, 0xffffffff }, 13169 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705, 13170 0x00000000, 0xffffffff }, 13171 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, 13172 0x00000000, 0x000000ff }, 13173 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705, 13174 0x00000000, 0xffffffff }, 13175 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, 13176 0x00000000, 0x000000ff }, 13177 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705, 13178 0x00000000, 0xffffffff }, 13179 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705, 13180 0x00000000, 0xffffffff }, 13181 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705, 13182 0x00000000, 0xffffffff }, 13183 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000, 13184 0x00000000, 0xffffffff }, 13185 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000, 13186 0x00000000, 0xffffffff }, 13187 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000, 13188 0xffffffff, 0x00000000 }, 13189 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000, 13190 0xffffffff, 0x00000000 }, 13191 13192 /* Buffer Manager Control Registers. 
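* The DMA descriptor pool registers below are flagged TG3_FL_NOT_5705 and are therefore only exercised on pre-5705 parts.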
*/ 13193 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750, 13194 0x00000000, 0x007fff80 }, 13195 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750, 13196 0x00000000, 0x007fffff }, 13197 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000, 13198 0x00000000, 0x0000003f }, 13199 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000, 13200 0x00000000, 0x000001ff }, 13201 { BUFMGR_MB_HIGH_WATER, 0x0000, 13202 0x00000000, 0x000001ff }, 13203 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705, 13204 0xffffffff, 0x00000000 }, 13205 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705, 13206 0xffffffff, 0x00000000 }, 13207 13208 /* Mailbox Registers */ 13209 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000, 13210 0x00000000, 0x000001ff }, 13211 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705, 13212 0x00000000, 0x000001ff }, 13213 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000, 13214 0x00000000, 0x000007ff }, 13215 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000, 13216 0x00000000, 0x000001ff }, 13217 13218 { 0xffff, 0x0000, 0x00000000, 0x00000000 }, 13219 }; 13220 13221 is_5705 = is_5750 = 0; 13222 if (tg3_flag(tp, 5705_PLUS)) { 13223 is_5705 = 1; 13224 if (tg3_flag(tp, 5750_PLUS)) 13225 is_5750 = 1; 13226 } 13227 13228 for (i = 0; reg_tbl[i].offset != 0xffff; i++) { 13229 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705)) 13230 continue; 13231 13232 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705)) 13233 continue; 13234 13235 if (tg3_flag(tp, IS_5788) && 13236 (reg_tbl[i].flags & TG3_FL_NOT_5788)) 13237 continue; 13238 13239 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750)) 13240 continue; 13241 13242 offset = (u32) reg_tbl[i].offset; 13243 read_mask = reg_tbl[i].read_mask; 13244 write_mask = reg_tbl[i].write_mask; 13245 13246 /* Save the original register content */ 13247 save_val = tr32(offset); 13248 13249 /* Determine the read-only value. */ 13250 read_val = save_val & read_mask; 13251 13252 /* Write zero to the register, then make sure the read-only bits 13253 * are not changed and the read/write bits are all zeros. 13254 */ 13255 tw32(offset, 0); 13256 13257 val = tr32(offset); 13258 13259 /* Test the read-only and read/write bits. */ 13260 if (((val & read_mask) != read_val) || (val & write_mask)) 13261 goto out; 13262 13263 /* Write ones to all the bits defined by RdMask and WrMask, then 13264 * make sure the read-only bits are not changed and the 13265 * read/write bits are all ones. 13266 */ 13267 tw32(offset, read_mask | write_mask); 13268 13269 val = tr32(offset); 13270 13271 /* Test the read-only bits. */ 13272 if ((val & read_mask) != read_val) 13273 goto out; 13274 13275 /* Test the read/write bits. 
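* After the read_mask | write_mask write above, every writable bit must read back as one, i.e. (val & write_mask) == write_mask.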
*/ 13276 if ((val & write_mask) != write_mask) 13277 goto out; 13278 13279 tw32(offset, save_val); 13280 } 13281 13282 return 0; 13283 13284 out: 13285 if (netif_msg_hw(tp)) 13286 netdev_err(tp->dev, 13287 "Register test failed at offset %x\n", offset); 13288 tw32(offset, save_val); 13289 return -EIO; 13290 } 13291 13292 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len) 13293 { 13294 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a }; 13295 int i; 13296 u32 j; 13297 13298 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) { 13299 for (j = 0; j < len; j += 4) { 13300 u32 val; 13301 13302 tg3_write_mem(tp, offset + j, test_pattern[i]); 13303 tg3_read_mem(tp, offset + j, &val); 13304 if (val != test_pattern[i]) 13305 return -EIO; 13306 } 13307 } 13308 return 0; 13309 } 13310 13311 static int tg3_test_memory(struct tg3 *tp) 13312 { 13313 static struct mem_entry { 13314 u32 offset; 13315 u32 len; 13316 } mem_tbl_570x[] = { 13317 { 0x00000000, 0x00b50}, 13318 { 0x00002000, 0x1c000}, 13319 { 0xffffffff, 0x00000} 13320 }, mem_tbl_5705[] = { 13321 { 0x00000100, 0x0000c}, 13322 { 0x00000200, 0x00008}, 13323 { 0x00004000, 0x00800}, 13324 { 0x00006000, 0x01000}, 13325 { 0x00008000, 0x02000}, 13326 { 0x00010000, 0x0e000}, 13327 { 0xffffffff, 0x00000} 13328 }, mem_tbl_5755[] = { 13329 { 0x00000200, 0x00008}, 13330 { 0x00004000, 0x00800}, 13331 { 0x00006000, 0x00800}, 13332 { 0x00008000, 0x02000}, 13333 { 0x00010000, 0x0c000}, 13334 { 0xffffffff, 0x00000} 13335 }, mem_tbl_5906[] = { 13336 { 0x00000200, 0x00008}, 13337 { 0x00004000, 0x00400}, 13338 { 0x00006000, 0x00400}, 13339 { 0x00008000, 0x01000}, 13340 { 0x00010000, 0x01000}, 13341 { 0xffffffff, 0x00000} 13342 }, mem_tbl_5717[] = { 13343 { 0x00000200, 0x00008}, 13344 { 0x00010000, 0x0a000}, 13345 { 0x00020000, 0x13c00}, 13346 { 0xffffffff, 0x00000} 13347 }, mem_tbl_57765[] = { 13348 { 0x00000200, 0x00008}, 13349 { 0x00004000, 0x00800}, 13350 { 0x00006000, 0x09800}, 13351 { 0x00010000, 0x0a000}, 13352 { 0xffffffff, 0x00000} 13353 }; 13354 struct mem_entry *mem_tbl; 13355 int err = 0; 13356 int i; 13357 13358 if (tg3_flag(tp, 5717_PLUS)) 13359 mem_tbl = mem_tbl_5717; 13360 else if (tg3_flag(tp, 57765_CLASS) || 13361 tg3_asic_rev(tp) == ASIC_REV_5762) 13362 mem_tbl = mem_tbl_57765; 13363 else if (tg3_flag(tp, 5755_PLUS)) 13364 mem_tbl = mem_tbl_5755; 13365 else if (tg3_asic_rev(tp) == ASIC_REV_5906) 13366 mem_tbl = mem_tbl_5906; 13367 else if (tg3_flag(tp, 5705_PLUS)) 13368 mem_tbl = mem_tbl_5705; 13369 else 13370 mem_tbl = mem_tbl_570x; 13371 13372 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) { 13373 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len); 13374 if (err) 13375 break; 13376 } 13377 13378 return err; 13379 } 13380 13381 #define TG3_TSO_MSS 500 13382 13383 #define TG3_TSO_IP_HDR_LEN 20 13384 #define TG3_TSO_TCP_HDR_LEN 20 13385 #define TG3_TSO_TCP_OPT_LEN 12 13386 13387 static const u8 tg3_tso_header[] = { 13388 0x08, 0x00, 13389 0x45, 0x00, 0x00, 0x00, 13390 0x00, 0x00, 0x40, 0x00, 13391 0x40, 0x06, 0x00, 0x00, 13392 0x0a, 0x00, 0x00, 0x01, 13393 0x0a, 0x00, 0x00, 0x02, 13394 0x0d, 0x00, 0xe0, 0x00, 13395 0x00, 0x00, 0x01, 0x00, 13396 0x00, 0x00, 0x02, 0x00, 13397 0x80, 0x10, 0x10, 0x00, 13398 0x14, 0x09, 0x00, 0x00, 13399 0x01, 0x01, 0x08, 0x0a, 13400 0x11, 0x11, 0x11, 0x11, 13401 0x11, 0x11, 0x11, 0x11, 13402 }; 13403 13404 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback) 13405 { 13406 u32 rx_start_idx, rx_idx, tx_idx, opaque_key; 13407 u32 base_flags = 0, mss = 0, 
desc_idx, coal_now, data_off, val; 13408 u32 budget; 13409 struct sk_buff *skb; 13410 u8 *tx_data, *rx_data; 13411 dma_addr_t map; 13412 int num_pkts, tx_len, rx_len, i, err; 13413 struct tg3_rx_buffer_desc *desc; 13414 struct tg3_napi *tnapi, *rnapi; 13415 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring; 13416 13417 tnapi = &tp->napi[0]; 13418 rnapi = &tp->napi[0]; 13419 if (tp->irq_cnt > 1) { 13420 if (tg3_flag(tp, ENABLE_RSS)) 13421 rnapi = &tp->napi[1]; 13422 if (tg3_flag(tp, ENABLE_TSS)) 13423 tnapi = &tp->napi[1]; 13424 } 13425 coal_now = tnapi->coal_now | rnapi->coal_now; 13426 13427 err = -EIO; 13428 13429 tx_len = pktsz; 13430 skb = netdev_alloc_skb(tp->dev, tx_len); 13431 if (!skb) 13432 return -ENOMEM; 13433 13434 tx_data = skb_put(skb, tx_len); 13435 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN); 13436 memset(tx_data + ETH_ALEN, 0x0, 8); 13437 13438 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN); 13439 13440 if (tso_loopback) { 13441 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN]; 13442 13443 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN + 13444 TG3_TSO_TCP_OPT_LEN; 13445 13446 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header, 13447 sizeof(tg3_tso_header)); 13448 mss = TG3_TSO_MSS; 13449 13450 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header); 13451 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS); 13452 13453 /* Set the total length field in the IP header */ 13454 iph->tot_len = htons((u16)(mss + hdr_len)); 13455 13456 base_flags = (TXD_FLAG_CPU_PRE_DMA | 13457 TXD_FLAG_CPU_POST_DMA); 13458 13459 if (tg3_flag(tp, HW_TSO_1) || 13460 tg3_flag(tp, HW_TSO_2) || 13461 tg3_flag(tp, HW_TSO_3)) { 13462 struct tcphdr *th; 13463 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN; 13464 th = (struct tcphdr *)&tx_data[val]; 13465 th->check = 0; 13466 } else 13467 base_flags |= TXD_FLAG_TCPUDP_CSUM; 13468 13469 if (tg3_flag(tp, HW_TSO_3)) { 13470 mss |= (hdr_len & 0xc) << 12; 13471 if (hdr_len & 0x10) 13472 base_flags |= 0x00000010; 13473 base_flags |= (hdr_len & 0x3e0) << 5; 13474 } else if (tg3_flag(tp, HW_TSO_2)) 13475 mss |= hdr_len << 9; 13476 else if (tg3_flag(tp, HW_TSO_1) || 13477 tg3_asic_rev(tp) == ASIC_REV_5705) { 13478 mss |= (TG3_TSO_TCP_OPT_LEN << 9); 13479 } else { 13480 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10); 13481 } 13482 13483 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header); 13484 } else { 13485 num_pkts = 1; 13486 data_off = ETH_HLEN; 13487 13488 if (tg3_flag(tp, USE_JUMBO_BDFLAG) && 13489 tx_len > VLAN_ETH_FRAME_LEN) 13490 base_flags |= TXD_FLAG_JMB_PKT; 13491 } 13492 13493 for (i = data_off; i < tx_len; i++) 13494 tx_data[i] = (u8) (i & 0xff); 13495 13496 map = dma_map_single(&tp->pdev->dev, skb->data, tx_len, DMA_TO_DEVICE); 13497 if (dma_mapping_error(&tp->pdev->dev, map)) { 13498 dev_kfree_skb(skb); 13499 return -EIO; 13500 } 13501 13502 val = tnapi->tx_prod; 13503 tnapi->tx_buffers[val].skb = skb; 13504 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map); 13505 13506 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 13507 rnapi->coal_now); 13508 13509 udelay(10); 13510 13511 rx_start_idx = rnapi->hw_status->idx[0].rx_producer; 13512 13513 budget = tg3_tx_avail(tnapi); 13514 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len, 13515 base_flags | TXD_FLAG_END, mss, 0)) { 13516 tnapi->tx_buffers[val].skb = NULL; 13517 dev_kfree_skb(skb); 13518 return -EIO; 13519 } 13520 13521 tnapi->tx_prod++; 13522 13523 /* Sync BD data before updating mailbox */ 13524 wmb(); 13525 13526 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod); 13527 
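/* Read the producer mailbox back; the readback flushes the posted write to the device before the completion poll below. */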
tr32_mailbox(tnapi->prodmbox); 13528 13529 udelay(10); 13530 13531 /* 350 usec to allow enough time on some 10/100 Mbps devices. */ 13532 for (i = 0; i < 35; i++) { 13533 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 13534 coal_now); 13535 13536 udelay(10); 13537 13538 tx_idx = tnapi->hw_status->idx[0].tx_consumer; 13539 rx_idx = rnapi->hw_status->idx[0].rx_producer; 13540 if ((tx_idx == tnapi->tx_prod) && 13541 (rx_idx == (rx_start_idx + num_pkts))) 13542 break; 13543 } 13544 13545 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1); 13546 dev_kfree_skb(skb); 13547 13548 if (tx_idx != tnapi->tx_prod) 13549 goto out; 13550 13551 if (rx_idx != rx_start_idx + num_pkts) 13552 goto out; 13553 13554 val = data_off; 13555 while (rx_idx != rx_start_idx) { 13556 desc = &rnapi->rx_rcb[rx_start_idx++]; 13557 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; 13558 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; 13559 13560 if ((desc->err_vlan & RXD_ERR_MASK) != 0 && 13561 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) 13562 goto out; 13563 13564 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) 13565 - ETH_FCS_LEN; 13566 13567 if (!tso_loopback) { 13568 if (rx_len != tx_len) 13569 goto out; 13570 13571 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) { 13572 if (opaque_key != RXD_OPAQUE_RING_STD) 13573 goto out; 13574 } else { 13575 if (opaque_key != RXD_OPAQUE_RING_JUMBO) 13576 goto out; 13577 } 13578 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && 13579 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK) 13580 >> RXD_TCPCSUM_SHIFT != 0xffff) { 13581 goto out; 13582 } 13583 13584 if (opaque_key == RXD_OPAQUE_RING_STD) { 13585 rx_data = tpr->rx_std_buffers[desc_idx].data; 13586 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], 13587 mapping); 13588 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { 13589 rx_data = tpr->rx_jmb_buffers[desc_idx].data; 13590 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx], 13591 mapping); 13592 } else 13593 goto out; 13594 13595 dma_sync_single_for_cpu(&tp->pdev->dev, map, rx_len, 13596 DMA_FROM_DEVICE); 13597 13598 rx_data += TG3_RX_OFFSET(tp); 13599 for (i = data_off; i < rx_len; i++, val++) { 13600 if (*(rx_data + i) != (u8) (val & 0xff)) 13601 goto out; 13602 } 13603 } 13604 13605 err = 0; 13606 13607 /* tg3_free_rings will unmap and free the rx_data */ 13608 out: 13609 return err; 13610 } 13611 13612 #define TG3_STD_LOOPBACK_FAILED 1 13613 #define TG3_JMB_LOOPBACK_FAILED 2 13614 #define TG3_TSO_LOOPBACK_FAILED 4 13615 #define TG3_LOOPBACK_FAILED \ 13616 (TG3_STD_LOOPBACK_FAILED | \ 13617 TG3_JMB_LOOPBACK_FAILED | \ 13618 TG3_TSO_LOOPBACK_FAILED) 13619 13620 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk) 13621 { 13622 int err = -EIO; 13623 u32 eee_cap; 13624 u32 jmb_pkt_sz = 9000; 13625 13626 if (tp->dma_limit) 13627 jmb_pkt_sz = tp->dma_limit - ETH_HLEN; 13628 13629 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP; 13630 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP; 13631 13632 if (!netif_running(tp->dev)) { 13633 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13634 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13635 if (do_extlpbk) 13636 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13637 goto done; 13638 } 13639 13640 err = tg3_reset_hw(tp, true); 13641 if (err) { 13642 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13643 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13644 if (do_extlpbk) 13645 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13646 goto done; 13647 } 13648 13649 if (tg3_flag(tp, ENABLE_RSS)) { 13650 int 
i; 13651 13652 /* Reroute all rx packets to the 1st queue */ 13653 for (i = MAC_RSS_INDIR_TBL_0; 13654 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4) 13655 tw32(i, 0x0); 13656 } 13657 13658 /* HW errata - mac loopback fails in some cases on 5780. 13659 * Normal traffic and PHY loopback are not affected by 13660 * errata. Also, the MAC loopback test is deprecated for 13661 * all newer ASIC revisions. 13662 */ 13663 if (tg3_asic_rev(tp) != ASIC_REV_5780 && 13664 !tg3_flag(tp, CPMU_PRESENT)) { 13665 tg3_mac_loopback(tp, true); 13666 13667 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) 13668 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED; 13669 13670 if (tg3_flag(tp, JUMBO_RING_ENABLE) && 13671 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false)) 13672 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED; 13673 13674 tg3_mac_loopback(tp, false); 13675 } 13676 13677 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 13678 !tg3_flag(tp, USE_PHYLIB)) { 13679 int i; 13680 13681 tg3_phy_lpbk_set(tp, 0, false); 13682 13683 /* Wait for link */ 13684 for (i = 0; i < 100; i++) { 13685 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) 13686 break; 13687 mdelay(1); 13688 } 13689 13690 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) 13691 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED; 13692 if (tg3_flag(tp, TSO_CAPABLE) && 13693 tg3_run_loopback(tp, ETH_FRAME_LEN, true)) 13694 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED; 13695 if (tg3_flag(tp, JUMBO_RING_ENABLE) && 13696 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false)) 13697 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED; 13698 13699 if (do_extlpbk) { 13700 tg3_phy_lpbk_set(tp, 0, true); 13701 13702 /* All link indications report up, but the hardware 13703 * isn't really ready for about 20 msec. Double it 13704 * to be sure. 13705 */ 13706 mdelay(40); 13707 13708 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) 13709 data[TG3_EXT_LOOPB_TEST] |= 13710 TG3_STD_LOOPBACK_FAILED; 13711 if (tg3_flag(tp, TSO_CAPABLE) && 13712 tg3_run_loopback(tp, ETH_FRAME_LEN, true)) 13713 data[TG3_EXT_LOOPB_TEST] |= 13714 TG3_TSO_LOOPBACK_FAILED; 13715 if (tg3_flag(tp, JUMBO_RING_ENABLE) && 13716 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false)) 13717 data[TG3_EXT_LOOPB_TEST] |= 13718 TG3_JMB_LOOPBACK_FAILED; 13719 } 13720 13721 /* Re-enable gphy autopowerdown. */ 13722 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) 13723 tg3_phy_toggle_apd(tp, true); 13724 } 13725 13726 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] | 13727 data[TG3_EXT_LOOPB_TEST]) ? 
-EIO : 0; 13728 13729 done: 13730 tp->phy_flags |= eee_cap; 13731 13732 return err; 13733 } 13734 13735 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, 13736 u64 *data) 13737 { 13738 struct tg3 *tp = netdev_priv(dev); 13739 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB; 13740 13741 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { 13742 if (tg3_power_up(tp)) { 13743 etest->flags |= ETH_TEST_FL_FAILED; 13744 memset(data, 1, sizeof(u64) * TG3_NUM_TEST); 13745 return; 13746 } 13747 tg3_ape_driver_state_change(tp, RESET_KIND_INIT); 13748 } 13749 13750 memset(data, 0, sizeof(u64) * TG3_NUM_TEST); 13751 13752 if (tg3_test_nvram(tp) != 0) { 13753 etest->flags |= ETH_TEST_FL_FAILED; 13754 data[TG3_NVRAM_TEST] = 1; 13755 } 13756 if (!doextlpbk && tg3_test_link(tp)) { 13757 etest->flags |= ETH_TEST_FL_FAILED; 13758 data[TG3_LINK_TEST] = 1; 13759 } 13760 if (etest->flags & ETH_TEST_FL_OFFLINE) { 13761 int err, err2 = 0, irq_sync = 0; 13762 13763 if (netif_running(dev)) { 13764 tg3_phy_stop(tp); 13765 tg3_netif_stop(tp); 13766 irq_sync = 1; 13767 } 13768 13769 tg3_full_lock(tp, irq_sync); 13770 tg3_halt(tp, RESET_KIND_SUSPEND, 1); 13771 err = tg3_nvram_lock(tp); 13772 tg3_halt_cpu(tp, RX_CPU_BASE); 13773 if (!tg3_flag(tp, 5705_PLUS)) 13774 tg3_halt_cpu(tp, TX_CPU_BASE); 13775 if (!err) 13776 tg3_nvram_unlock(tp); 13777 13778 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 13779 tg3_phy_reset(tp); 13780 13781 if (tg3_test_registers(tp) != 0) { 13782 etest->flags |= ETH_TEST_FL_FAILED; 13783 data[TG3_REGISTER_TEST] = 1; 13784 } 13785 13786 if (tg3_test_memory(tp) != 0) { 13787 etest->flags |= ETH_TEST_FL_FAILED; 13788 data[TG3_MEMORY_TEST] = 1; 13789 } 13790 13791 if (doextlpbk) 13792 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; 13793 13794 if (tg3_test_loopback(tp, data, doextlpbk)) 13795 etest->flags |= ETH_TEST_FL_FAILED; 13796 13797 tg3_full_unlock(tp); 13798 13799 if (tg3_test_interrupt(tp) != 0) { 13800 etest->flags |= ETH_TEST_FL_FAILED; 13801 data[TG3_INTERRUPT_TEST] = 1; 13802 } 13803 13804 tg3_full_lock(tp, 0); 13805 13806 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 13807 if (netif_running(dev)) { 13808 tg3_flag_set(tp, INIT_COMPLETE); 13809 err2 = tg3_restart_hw(tp, true); 13810 if (!err2) 13811 tg3_netif_start(tp); 13812 } 13813 13814 tg3_full_unlock(tp); 13815 13816 if (irq_sync && !err2) 13817 tg3_phy_start(tp); 13818 } 13819 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 13820 tg3_power_down_prepare(tp); 13821 13822 } 13823 13824 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) 13825 { 13826 struct tg3 *tp = netdev_priv(dev); 13827 struct hwtstamp_config stmpconf; 13828 13829 if (!tg3_flag(tp, PTP_CAPABLE)) 13830 return -EOPNOTSUPP; 13831 13832 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf))) 13833 return -EFAULT; 13834 13835 if (stmpconf.tx_type != HWTSTAMP_TX_ON && 13836 stmpconf.tx_type != HWTSTAMP_TX_OFF) 13837 return -ERANGE; 13838 13839 switch (stmpconf.rx_filter) { 13840 case HWTSTAMP_FILTER_NONE: 13841 tp->rxptpctl = 0; 13842 break; 13843 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 13844 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | 13845 TG3_RX_PTP_CTL_ALL_V1_EVENTS; 13846 break; 13847 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 13848 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | 13849 TG3_RX_PTP_CTL_SYNC_EVNT; 13850 break; 13851 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 13852 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | 13853 TG3_RX_PTP_CTL_DELAY_REQ; 13854 break; 13855 case HWTSTAMP_FILTER_PTP_V2_EVENT: 13856 tp->rxptpctl = 
TG3_RX_PTP_CTL_RX_PTP_V2_EN | 13857 TG3_RX_PTP_CTL_ALL_V2_EVENTS; 13858 break; 13859 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 13860 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | 13861 TG3_RX_PTP_CTL_ALL_V2_EVENTS; 13862 break; 13863 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 13864 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | 13865 TG3_RX_PTP_CTL_ALL_V2_EVENTS; 13866 break; 13867 case HWTSTAMP_FILTER_PTP_V2_SYNC: 13868 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN | 13869 TG3_RX_PTP_CTL_SYNC_EVNT; 13870 break; 13871 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 13872 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | 13873 TG3_RX_PTP_CTL_SYNC_EVNT; 13874 break; 13875 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 13876 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | 13877 TG3_RX_PTP_CTL_SYNC_EVNT; 13878 break; 13879 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 13880 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN | 13881 TG3_RX_PTP_CTL_DELAY_REQ; 13882 break; 13883 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 13884 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | 13885 TG3_RX_PTP_CTL_DELAY_REQ; 13886 break; 13887 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 13888 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | 13889 TG3_RX_PTP_CTL_DELAY_REQ; 13890 break; 13891 default: 13892 return -ERANGE; 13893 } 13894 13895 if (netif_running(dev) && tp->rxptpctl) 13896 tw32(TG3_RX_PTP_CTL, 13897 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK); 13898 13899 if (stmpconf.tx_type == HWTSTAMP_TX_ON) 13900 tg3_flag_set(tp, TX_TSTAMP_EN); 13901 else 13902 tg3_flag_clear(tp, TX_TSTAMP_EN); 13903 13904 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ? 13905 -EFAULT : 0; 13906 } 13907 13908 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) 13909 { 13910 struct tg3 *tp = netdev_priv(dev); 13911 struct hwtstamp_config stmpconf; 13912 13913 if (!tg3_flag(tp, PTP_CAPABLE)) 13914 return -EOPNOTSUPP; 13915 13916 stmpconf.flags = 0; 13917 stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ? 
13918 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF); 13919 13920 switch (tp->rxptpctl) { 13921 case 0: 13922 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE; 13923 break; 13924 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS: 13925 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; 13926 break; 13927 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT: 13928 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC; 13929 break; 13930 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ: 13931 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ; 13932 break; 13933 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS: 13934 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; 13935 break; 13936 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS: 13937 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; 13938 break; 13939 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS: 13940 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; 13941 break; 13942 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT: 13943 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC; 13944 break; 13945 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT: 13946 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC; 13947 break; 13948 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT: 13949 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC; 13950 break; 13951 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ: 13952 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ; 13953 break; 13954 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ: 13955 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ; 13956 break; 13957 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ: 13958 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ; 13959 break; 13960 default: 13961 WARN_ON_ONCE(1); 13962 return -ERANGE; 13963 } 13964 13965 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ? 
13966 -EFAULT : 0; 13967 } 13968 13969 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 13970 { 13971 struct mii_ioctl_data *data = if_mii(ifr); 13972 struct tg3 *tp = netdev_priv(dev); 13973 int err; 13974 13975 if (tg3_flag(tp, USE_PHYLIB)) { 13976 struct phy_device *phydev; 13977 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 13978 return -EAGAIN; 13979 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 13980 return phy_mii_ioctl(phydev, ifr, cmd); 13981 } 13982 13983 switch (cmd) { 13984 case SIOCGMIIPHY: 13985 data->phy_id = tp->phy_addr; 13986 13987 fallthrough; 13988 case SIOCGMIIREG: { 13989 u32 mii_regval; 13990 13991 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 13992 break; /* We have no PHY */ 13993 13994 if (!netif_running(dev)) 13995 return -EAGAIN; 13996 13997 spin_lock_bh(&tp->lock); 13998 err = __tg3_readphy(tp, data->phy_id & 0x1f, 13999 data->reg_num & 0x1f, &mii_regval); 14000 spin_unlock_bh(&tp->lock); 14001 14002 data->val_out = mii_regval; 14003 14004 return err; 14005 } 14006 14007 case SIOCSMIIREG: 14008 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 14009 break; /* We have no PHY */ 14010 14011 if (!netif_running(dev)) 14012 return -EAGAIN; 14013 14014 spin_lock_bh(&tp->lock); 14015 err = __tg3_writephy(tp, data->phy_id & 0x1f, 14016 data->reg_num & 0x1f, data->val_in); 14017 spin_unlock_bh(&tp->lock); 14018 14019 return err; 14020 14021 case SIOCSHWTSTAMP: 14022 return tg3_hwtstamp_set(dev, ifr); 14023 14024 case SIOCGHWTSTAMP: 14025 return tg3_hwtstamp_get(dev, ifr); 14026 14027 default: 14028 /* do nothing */ 14029 break; 14030 } 14031 return -EOPNOTSUPP; 14032 } 14033 14034 static int tg3_get_coalesce(struct net_device *dev, 14035 struct ethtool_coalesce *ec, 14036 struct kernel_ethtool_coalesce *kernel_coal, 14037 struct netlink_ext_ack *extack) 14038 { 14039 struct tg3 *tp = netdev_priv(dev); 14040 14041 memcpy(ec, &tp->coal, sizeof(*ec)); 14042 return 0; 14043 } 14044 14045 static int tg3_set_coalesce(struct net_device *dev, 14046 struct ethtool_coalesce *ec, 14047 struct kernel_ethtool_coalesce *kernel_coal, 14048 struct netlink_ext_ack *extack) 14049 { 14050 struct tg3 *tp = netdev_priv(dev); 14051 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0; 14052 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0; 14053 14054 if (!tg3_flag(tp, 5705_PLUS)) { 14055 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT; 14056 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT; 14057 max_stat_coal_ticks = MAX_STAT_COAL_TICKS; 14058 min_stat_coal_ticks = MIN_STAT_COAL_TICKS; 14059 } 14060 14061 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) || 14062 (!ec->rx_coalesce_usecs) || 14063 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) || 14064 (!ec->tx_coalesce_usecs) || 14065 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) || 14066 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) || 14067 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) || 14068 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) || 14069 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) || 14070 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) || 14071 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) || 14072 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks)) 14073 return -EINVAL; 14074 14075 /* Only copy relevant parameters, ignore all others. 
*/ 14076 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs; 14077 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs; 14078 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames; 14079 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames; 14080 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq; 14081 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq; 14082 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq; 14083 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq; 14084 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs; 14085 14086 if (netif_running(dev)) { 14087 tg3_full_lock(tp, 0); 14088 __tg3_set_coalesce(tp, &tp->coal); 14089 tg3_full_unlock(tp); 14090 } 14091 return 0; 14092 } 14093 14094 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata) 14095 { 14096 struct tg3 *tp = netdev_priv(dev); 14097 14098 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { 14099 netdev_warn(tp->dev, "Board does not support EEE!\n"); 14100 return -EOPNOTSUPP; 14101 } 14102 14103 if (edata->advertised != tp->eee.advertised) { 14104 netdev_warn(tp->dev, 14105 "Direct manipulation of EEE advertisement is not supported\n"); 14106 return -EINVAL; 14107 } 14108 14109 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) { 14110 netdev_warn(tp->dev, 14111 "Maximal Tx Lpi timer supported is %#x(u)\n", 14112 TG3_CPMU_DBTMR1_LNKIDLE_MAX); 14113 return -EINVAL; 14114 } 14115 14116 tp->eee = *edata; 14117 14118 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; 14119 tg3_warn_mgmt_link_flap(tp); 14120 14121 if (netif_running(tp->dev)) { 14122 tg3_full_lock(tp, 0); 14123 tg3_setup_eee(tp); 14124 tg3_phy_reset(tp); 14125 tg3_full_unlock(tp); 14126 } 14127 14128 return 0; 14129 } 14130 14131 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata) 14132 { 14133 struct tg3 *tp = netdev_priv(dev); 14134 14135 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { 14136 netdev_warn(tp->dev, 14137 "Board does not support EEE!\n"); 14138 return -EOPNOTSUPP; 14139 } 14140 14141 *edata = tp->eee; 14142 return 0; 14143 } 14144 14145 static const struct ethtool_ops tg3_ethtool_ops = { 14146 .supported_coalesce_params = ETHTOOL_COALESCE_USECS | 14147 ETHTOOL_COALESCE_MAX_FRAMES | 14148 ETHTOOL_COALESCE_USECS_IRQ | 14149 ETHTOOL_COALESCE_MAX_FRAMES_IRQ | 14150 ETHTOOL_COALESCE_STATS_BLOCK_USECS, 14151 .get_drvinfo = tg3_get_drvinfo, 14152 .get_regs_len = tg3_get_regs_len, 14153 .get_regs = tg3_get_regs, 14154 .get_wol = tg3_get_wol, 14155 .set_wol = tg3_set_wol, 14156 .get_msglevel = tg3_get_msglevel, 14157 .set_msglevel = tg3_set_msglevel, 14158 .nway_reset = tg3_nway_reset, 14159 .get_link = ethtool_op_get_link, 14160 .get_eeprom_len = tg3_get_eeprom_len, 14161 .get_eeprom = tg3_get_eeprom, 14162 .set_eeprom = tg3_set_eeprom, 14163 .get_ringparam = tg3_get_ringparam, 14164 .set_ringparam = tg3_set_ringparam, 14165 .get_pauseparam = tg3_get_pauseparam, 14166 .set_pauseparam = tg3_set_pauseparam, 14167 .self_test = tg3_self_test, 14168 .get_strings = tg3_get_strings, 14169 .set_phys_id = tg3_set_phys_id, 14170 .get_ethtool_stats = tg3_get_ethtool_stats, 14171 .get_coalesce = tg3_get_coalesce, 14172 .set_coalesce = tg3_set_coalesce, 14173 .get_sset_count = tg3_get_sset_count, 14174 .get_rxnfc = tg3_get_rxnfc, 14175 .get_rxfh_indir_size = tg3_get_rxfh_indir_size, 14176 .get_rxfh = tg3_get_rxfh, 14177 .set_rxfh = tg3_set_rxfh, 14178 .get_channels = tg3_get_channels, 14179 .set_channels = tg3_set_channels, 14180 .get_ts_info 
= tg3_get_ts_info, 14181 .get_eee = tg3_get_eee, 14182 .set_eee = tg3_set_eee, 14183 .get_link_ksettings = tg3_get_link_ksettings, 14184 .set_link_ksettings = tg3_set_link_ksettings, 14185 }; 14186 14187 static void tg3_get_stats64(struct net_device *dev, 14188 struct rtnl_link_stats64 *stats) 14189 { 14190 struct tg3 *tp = netdev_priv(dev); 14191 14192 spin_lock_bh(&tp->lock); 14193 if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) { 14194 *stats = tp->net_stats_prev; 14195 spin_unlock_bh(&tp->lock); 14196 return; 14197 } 14198 14199 tg3_get_nstats(tp, stats); 14200 spin_unlock_bh(&tp->lock); 14201 } 14202 14203 static void tg3_set_rx_mode(struct net_device *dev) 14204 { 14205 struct tg3 *tp = netdev_priv(dev); 14206 14207 if (!netif_running(dev)) 14208 return; 14209 14210 tg3_full_lock(tp, 0); 14211 __tg3_set_rx_mode(dev); 14212 tg3_full_unlock(tp); 14213 } 14214 14215 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp, 14216 int new_mtu) 14217 { 14218 dev->mtu = new_mtu; 14219 14220 if (new_mtu > ETH_DATA_LEN) { 14221 if (tg3_flag(tp, 5780_CLASS)) { 14222 netdev_update_features(dev); 14223 tg3_flag_clear(tp, TSO_CAPABLE); 14224 } else { 14225 tg3_flag_set(tp, JUMBO_RING_ENABLE); 14226 } 14227 } else { 14228 if (tg3_flag(tp, 5780_CLASS)) { 14229 tg3_flag_set(tp, TSO_CAPABLE); 14230 netdev_update_features(dev); 14231 } 14232 tg3_flag_clear(tp, JUMBO_RING_ENABLE); 14233 } 14234 } 14235 14236 static int tg3_change_mtu(struct net_device *dev, int new_mtu) 14237 { 14238 struct tg3 *tp = netdev_priv(dev); 14239 int err; 14240 bool reset_phy = false; 14241 14242 if (!netif_running(dev)) { 14243 /* We'll just catch it later when the 14244 * device is up'd. 14245 */ 14246 tg3_set_mtu(dev, tp, new_mtu); 14247 return 0; 14248 } 14249 14250 tg3_phy_stop(tp); 14251 14252 tg3_netif_stop(tp); 14253 14254 tg3_set_mtu(dev, tp, new_mtu); 14255 14256 tg3_full_lock(tp, 1); 14257 14258 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 14259 14260 /* Reset PHY, otherwise the read DMA engine will be in a mode that 14261 * breaks all requests to 256 bytes. 
14262 */ 14263 if (tg3_asic_rev(tp) == ASIC_REV_57766 || 14264 tg3_asic_rev(tp) == ASIC_REV_5717 || 14265 tg3_asic_rev(tp) == ASIC_REV_5719 || 14266 tg3_asic_rev(tp) == ASIC_REV_5720) 14267 reset_phy = true; 14268 14269 err = tg3_restart_hw(tp, reset_phy); 14270 14271 if (!err) 14272 tg3_netif_start(tp); 14273 14274 tg3_full_unlock(tp); 14275 14276 if (!err) 14277 tg3_phy_start(tp); 14278 14279 return err; 14280 } 14281 14282 static const struct net_device_ops tg3_netdev_ops = { 14283 .ndo_open = tg3_open, 14284 .ndo_stop = tg3_close, 14285 .ndo_start_xmit = tg3_start_xmit, 14286 .ndo_get_stats64 = tg3_get_stats64, 14287 .ndo_validate_addr = eth_validate_addr, 14288 .ndo_set_rx_mode = tg3_set_rx_mode, 14289 .ndo_set_mac_address = tg3_set_mac_addr, 14290 .ndo_eth_ioctl = tg3_ioctl, 14291 .ndo_tx_timeout = tg3_tx_timeout, 14292 .ndo_change_mtu = tg3_change_mtu, 14293 .ndo_fix_features = tg3_fix_features, 14294 .ndo_set_features = tg3_set_features, 14295 #ifdef CONFIG_NET_POLL_CONTROLLER 14296 .ndo_poll_controller = tg3_poll_controller, 14297 #endif 14298 }; 14299 14300 static void tg3_get_eeprom_size(struct tg3 *tp) 14301 { 14302 u32 cursize, val, magic; 14303 14304 tp->nvram_size = EEPROM_CHIP_SIZE; 14305 14306 if (tg3_nvram_read(tp, 0, &magic) != 0) 14307 return; 14308 14309 if ((magic != TG3_EEPROM_MAGIC) && 14310 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) && 14311 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW)) 14312 return; 14313 14314 /* 14315 * Size the chip by reading offsets at increasing powers of two. 14316 * When we encounter our validation signature, we know the addressing 14317 * has wrapped around, and thus have our chip size. 14318 */ 14319 cursize = 0x10; 14320 14321 while (cursize < tp->nvram_size) { 14322 if (tg3_nvram_read(tp, cursize, &val) != 0) 14323 return; 14324 14325 if (val == magic) 14326 break; 14327 14328 cursize <<= 1; 14329 } 14330 14331 tp->nvram_size = cursize; 14332 } 14333 14334 static void tg3_get_nvram_size(struct tg3 *tp) 14335 { 14336 u32 val; 14337 14338 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0) 14339 return; 14340 14341 /* Selfboot format */ 14342 if (val != TG3_EEPROM_MAGIC) { 14343 tg3_get_eeprom_size(tp); 14344 return; 14345 } 14346 14347 if (tg3_nvram_read(tp, 0xf0, &val) == 0) { 14348 if (val != 0) { 14349 /* This is confusing. We want to operate on the 14350 * 16-bit value at offset 0xf2. The tg3_nvram_read() 14351 * call will read from NVRAM and byteswap the data 14352 * according to the byteswapping settings for all 14353 * other register accesses. This ensures the data we 14354 * want will always reside in the lower 16-bits. 14355 * However, the data in NVRAM is in LE format, which 14356 * means the data from the NVRAM read will always be 14357 * opposite the endianness of the CPU. The 16-bit 14358 * byteswap then brings the data to CPU endianness. 
14359 */ 14360 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024; 14361 return; 14362 } 14363 } 14364 tp->nvram_size = TG3_NVRAM_SIZE_512KB; 14365 } 14366 14367 static void tg3_get_nvram_info(struct tg3 *tp) 14368 { 14369 u32 nvcfg1; 14370 14371 nvcfg1 = tr32(NVRAM_CFG1); 14372 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) { 14373 tg3_flag_set(tp, FLASH); 14374 } else { 14375 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 14376 tw32(NVRAM_CFG1, nvcfg1); 14377 } 14378 14379 if (tg3_asic_rev(tp) == ASIC_REV_5750 || 14380 tg3_flag(tp, 5780_CLASS)) { 14381 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) { 14382 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED: 14383 tp->nvram_jedecnum = JEDEC_ATMEL; 14384 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; 14385 tg3_flag_set(tp, NVRAM_BUFFERED); 14386 break; 14387 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED: 14388 tp->nvram_jedecnum = JEDEC_ATMEL; 14389 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE; 14390 break; 14391 case FLASH_VENDOR_ATMEL_EEPROM: 14392 tp->nvram_jedecnum = JEDEC_ATMEL; 14393 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 14394 tg3_flag_set(tp, NVRAM_BUFFERED); 14395 break; 14396 case FLASH_VENDOR_ST: 14397 tp->nvram_jedecnum = JEDEC_ST; 14398 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE; 14399 tg3_flag_set(tp, NVRAM_BUFFERED); 14400 break; 14401 case FLASH_VENDOR_SAIFUN: 14402 tp->nvram_jedecnum = JEDEC_SAIFUN; 14403 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE; 14404 break; 14405 case FLASH_VENDOR_SST_SMALL: 14406 case FLASH_VENDOR_SST_LARGE: 14407 tp->nvram_jedecnum = JEDEC_SST; 14408 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE; 14409 break; 14410 } 14411 } else { 14412 tp->nvram_jedecnum = JEDEC_ATMEL; 14413 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; 14414 tg3_flag_set(tp, NVRAM_BUFFERED); 14415 } 14416 } 14417 14418 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1) 14419 { 14420 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) { 14421 case FLASH_5752PAGE_SIZE_256: 14422 tp->nvram_pagesize = 256; 14423 break; 14424 case FLASH_5752PAGE_SIZE_512: 14425 tp->nvram_pagesize = 512; 14426 break; 14427 case FLASH_5752PAGE_SIZE_1K: 14428 tp->nvram_pagesize = 1024; 14429 break; 14430 case FLASH_5752PAGE_SIZE_2K: 14431 tp->nvram_pagesize = 2048; 14432 break; 14433 case FLASH_5752PAGE_SIZE_4K: 14434 tp->nvram_pagesize = 4096; 14435 break; 14436 case FLASH_5752PAGE_SIZE_264: 14437 tp->nvram_pagesize = 264; 14438 break; 14439 case FLASH_5752PAGE_SIZE_528: 14440 tp->nvram_pagesize = 528; 14441 break; 14442 } 14443 } 14444 14445 static void tg3_get_5752_nvram_info(struct tg3 *tp) 14446 { 14447 u32 nvcfg1; 14448 14449 nvcfg1 = tr32(NVRAM_CFG1); 14450 14451 /* NVRAM protection for TPM */ 14452 if (nvcfg1 & (1 << 27)) 14453 tg3_flag_set(tp, PROTECTED_NVRAM); 14454 14455 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14456 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ: 14457 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ: 14458 tp->nvram_jedecnum = JEDEC_ATMEL; 14459 tg3_flag_set(tp, NVRAM_BUFFERED); 14460 break; 14461 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: 14462 tp->nvram_jedecnum = JEDEC_ATMEL; 14463 tg3_flag_set(tp, NVRAM_BUFFERED); 14464 tg3_flag_set(tp, FLASH); 14465 break; 14466 case FLASH_5752VENDOR_ST_M45PE10: 14467 case FLASH_5752VENDOR_ST_M45PE20: 14468 case FLASH_5752VENDOR_ST_M45PE40: 14469 tp->nvram_jedecnum = JEDEC_ST; 14470 tg3_flag_set(tp, NVRAM_BUFFERED); 14471 tg3_flag_set(tp, FLASH); 14472 break; 14473 } 14474 14475 if (tg3_flag(tp, FLASH)) { 14476 tg3_nvram_get_pagesize(tp, nvcfg1); 14477 } else { 14478 /* For eeprom, set pagesize to maximum 
eeprom size */ 14479 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 14480 14481 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 14482 tw32(NVRAM_CFG1, nvcfg1); 14483 } 14484 } 14485 14486 static void tg3_get_5755_nvram_info(struct tg3 *tp) 14487 { 14488 u32 nvcfg1, protect = 0; 14489 14490 nvcfg1 = tr32(NVRAM_CFG1); 14491 14492 /* NVRAM protection for TPM */ 14493 if (nvcfg1 & (1 << 27)) { 14494 tg3_flag_set(tp, PROTECTED_NVRAM); 14495 protect = 1; 14496 } 14497 14498 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; 14499 switch (nvcfg1) { 14500 case FLASH_5755VENDOR_ATMEL_FLASH_1: 14501 case FLASH_5755VENDOR_ATMEL_FLASH_2: 14502 case FLASH_5755VENDOR_ATMEL_FLASH_3: 14503 case FLASH_5755VENDOR_ATMEL_FLASH_5: 14504 tp->nvram_jedecnum = JEDEC_ATMEL; 14505 tg3_flag_set(tp, NVRAM_BUFFERED); 14506 tg3_flag_set(tp, FLASH); 14507 tp->nvram_pagesize = 264; 14508 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 || 14509 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5) 14510 tp->nvram_size = (protect ? 0x3e200 : 14511 TG3_NVRAM_SIZE_512KB); 14512 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2) 14513 tp->nvram_size = (protect ? 0x1f200 : 14514 TG3_NVRAM_SIZE_256KB); 14515 else 14516 tp->nvram_size = (protect ? 0x1f200 : 14517 TG3_NVRAM_SIZE_128KB); 14518 break; 14519 case FLASH_5752VENDOR_ST_M45PE10: 14520 case FLASH_5752VENDOR_ST_M45PE20: 14521 case FLASH_5752VENDOR_ST_M45PE40: 14522 tp->nvram_jedecnum = JEDEC_ST; 14523 tg3_flag_set(tp, NVRAM_BUFFERED); 14524 tg3_flag_set(tp, FLASH); 14525 tp->nvram_pagesize = 256; 14526 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10) 14527 tp->nvram_size = (protect ? 14528 TG3_NVRAM_SIZE_64KB : 14529 TG3_NVRAM_SIZE_128KB); 14530 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20) 14531 tp->nvram_size = (protect ? 14532 TG3_NVRAM_SIZE_64KB : 14533 TG3_NVRAM_SIZE_256KB); 14534 else 14535 tp->nvram_size = (protect ? 
14536 TG3_NVRAM_SIZE_128KB : 14537 TG3_NVRAM_SIZE_512KB); 14538 break; 14539 } 14540 } 14541 14542 static void tg3_get_5787_nvram_info(struct tg3 *tp) 14543 { 14544 u32 nvcfg1; 14545 14546 nvcfg1 = tr32(NVRAM_CFG1); 14547 14548 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14549 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ: 14550 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: 14551 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ: 14552 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: 14553 tp->nvram_jedecnum = JEDEC_ATMEL; 14554 tg3_flag_set(tp, NVRAM_BUFFERED); 14555 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 14556 14557 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 14558 tw32(NVRAM_CFG1, nvcfg1); 14559 break; 14560 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: 14561 case FLASH_5755VENDOR_ATMEL_FLASH_1: 14562 case FLASH_5755VENDOR_ATMEL_FLASH_2: 14563 case FLASH_5755VENDOR_ATMEL_FLASH_3: 14564 tp->nvram_jedecnum = JEDEC_ATMEL; 14565 tg3_flag_set(tp, NVRAM_BUFFERED); 14566 tg3_flag_set(tp, FLASH); 14567 tp->nvram_pagesize = 264; 14568 break; 14569 case FLASH_5752VENDOR_ST_M45PE10: 14570 case FLASH_5752VENDOR_ST_M45PE20: 14571 case FLASH_5752VENDOR_ST_M45PE40: 14572 tp->nvram_jedecnum = JEDEC_ST; 14573 tg3_flag_set(tp, NVRAM_BUFFERED); 14574 tg3_flag_set(tp, FLASH); 14575 tp->nvram_pagesize = 256; 14576 break; 14577 } 14578 } 14579 14580 static void tg3_get_5761_nvram_info(struct tg3 *tp) 14581 { 14582 u32 nvcfg1, protect = 0; 14583 14584 nvcfg1 = tr32(NVRAM_CFG1); 14585 14586 /* NVRAM protection for TPM */ 14587 if (nvcfg1 & (1 << 27)) { 14588 tg3_flag_set(tp, PROTECTED_NVRAM); 14589 protect = 1; 14590 } 14591 14592 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; 14593 switch (nvcfg1) { 14594 case FLASH_5761VENDOR_ATMEL_ADB021D: 14595 case FLASH_5761VENDOR_ATMEL_ADB041D: 14596 case FLASH_5761VENDOR_ATMEL_ADB081D: 14597 case FLASH_5761VENDOR_ATMEL_ADB161D: 14598 case FLASH_5761VENDOR_ATMEL_MDB021D: 14599 case FLASH_5761VENDOR_ATMEL_MDB041D: 14600 case FLASH_5761VENDOR_ATMEL_MDB081D: 14601 case FLASH_5761VENDOR_ATMEL_MDB161D: 14602 tp->nvram_jedecnum = JEDEC_ATMEL; 14603 tg3_flag_set(tp, NVRAM_BUFFERED); 14604 tg3_flag_set(tp, FLASH); 14605 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); 14606 tp->nvram_pagesize = 256; 14607 break; 14608 case FLASH_5761VENDOR_ST_A_M45PE20: 14609 case FLASH_5761VENDOR_ST_A_M45PE40: 14610 case FLASH_5761VENDOR_ST_A_M45PE80: 14611 case FLASH_5761VENDOR_ST_A_M45PE16: 14612 case FLASH_5761VENDOR_ST_M_M45PE20: 14613 case FLASH_5761VENDOR_ST_M_M45PE40: 14614 case FLASH_5761VENDOR_ST_M_M45PE80: 14615 case FLASH_5761VENDOR_ST_M_M45PE16: 14616 tp->nvram_jedecnum = JEDEC_ST; 14617 tg3_flag_set(tp, NVRAM_BUFFERED); 14618 tg3_flag_set(tp, FLASH); 14619 tp->nvram_pagesize = 256; 14620 break; 14621 } 14622 14623 if (protect) { 14624 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT); 14625 } else { 14626 switch (nvcfg1) { 14627 case FLASH_5761VENDOR_ATMEL_ADB161D: 14628 case FLASH_5761VENDOR_ATMEL_MDB161D: 14629 case FLASH_5761VENDOR_ST_A_M45PE16: 14630 case FLASH_5761VENDOR_ST_M_M45PE16: 14631 tp->nvram_size = TG3_NVRAM_SIZE_2MB; 14632 break; 14633 case FLASH_5761VENDOR_ATMEL_ADB081D: 14634 case FLASH_5761VENDOR_ATMEL_MDB081D: 14635 case FLASH_5761VENDOR_ST_A_M45PE80: 14636 case FLASH_5761VENDOR_ST_M_M45PE80: 14637 tp->nvram_size = TG3_NVRAM_SIZE_1MB; 14638 break; 14639 case FLASH_5761VENDOR_ATMEL_ADB041D: 14640 case FLASH_5761VENDOR_ATMEL_MDB041D: 14641 case FLASH_5761VENDOR_ST_A_M45PE40: 14642 case FLASH_5761VENDOR_ST_M_M45PE40: 14643 tp->nvram_size = TG3_NVRAM_SIZE_512KB; 14644 break; 14645 case 
FLASH_5761VENDOR_ATMEL_ADB021D: 14646 case FLASH_5761VENDOR_ATMEL_MDB021D: 14647 case FLASH_5761VENDOR_ST_A_M45PE20: 14648 case FLASH_5761VENDOR_ST_M_M45PE20: 14649 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 14650 break; 14651 } 14652 } 14653 } 14654 14655 static void tg3_get_5906_nvram_info(struct tg3 *tp) 14656 { 14657 tp->nvram_jedecnum = JEDEC_ATMEL; 14658 tg3_flag_set(tp, NVRAM_BUFFERED); 14659 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 14660 } 14661 14662 static void tg3_get_57780_nvram_info(struct tg3 *tp) 14663 { 14664 u32 nvcfg1; 14665 14666 nvcfg1 = tr32(NVRAM_CFG1); 14667 14668 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14669 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: 14670 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: 14671 tp->nvram_jedecnum = JEDEC_ATMEL; 14672 tg3_flag_set(tp, NVRAM_BUFFERED); 14673 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 14674 14675 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 14676 tw32(NVRAM_CFG1, nvcfg1); 14677 return; 14678 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: 14679 case FLASH_57780VENDOR_ATMEL_AT45DB011D: 14680 case FLASH_57780VENDOR_ATMEL_AT45DB011B: 14681 case FLASH_57780VENDOR_ATMEL_AT45DB021D: 14682 case FLASH_57780VENDOR_ATMEL_AT45DB021B: 14683 case FLASH_57780VENDOR_ATMEL_AT45DB041D: 14684 case FLASH_57780VENDOR_ATMEL_AT45DB041B: 14685 tp->nvram_jedecnum = JEDEC_ATMEL; 14686 tg3_flag_set(tp, NVRAM_BUFFERED); 14687 tg3_flag_set(tp, FLASH); 14688 14689 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14690 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: 14691 case FLASH_57780VENDOR_ATMEL_AT45DB011D: 14692 case FLASH_57780VENDOR_ATMEL_AT45DB011B: 14693 tp->nvram_size = TG3_NVRAM_SIZE_128KB; 14694 break; 14695 case FLASH_57780VENDOR_ATMEL_AT45DB021D: 14696 case FLASH_57780VENDOR_ATMEL_AT45DB021B: 14697 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 14698 break; 14699 case FLASH_57780VENDOR_ATMEL_AT45DB041D: 14700 case FLASH_57780VENDOR_ATMEL_AT45DB041B: 14701 tp->nvram_size = TG3_NVRAM_SIZE_512KB; 14702 break; 14703 } 14704 break; 14705 case FLASH_5752VENDOR_ST_M45PE10: 14706 case FLASH_5752VENDOR_ST_M45PE20: 14707 case FLASH_5752VENDOR_ST_M45PE40: 14708 tp->nvram_jedecnum = JEDEC_ST; 14709 tg3_flag_set(tp, NVRAM_BUFFERED); 14710 tg3_flag_set(tp, FLASH); 14711 14712 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14713 case FLASH_5752VENDOR_ST_M45PE10: 14714 tp->nvram_size = TG3_NVRAM_SIZE_128KB; 14715 break; 14716 case FLASH_5752VENDOR_ST_M45PE20: 14717 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 14718 break; 14719 case FLASH_5752VENDOR_ST_M45PE40: 14720 tp->nvram_size = TG3_NVRAM_SIZE_512KB; 14721 break; 14722 } 14723 break; 14724 default: 14725 tg3_flag_set(tp, NO_NVRAM); 14726 return; 14727 } 14728 14729 tg3_nvram_get_pagesize(tp, nvcfg1); 14730 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) 14731 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); 14732 } 14733 14734 14735 static void tg3_get_5717_nvram_info(struct tg3 *tp) 14736 { 14737 u32 nvcfg1; 14738 14739 nvcfg1 = tr32(NVRAM_CFG1); 14740 14741 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14742 case FLASH_5717VENDOR_ATMEL_EEPROM: 14743 case FLASH_5717VENDOR_MICRO_EEPROM: 14744 tp->nvram_jedecnum = JEDEC_ATMEL; 14745 tg3_flag_set(tp, NVRAM_BUFFERED); 14746 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 14747 14748 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 14749 tw32(NVRAM_CFG1, nvcfg1); 14750 return; 14751 case FLASH_5717VENDOR_ATMEL_MDB011D: 14752 case FLASH_5717VENDOR_ATMEL_ADB011B: 14753 case FLASH_5717VENDOR_ATMEL_ADB011D: 14754 case FLASH_5717VENDOR_ATMEL_MDB021D: 14755 case 
FLASH_5717VENDOR_ATMEL_ADB021B: 14756 case FLASH_5717VENDOR_ATMEL_ADB021D: 14757 case FLASH_5717VENDOR_ATMEL_45USPT: 14758 tp->nvram_jedecnum = JEDEC_ATMEL; 14759 tg3_flag_set(tp, NVRAM_BUFFERED); 14760 tg3_flag_set(tp, FLASH); 14761 14762 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14763 case FLASH_5717VENDOR_ATMEL_MDB021D: 14764 /* Detect size with tg3_nvram_get_size() */ 14765 break; 14766 case FLASH_5717VENDOR_ATMEL_ADB021B: 14767 case FLASH_5717VENDOR_ATMEL_ADB021D: 14768 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 14769 break; 14770 default: 14771 tp->nvram_size = TG3_NVRAM_SIZE_128KB; 14772 break; 14773 } 14774 break; 14775 case FLASH_5717VENDOR_ST_M_M25PE10: 14776 case FLASH_5717VENDOR_ST_A_M25PE10: 14777 case FLASH_5717VENDOR_ST_M_M45PE10: 14778 case FLASH_5717VENDOR_ST_A_M45PE10: 14779 case FLASH_5717VENDOR_ST_M_M25PE20: 14780 case FLASH_5717VENDOR_ST_A_M25PE20: 14781 case FLASH_5717VENDOR_ST_M_M45PE20: 14782 case FLASH_5717VENDOR_ST_A_M45PE20: 14783 case FLASH_5717VENDOR_ST_25USPT: 14784 case FLASH_5717VENDOR_ST_45USPT: 14785 tp->nvram_jedecnum = JEDEC_ST; 14786 tg3_flag_set(tp, NVRAM_BUFFERED); 14787 tg3_flag_set(tp, FLASH); 14788 14789 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14790 case FLASH_5717VENDOR_ST_M_M25PE20: 14791 case FLASH_5717VENDOR_ST_M_M45PE20: 14792 /* Detect size with tg3_nvram_get_size() */ 14793 break; 14794 case FLASH_5717VENDOR_ST_A_M25PE20: 14795 case FLASH_5717VENDOR_ST_A_M45PE20: 14796 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 14797 break; 14798 default: 14799 tp->nvram_size = TG3_NVRAM_SIZE_128KB; 14800 break; 14801 } 14802 break; 14803 default: 14804 tg3_flag_set(tp, NO_NVRAM); 14805 return; 14806 } 14807 14808 tg3_nvram_get_pagesize(tp, nvcfg1); 14809 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) 14810 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); 14811 } 14812 14813 static void tg3_get_5720_nvram_info(struct tg3 *tp) 14814 { 14815 u32 nvcfg1, nvmpinstrp, nv_status; 14816 14817 nvcfg1 = tr32(NVRAM_CFG1); 14818 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK; 14819 14820 if (tg3_asic_rev(tp) == ASIC_REV_5762) { 14821 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) { 14822 tg3_flag_set(tp, NO_NVRAM); 14823 return; 14824 } 14825 14826 switch (nvmpinstrp) { 14827 case FLASH_5762_MX25L_100: 14828 case FLASH_5762_MX25L_200: 14829 case FLASH_5762_MX25L_400: 14830 case FLASH_5762_MX25L_800: 14831 case FLASH_5762_MX25L_160_320: 14832 tp->nvram_pagesize = 4096; 14833 tp->nvram_jedecnum = JEDEC_MACRONIX; 14834 tg3_flag_set(tp, NVRAM_BUFFERED); 14835 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); 14836 tg3_flag_set(tp, FLASH); 14837 nv_status = tr32(NVRAM_AUTOSENSE_STATUS); 14838 tp->nvram_size = 14839 (1 << (nv_status >> AUTOSENSE_DEVID & 14840 AUTOSENSE_DEVID_MASK) 14841 << AUTOSENSE_SIZE_IN_MB); 14842 return; 14843 14844 case FLASH_5762_EEPROM_HD: 14845 nvmpinstrp = FLASH_5720_EEPROM_HD; 14846 break; 14847 case FLASH_5762_EEPROM_LD: 14848 nvmpinstrp = FLASH_5720_EEPROM_LD; 14849 break; 14850 case FLASH_5720VENDOR_M_ST_M45PE20: 14851 /* This pinstrap supports multiple sizes, so force it 14852 * to read the actual size from location 0xf0. 
14853 */ 14854 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT; 14855 break; 14856 } 14857 } 14858 14859 switch (nvmpinstrp) { 14860 case FLASH_5720_EEPROM_HD: 14861 case FLASH_5720_EEPROM_LD: 14862 tp->nvram_jedecnum = JEDEC_ATMEL; 14863 tg3_flag_set(tp, NVRAM_BUFFERED); 14864 14865 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 14866 tw32(NVRAM_CFG1, nvcfg1); 14867 if (nvmpinstrp == FLASH_5720_EEPROM_HD) 14868 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 14869 else 14870 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE; 14871 return; 14872 case FLASH_5720VENDOR_M_ATMEL_DB011D: 14873 case FLASH_5720VENDOR_A_ATMEL_DB011B: 14874 case FLASH_5720VENDOR_A_ATMEL_DB011D: 14875 case FLASH_5720VENDOR_M_ATMEL_DB021D: 14876 case FLASH_5720VENDOR_A_ATMEL_DB021B: 14877 case FLASH_5720VENDOR_A_ATMEL_DB021D: 14878 case FLASH_5720VENDOR_M_ATMEL_DB041D: 14879 case FLASH_5720VENDOR_A_ATMEL_DB041B: 14880 case FLASH_5720VENDOR_A_ATMEL_DB041D: 14881 case FLASH_5720VENDOR_M_ATMEL_DB081D: 14882 case FLASH_5720VENDOR_A_ATMEL_DB081D: 14883 case FLASH_5720VENDOR_ATMEL_45USPT: 14884 tp->nvram_jedecnum = JEDEC_ATMEL; 14885 tg3_flag_set(tp, NVRAM_BUFFERED); 14886 tg3_flag_set(tp, FLASH); 14887 14888 switch (nvmpinstrp) { 14889 case FLASH_5720VENDOR_M_ATMEL_DB021D: 14890 case FLASH_5720VENDOR_A_ATMEL_DB021B: 14891 case FLASH_5720VENDOR_A_ATMEL_DB021D: 14892 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 14893 break; 14894 case FLASH_5720VENDOR_M_ATMEL_DB041D: 14895 case FLASH_5720VENDOR_A_ATMEL_DB041B: 14896 case FLASH_5720VENDOR_A_ATMEL_DB041D: 14897 tp->nvram_size = TG3_NVRAM_SIZE_512KB; 14898 break; 14899 case FLASH_5720VENDOR_M_ATMEL_DB081D: 14900 case FLASH_5720VENDOR_A_ATMEL_DB081D: 14901 tp->nvram_size = TG3_NVRAM_SIZE_1MB; 14902 break; 14903 default: 14904 if (tg3_asic_rev(tp) != ASIC_REV_5762) 14905 tp->nvram_size = TG3_NVRAM_SIZE_128KB; 14906 break; 14907 } 14908 break; 14909 case FLASH_5720VENDOR_M_ST_M25PE10: 14910 case FLASH_5720VENDOR_M_ST_M45PE10: 14911 case FLASH_5720VENDOR_A_ST_M25PE10: 14912 case FLASH_5720VENDOR_A_ST_M45PE10: 14913 case FLASH_5720VENDOR_M_ST_M25PE20: 14914 case FLASH_5720VENDOR_M_ST_M45PE20: 14915 case FLASH_5720VENDOR_A_ST_M25PE20: 14916 case FLASH_5720VENDOR_A_ST_M45PE20: 14917 case FLASH_5720VENDOR_M_ST_M25PE40: 14918 case FLASH_5720VENDOR_M_ST_M45PE40: 14919 case FLASH_5720VENDOR_A_ST_M25PE40: 14920 case FLASH_5720VENDOR_A_ST_M45PE40: 14921 case FLASH_5720VENDOR_M_ST_M25PE80: 14922 case FLASH_5720VENDOR_M_ST_M45PE80: 14923 case FLASH_5720VENDOR_A_ST_M25PE80: 14924 case FLASH_5720VENDOR_A_ST_M45PE80: 14925 case FLASH_5720VENDOR_ST_25USPT: 14926 case FLASH_5720VENDOR_ST_45USPT: 14927 tp->nvram_jedecnum = JEDEC_ST; 14928 tg3_flag_set(tp, NVRAM_BUFFERED); 14929 tg3_flag_set(tp, FLASH); 14930 14931 switch (nvmpinstrp) { 14932 case FLASH_5720VENDOR_M_ST_M25PE20: 14933 case FLASH_5720VENDOR_M_ST_M45PE20: 14934 case FLASH_5720VENDOR_A_ST_M25PE20: 14935 case FLASH_5720VENDOR_A_ST_M45PE20: 14936 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 14937 break; 14938 case FLASH_5720VENDOR_M_ST_M25PE40: 14939 case FLASH_5720VENDOR_M_ST_M45PE40: 14940 case FLASH_5720VENDOR_A_ST_M25PE40: 14941 case FLASH_5720VENDOR_A_ST_M45PE40: 14942 tp->nvram_size = TG3_NVRAM_SIZE_512KB; 14943 break; 14944 case FLASH_5720VENDOR_M_ST_M25PE80: 14945 case FLASH_5720VENDOR_M_ST_M45PE80: 14946 case FLASH_5720VENDOR_A_ST_M25PE80: 14947 case FLASH_5720VENDOR_A_ST_M45PE80: 14948 tp->nvram_size = TG3_NVRAM_SIZE_1MB; 14949 break; 14950 default: 14951 if (tg3_asic_rev(tp) != ASIC_REV_5762) 14952 tp->nvram_size = TG3_NVRAM_SIZE_128KB; 14953 break; 14954 } 
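/*
 * Note on the decode above (an observation about this code, not
 * Broadcom documentation): each pinstrap case selects the JEDEC
 * vendor and marks the part as buffered flash; only pinstraps that
 * identify a unique density set tp->nvram_size here.  Entries left
 * at zero are sized later by tg3_get_nvram_size(), which
 * tg3_nvram_init() calls as a fallback and which reads the size
 * word at NVRAM offset 0xf0 (see the pinstrap remap above).
 */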
14955 break; 14956 default: 14957 tg3_flag_set(tp, NO_NVRAM); 14958 return; 14959 } 14960 14961 tg3_nvram_get_pagesize(tp, nvcfg1); 14962 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) 14963 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); 14964 14965 if (tg3_asic_rev(tp) == ASIC_REV_5762) { 14966 u32 val; 14967 14968 if (tg3_nvram_read(tp, 0, &val)) 14969 return; 14970 14971 if (val != TG3_EEPROM_MAGIC && 14972 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) 14973 tg3_flag_set(tp, NO_NVRAM); 14974 } 14975 } 14976 14977 /* Chips other than 5700/5701 use the NVRAM for fetching info. */ 14978 static void tg3_nvram_init(struct tg3 *tp) 14979 { 14980 if (tg3_flag(tp, IS_SSB_CORE)) { 14981 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */ 14982 tg3_flag_clear(tp, NVRAM); 14983 tg3_flag_clear(tp, NVRAM_BUFFERED); 14984 tg3_flag_set(tp, NO_NVRAM); 14985 return; 14986 } 14987 14988 tw32_f(GRC_EEPROM_ADDR, 14989 (EEPROM_ADDR_FSM_RESET | 14990 (EEPROM_DEFAULT_CLOCK_PERIOD << 14991 EEPROM_ADDR_CLKPERD_SHIFT))); 14992 14993 msleep(1); 14994 14995 /* Enable seeprom accesses. */ 14996 tw32_f(GRC_LOCAL_CTRL, 14997 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM); 14998 udelay(100); 14999 15000 if (tg3_asic_rev(tp) != ASIC_REV_5700 && 15001 tg3_asic_rev(tp) != ASIC_REV_5701) { 15002 tg3_flag_set(tp, NVRAM); 15003 15004 if (tg3_nvram_lock(tp)) { 15005 netdev_warn(tp->dev, 15006 "Cannot get nvram lock, %s failed\n", 15007 __func__); 15008 return; 15009 } 15010 tg3_enable_nvram_access(tp); 15011 15012 tp->nvram_size = 0; 15013 15014 if (tg3_asic_rev(tp) == ASIC_REV_5752) 15015 tg3_get_5752_nvram_info(tp); 15016 else if (tg3_asic_rev(tp) == ASIC_REV_5755) 15017 tg3_get_5755_nvram_info(tp); 15018 else if (tg3_asic_rev(tp) == ASIC_REV_5787 || 15019 tg3_asic_rev(tp) == ASIC_REV_5784 || 15020 tg3_asic_rev(tp) == ASIC_REV_5785) 15021 tg3_get_5787_nvram_info(tp); 15022 else if (tg3_asic_rev(tp) == ASIC_REV_5761) 15023 tg3_get_5761_nvram_info(tp); 15024 else if (tg3_asic_rev(tp) == ASIC_REV_5906) 15025 tg3_get_5906_nvram_info(tp); 15026 else if (tg3_asic_rev(tp) == ASIC_REV_57780 || 15027 tg3_flag(tp, 57765_CLASS)) 15028 tg3_get_57780_nvram_info(tp); 15029 else if (tg3_asic_rev(tp) == ASIC_REV_5717 || 15030 tg3_asic_rev(tp) == ASIC_REV_5719) 15031 tg3_get_5717_nvram_info(tp); 15032 else if (tg3_asic_rev(tp) == ASIC_REV_5720 || 15033 tg3_asic_rev(tp) == ASIC_REV_5762) 15034 tg3_get_5720_nvram_info(tp); 15035 else 15036 tg3_get_nvram_info(tp); 15037 15038 if (tp->nvram_size == 0) 15039 tg3_get_nvram_size(tp); 15040 15041 tg3_disable_nvram_access(tp); 15042 tg3_nvram_unlock(tp); 15043 15044 } else { 15045 tg3_flag_clear(tp, NVRAM); 15046 tg3_flag_clear(tp, NVRAM_BUFFERED); 15047 15048 tg3_get_eeprom_size(tp); 15049 } 15050 } 15051 15052 struct subsys_tbl_ent { 15053 u16 subsys_vendor, subsys_devid; 15054 u32 phy_id; 15055 }; 15056 15057 static struct subsys_tbl_ent subsys_id_to_phy_id[] = { 15058 /* Broadcom boards. 
*/ 15059 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15060 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 }, 15061 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15062 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 }, 15063 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15064 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 }, 15065 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15066 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 }, 15067 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15068 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 }, 15069 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15070 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 }, 15071 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15072 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 }, 15073 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15074 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 }, 15075 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15076 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 }, 15077 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15078 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 }, 15079 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15080 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 }, 15081 15082 /* 3com boards. */ 15083 { TG3PCI_SUBVENDOR_ID_3COM, 15084 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 }, 15085 { TG3PCI_SUBVENDOR_ID_3COM, 15086 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 }, 15087 { TG3PCI_SUBVENDOR_ID_3COM, 15088 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 }, 15089 { TG3PCI_SUBVENDOR_ID_3COM, 15090 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 }, 15091 { TG3PCI_SUBVENDOR_ID_3COM, 15092 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 }, 15093 15094 /* DELL boards. */ 15095 { TG3PCI_SUBVENDOR_ID_DELL, 15096 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 }, 15097 { TG3PCI_SUBVENDOR_ID_DELL, 15098 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 }, 15099 { TG3PCI_SUBVENDOR_ID_DELL, 15100 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 }, 15101 { TG3PCI_SUBVENDOR_ID_DELL, 15102 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 }, 15103 15104 /* Compaq boards. */ 15105 { TG3PCI_SUBVENDOR_ID_COMPAQ, 15106 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 }, 15107 { TG3PCI_SUBVENDOR_ID_COMPAQ, 15108 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 }, 15109 { TG3PCI_SUBVENDOR_ID_COMPAQ, 15110 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 }, 15111 { TG3PCI_SUBVENDOR_ID_COMPAQ, 15112 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 }, 15113 { TG3PCI_SUBVENDOR_ID_COMPAQ, 15114 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 }, 15115 15116 /* IBM boards. */ 15117 { TG3PCI_SUBVENDOR_ID_IBM, 15118 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 } 15119 }; 15120 15121 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp) 15122 { 15123 int i; 15124 15125 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) { 15126 if ((subsys_id_to_phy_id[i].subsys_vendor == 15127 tp->pdev->subsystem_vendor) && 15128 (subsys_id_to_phy_id[i].subsys_devid == 15129 tp->pdev->subsystem_device)) 15130 return &subsys_id_to_phy_id[i]; 15131 } 15132 return NULL; 15133 } 15134 15135 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp) 15136 { 15137 u32 val; 15138 15139 tp->phy_id = TG3_PHY_ID_INVALID; 15140 tp->led_ctrl = LED_CTRL_MODE_PHY_1; 15141 15142 /* Assume an onboard device and WOL capable by default. 
*/ 15143 tg3_flag_set(tp, EEPROM_WRITE_PROT); 15144 tg3_flag_set(tp, WOL_CAP); 15145 15146 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 15147 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) { 15148 tg3_flag_clear(tp, EEPROM_WRITE_PROT); 15149 tg3_flag_set(tp, IS_NIC); 15150 } 15151 val = tr32(VCPU_CFGSHDW); 15152 if (val & VCPU_CFGSHDW_ASPM_DBNC) 15153 tg3_flag_set(tp, ASPM_WORKAROUND); 15154 if ((val & VCPU_CFGSHDW_WOL_ENABLE) && 15155 (val & VCPU_CFGSHDW_WOL_MAGPKT)) { 15156 tg3_flag_set(tp, WOL_ENABLE); 15157 device_set_wakeup_enable(&tp->pdev->dev, true); 15158 } 15159 goto done; 15160 } 15161 15162 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); 15163 if (val == NIC_SRAM_DATA_SIG_MAGIC) { 15164 u32 nic_cfg, led_cfg; 15165 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0; 15166 u32 nic_phy_id, ver, eeprom_phy_id; 15167 int eeprom_phy_serdes = 0; 15168 15169 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); 15170 tp->nic_sram_data_cfg = nic_cfg; 15171 15172 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver); 15173 ver >>= NIC_SRAM_DATA_VER_SHIFT; 15174 if (tg3_asic_rev(tp) != ASIC_REV_5700 && 15175 tg3_asic_rev(tp) != ASIC_REV_5701 && 15176 tg3_asic_rev(tp) != ASIC_REV_5703 && 15177 (ver > 0) && (ver < 0x100)) 15178 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2); 15179 15180 if (tg3_asic_rev(tp) == ASIC_REV_5785) 15181 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4); 15182 15183 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 15184 tg3_asic_rev(tp) == ASIC_REV_5719 || 15185 tg3_asic_rev(tp) == ASIC_REV_5720) 15186 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5); 15187 15188 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) == 15189 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER) 15190 eeprom_phy_serdes = 1; 15191 15192 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id); 15193 if (nic_phy_id != 0) { 15194 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK; 15195 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK; 15196 15197 eeprom_phy_id = (id1 >> 16) << 10; 15198 eeprom_phy_id |= (id2 & 0xfc00) << 16; 15199 eeprom_phy_id |= (id2 & 0x03ff) << 0; 15200 } else 15201 eeprom_phy_id = 0; 15202 15203 tp->phy_id = eeprom_phy_id; 15204 if (eeprom_phy_serdes) { 15205 if (!tg3_flag(tp, 5705_PLUS)) 15206 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; 15207 else 15208 tp->phy_flags |= TG3_PHYFLG_MII_SERDES; 15209 } 15210 15211 if (tg3_flag(tp, 5750_PLUS)) 15212 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK | 15213 SHASTA_EXT_LED_MODE_MASK); 15214 else 15215 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK; 15216 15217 switch (led_cfg) { 15218 default: 15219 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1: 15220 tp->led_ctrl = LED_CTRL_MODE_PHY_1; 15221 break; 15222 15223 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2: 15224 tp->led_ctrl = LED_CTRL_MODE_PHY_2; 15225 break; 15226 15227 case NIC_SRAM_DATA_CFG_LED_MODE_MAC: 15228 tp->led_ctrl = LED_CTRL_MODE_MAC; 15229 15230 /* Default to PHY_1_MODE if 0 (MAC_MODE) is 15231 * read on some older 5700/5701 bootcode. 
15232 */ 15233 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 15234 tg3_asic_rev(tp) == ASIC_REV_5701) 15235 tp->led_ctrl = LED_CTRL_MODE_PHY_1; 15236 15237 break; 15238 15239 case SHASTA_EXT_LED_SHARED: 15240 tp->led_ctrl = LED_CTRL_MODE_SHARED; 15241 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 && 15242 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1) 15243 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 | 15244 LED_CTRL_MODE_PHY_2); 15245 15246 if (tg3_flag(tp, 5717_PLUS) || 15247 tg3_asic_rev(tp) == ASIC_REV_5762) 15248 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE | 15249 LED_CTRL_BLINK_RATE_MASK; 15250 15251 break; 15252 15253 case SHASTA_EXT_LED_MAC: 15254 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC; 15255 break; 15256 15257 case SHASTA_EXT_LED_COMBO: 15258 tp->led_ctrl = LED_CTRL_MODE_COMBO; 15259 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) 15260 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 | 15261 LED_CTRL_MODE_PHY_2); 15262 break; 15263 15264 } 15265 15266 if ((tg3_asic_rev(tp) == ASIC_REV_5700 || 15267 tg3_asic_rev(tp) == ASIC_REV_5701) && 15268 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL) 15269 tp->led_ctrl = LED_CTRL_MODE_PHY_2; 15270 15271 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) 15272 tp->led_ctrl = LED_CTRL_MODE_PHY_1; 15273 15274 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) { 15275 tg3_flag_set(tp, EEPROM_WRITE_PROT); 15276 if ((tp->pdev->subsystem_vendor == 15277 PCI_VENDOR_ID_ARIMA) && 15278 (tp->pdev->subsystem_device == 0x205a || 15279 tp->pdev->subsystem_device == 0x2063)) 15280 tg3_flag_clear(tp, EEPROM_WRITE_PROT); 15281 } else { 15282 tg3_flag_clear(tp, EEPROM_WRITE_PROT); 15283 tg3_flag_set(tp, IS_NIC); 15284 } 15285 15286 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { 15287 tg3_flag_set(tp, ENABLE_ASF); 15288 if (tg3_flag(tp, 5750_PLUS)) 15289 tg3_flag_set(tp, ASF_NEW_HANDSHAKE); 15290 } 15291 15292 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) && 15293 tg3_flag(tp, 5750_PLUS)) 15294 tg3_flag_set(tp, ENABLE_APE); 15295 15296 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES && 15297 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)) 15298 tg3_flag_clear(tp, WOL_CAP); 15299 15300 if (tg3_flag(tp, WOL_CAP) && 15301 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) { 15302 tg3_flag_set(tp, WOL_ENABLE); 15303 device_set_wakeup_enable(&tp->pdev->dev, true); 15304 } 15305 15306 if (cfg2 & (1 << 17)) 15307 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING; 15308 15309 /* serdes signal pre-emphasis in register 0x590 set by */ 15310 /* bootcode if bit 18 is set */ 15311 if (cfg2 & (1 << 18)) 15312 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS; 15313 15314 if ((tg3_flag(tp, 57765_PLUS) || 15315 (tg3_asic_rev(tp) == ASIC_REV_5784 && 15316 tg3_chip_rev(tp) != CHIPREV_5784_AX)) && 15317 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN)) 15318 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD; 15319 15320 if (tg3_flag(tp, PCI_EXPRESS)) { 15321 u32 cfg3; 15322 15323 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3); 15324 if (tg3_asic_rev(tp) != ASIC_REV_5785 && 15325 !tg3_flag(tp, 57765_PLUS) && 15326 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)) 15327 tg3_flag_set(tp, ASPM_WORKAROUND); 15328 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID) 15329 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN; 15330 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK) 15331 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK; 15332 } 15333 15334 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE) 15335 tg3_flag_set(tp, RGMII_INBAND_DISABLE); 15336 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN) 15337 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN); 15338 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN) 15339 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN); 15340 15341 if 
(cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV) 15342 tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV; 15343 } 15344 done: 15345 if (tg3_flag(tp, WOL_CAP)) 15346 device_set_wakeup_enable(&tp->pdev->dev, 15347 tg3_flag(tp, WOL_ENABLE)); 15348 else 15349 device_set_wakeup_capable(&tp->pdev->dev, false); 15350 } 15351 15352 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val) 15353 { 15354 int i, err; 15355 u32 val2, off = offset * 8; 15356 15357 err = tg3_nvram_lock(tp); 15358 if (err) 15359 return err; 15360 15361 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE); 15362 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN | 15363 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START); 15364 tg3_ape_read32(tp, TG3_APE_OTP_CTRL); 15365 udelay(10); 15366 15367 for (i = 0; i < 100; i++) { 15368 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS); 15369 if (val2 & APE_OTP_STATUS_CMD_DONE) { 15370 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA); 15371 break; 15372 } 15373 udelay(10); 15374 } 15375 15376 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0); 15377 15378 tg3_nvram_unlock(tp); 15379 if (val2 & APE_OTP_STATUS_CMD_DONE) 15380 return 0; 15381 15382 return -EBUSY; 15383 } 15384 15385 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd) 15386 { 15387 int i; 15388 u32 val; 15389 15390 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START); 15391 tw32(OTP_CTRL, cmd); 15392 15393 /* Wait for up to 1 ms for command to execute. */ 15394 for (i = 0; i < 100; i++) { 15395 val = tr32(OTP_STATUS); 15396 if (val & OTP_STATUS_CMD_DONE) 15397 break; 15398 udelay(10); 15399 } 15400 15401 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY; 15402 } 15403 15404 /* Read the gphy configuration from the OTP region of the chip. The gphy 15405 * configuration is a 32-bit value that straddles the alignment boundary. 15406 * We do two 32-bit reads and then shift and merge the results. 
15407 */ 15408 static u32 tg3_read_otp_phycfg(struct tg3 *tp) 15409 { 15410 u32 bhalf_otp, thalf_otp; 15411 15412 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC); 15413 15414 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT)) 15415 return 0; 15416 15417 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1); 15418 15419 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ)) 15420 return 0; 15421 15422 thalf_otp = tr32(OTP_READ_DATA); 15423 15424 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2); 15425 15426 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ)) 15427 return 0; 15428 15429 bhalf_otp = tr32(OTP_READ_DATA); 15430 15431 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16); 15432 } 15433 15434 static void tg3_phy_init_link_config(struct tg3 *tp) 15435 { 15436 u32 adv = ADVERTISED_Autoneg; 15437 15438 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 15439 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV)) 15440 adv |= ADVERTISED_1000baseT_Half; 15441 adv |= ADVERTISED_1000baseT_Full; 15442 } 15443 15444 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 15445 adv |= ADVERTISED_100baseT_Half | 15446 ADVERTISED_100baseT_Full | 15447 ADVERTISED_10baseT_Half | 15448 ADVERTISED_10baseT_Full | 15449 ADVERTISED_TP; 15450 else 15451 adv |= ADVERTISED_FIBRE; 15452 15453 tp->link_config.advertising = adv; 15454 tp->link_config.speed = SPEED_UNKNOWN; 15455 tp->link_config.duplex = DUPLEX_UNKNOWN; 15456 tp->link_config.autoneg = AUTONEG_ENABLE; 15457 tp->link_config.active_speed = SPEED_UNKNOWN; 15458 tp->link_config.active_duplex = DUPLEX_UNKNOWN; 15459 15460 tp->old_link = -1; 15461 } 15462 15463 static int tg3_phy_probe(struct tg3 *tp) 15464 { 15465 u32 hw_phy_id_1, hw_phy_id_2; 15466 u32 hw_phy_id, hw_phy_id_masked; 15467 int err; 15468 15469 /* flow control autonegotiation is default behavior */ 15470 tg3_flag_set(tp, PAUSE_AUTONEG); 15471 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; 15472 15473 if (tg3_flag(tp, ENABLE_APE)) { 15474 switch (tp->pci_fn) { 15475 case 0: 15476 tp->phy_ape_lock = TG3_APE_LOCK_PHY0; 15477 break; 15478 case 1: 15479 tp->phy_ape_lock = TG3_APE_LOCK_PHY1; 15480 break; 15481 case 2: 15482 tp->phy_ape_lock = TG3_APE_LOCK_PHY2; 15483 break; 15484 case 3: 15485 tp->phy_ape_lock = TG3_APE_LOCK_PHY3; 15486 break; 15487 } 15488 } 15489 15490 if (!tg3_flag(tp, ENABLE_ASF) && 15491 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && 15492 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) 15493 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK | 15494 TG3_PHYFLG_KEEP_LINK_ON_PWRDN); 15495 15496 if (tg3_flag(tp, USE_PHYLIB)) 15497 return tg3_phy_init(tp); 15498 15499 /* Reading the PHY ID register can conflict with ASF 15500 * firmware access to the PHY hardware. 15501 */ 15502 err = 0; 15503 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) { 15504 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID; 15505 } else { 15506 /* Now read the physical PHY_ID from the chip and verify 15507 * that it is sane. If it doesn't look good, we fall back 15508 * to either the hard-coded table based PHY_ID and failing 15509 * that the value found in the eeprom area. 
15510 */ 15511 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1); 15512 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2); 15513 15514 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10; 15515 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16; 15516 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0; 15517 15518 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK; 15519 } 15520 15521 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) { 15522 tp->phy_id = hw_phy_id; 15523 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002) 15524 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; 15525 else 15526 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES; 15527 } else { 15528 if (tp->phy_id != TG3_PHY_ID_INVALID) { 15529 /* Do nothing, phy ID already set up in 15530 * tg3_get_eeprom_hw_cfg(). 15531 */ 15532 } else { 15533 struct subsys_tbl_ent *p; 15534 15535 /* No eeprom signature? Try the hardcoded 15536 * subsys device table. 15537 */ 15538 p = tg3_lookup_by_subsys(tp); 15539 if (p) { 15540 tp->phy_id = p->phy_id; 15541 } else if (!tg3_flag(tp, IS_SSB_CORE)) { 15542 /* For now we saw the IDs 0xbc050cd0, 15543 * 0xbc050f80 and 0xbc050c30 on devices 15544 * connected to an BCM4785 and there are 15545 * probably more. Just assume that the phy is 15546 * supported when it is connected to a SSB core 15547 * for now. 15548 */ 15549 return -ENODEV; 15550 } 15551 15552 if (!tp->phy_id || 15553 tp->phy_id == TG3_PHY_ID_BCM8002) 15554 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; 15555 } 15556 } 15557 15558 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && 15559 (tg3_asic_rev(tp) == ASIC_REV_5719 || 15560 tg3_asic_rev(tp) == ASIC_REV_5720 || 15561 tg3_asic_rev(tp) == ASIC_REV_57766 || 15562 tg3_asic_rev(tp) == ASIC_REV_5762 || 15563 (tg3_asic_rev(tp) == ASIC_REV_5717 && 15564 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) || 15565 (tg3_asic_rev(tp) == ASIC_REV_57765 && 15566 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) { 15567 tp->phy_flags |= TG3_PHYFLG_EEE_CAP; 15568 15569 tp->eee.supported = SUPPORTED_100baseT_Full | 15570 SUPPORTED_1000baseT_Full; 15571 tp->eee.advertised = ADVERTISED_100baseT_Full | 15572 ADVERTISED_1000baseT_Full; 15573 tp->eee.eee_enabled = 1; 15574 tp->eee.tx_lpi_enabled = 1; 15575 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US; 15576 } 15577 15578 tg3_phy_init_link_config(tp); 15579 15580 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && 15581 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && 15582 !tg3_flag(tp, ENABLE_APE) && 15583 !tg3_flag(tp, ENABLE_ASF)) { 15584 u32 bmsr, dummy; 15585 15586 tg3_readphy(tp, MII_BMSR, &bmsr); 15587 if (!tg3_readphy(tp, MII_BMSR, &bmsr) && 15588 (bmsr & BMSR_LSTATUS)) 15589 goto skip_phy_reset; 15590 15591 err = tg3_phy_reset(tp); 15592 if (err) 15593 return err; 15594 15595 tg3_phy_set_wirespeed(tp); 15596 15597 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) { 15598 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising, 15599 tp->link_config.flowctrl); 15600 15601 tg3_writephy(tp, MII_BMCR, 15602 BMCR_ANENABLE | BMCR_ANRESTART); 15603 } 15604 } 15605 15606 skip_phy_reset: 15607 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { 15608 err = tg3_init_5401phy_dsp(tp); 15609 if (err) 15610 return err; 15611 15612 err = tg3_init_5401phy_dsp(tp); 15613 } 15614 15615 return err; 15616 } 15617 15618 static void tg3_read_vpd(struct tg3 *tp) 15619 { 15620 u8 *vpd_data; 15621 unsigned int len, vpdlen; 15622 int i; 15623 15624 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen); 15625 if (!vpd_data) 15626 goto out_no_vpd; 15627 15628 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen, 15629 PCI_VPD_RO_KEYWORD_MFR_ID, &len); 
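/*
 * Sketch of the keyword flow below, with hypothetical VPD contents:
 * the read-only "MN" (MFR_ID) payload must equal the ASCII string
 * "1028" -- Dell's PCI vendor ID -- before the "V0" (VENDOR0)
 * payload is prepended to tp->fw_ver, e.g. V0 = "5720-v1.39" would
 * yield fw_ver "5720-v1.39 bc ...".  Any other manufacturer falls
 * through directly to the "PN" part-number lookup.
 */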
15630 if (i < 0) 15631 goto partno; 15632 15633 if (len != 4 || memcmp(vpd_data + i, "1028", 4)) 15634 goto partno; 15635 15636 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen, 15637 PCI_VPD_RO_KEYWORD_VENDOR0, &len); 15638 if (i < 0) 15639 goto partno; 15640 15641 memset(tp->fw_ver, 0, sizeof(tp->fw_ver)); 15642 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, vpd_data + i); 15643 15644 partno: 15645 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen, 15646 PCI_VPD_RO_KEYWORD_PARTNO, &len); 15647 if (i < 0) 15648 goto out_not_found; 15649 15650 if (len > TG3_BPN_SIZE) 15651 goto out_not_found; 15652 15653 memcpy(tp->board_part_number, &vpd_data[i], len); 15654 15655 out_not_found: 15656 kfree(vpd_data); 15657 if (tp->board_part_number[0]) 15658 return; 15659 15660 out_no_vpd: 15661 if (tg3_asic_rev(tp) == ASIC_REV_5717) { 15662 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || 15663 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C) 15664 strcpy(tp->board_part_number, "BCM5717"); 15665 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718) 15666 strcpy(tp->board_part_number, "BCM5718"); 15667 else 15668 goto nomatch; 15669 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) { 15670 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780) 15671 strcpy(tp->board_part_number, "BCM57780"); 15672 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760) 15673 strcpy(tp->board_part_number, "BCM57760"); 15674 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790) 15675 strcpy(tp->board_part_number, "BCM57790"); 15676 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788) 15677 strcpy(tp->board_part_number, "BCM57788"); 15678 else 15679 goto nomatch; 15680 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) { 15681 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761) 15682 strcpy(tp->board_part_number, "BCM57761"); 15683 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765) 15684 strcpy(tp->board_part_number, "BCM57765"); 15685 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781) 15686 strcpy(tp->board_part_number, "BCM57781"); 15687 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785) 15688 strcpy(tp->board_part_number, "BCM57785"); 15689 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791) 15690 strcpy(tp->board_part_number, "BCM57791"); 15691 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795) 15692 strcpy(tp->board_part_number, "BCM57795"); 15693 else 15694 goto nomatch; 15695 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) { 15696 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762) 15697 strcpy(tp->board_part_number, "BCM57762"); 15698 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766) 15699 strcpy(tp->board_part_number, "BCM57766"); 15700 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782) 15701 strcpy(tp->board_part_number, "BCM57782"); 15702 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786) 15703 strcpy(tp->board_part_number, "BCM57786"); 15704 else 15705 goto nomatch; 15706 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) { 15707 strcpy(tp->board_part_number, "BCM95906"); 15708 } else { 15709 nomatch: 15710 strcpy(tp->board_part_number, "none"); 15711 } 15712 } 15713 15714 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset) 15715 { 15716 u32 val; 15717 15718 if (tg3_nvram_read(tp, offset, &val) || 15719 (val & 0xfc000000) != 0x0c000000 || 15720 tg3_nvram_read(tp, offset + 4, &val) || 15721 val != 0) 15722 return 0; 15723 15724 return 1; 15725 } 15726 15727 static void tg3_read_bc_ver(struct tg3 *tp) 15728 { 15729 u32 val, offset, start, ver_offset; 
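/*
 * A reading of the lookup below (not authoritative NVRAM
 * documentation): word 0x0c points at the bootcode image and word
 * 0x04 gives its start address.  A valid image's first word matches
 * 0x0c000000 under the 0xfc000000 mask; if its second word is zero,
 * a 16-byte version string is copied from the offset stored at
 * image+8 (rebased against the start address), otherwise major and
 * minor come from the pointer table entry at TG3_NVM_PTREV_BCVER.
 */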
15730 int i, dst_off; 15731 bool newver = false; 15732 15733 if (tg3_nvram_read(tp, 0xc, &offset) || 15734 tg3_nvram_read(tp, 0x4, &start)) 15735 return; 15736 15737 offset = tg3_nvram_logical_addr(tp, offset); 15738 15739 if (tg3_nvram_read(tp, offset, &val)) 15740 return; 15741 15742 if ((val & 0xfc000000) == 0x0c000000) { 15743 if (tg3_nvram_read(tp, offset + 4, &val)) 15744 return; 15745 15746 if (val == 0) 15747 newver = true; 15748 } 15749 15750 dst_off = strlen(tp->fw_ver); 15751 15752 if (newver) { 15753 if (TG3_VER_SIZE - dst_off < 16 || 15754 tg3_nvram_read(tp, offset + 8, &ver_offset)) 15755 return; 15756 15757 offset = offset + ver_offset - start; 15758 for (i = 0; i < 16; i += 4) { 15759 __be32 v; 15760 if (tg3_nvram_read_be32(tp, offset + i, &v)) 15761 return; 15762 15763 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v)); 15764 } 15765 } else { 15766 u32 major, minor; 15767 15768 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset)) 15769 return; 15770 15771 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >> 15772 TG3_NVM_BCVER_MAJSFT; 15773 minor = ver_offset & TG3_NVM_BCVER_MINMSK; 15774 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off, 15775 "v%d.%02d", major, minor); 15776 } 15777 } 15778 15779 static void tg3_read_hwsb_ver(struct tg3 *tp) 15780 { 15781 u32 val, major, minor; 15782 15783 /* Use native endian representation */ 15784 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val)) 15785 return; 15786 15787 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >> 15788 TG3_NVM_HWSB_CFG1_MAJSFT; 15789 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >> 15790 TG3_NVM_HWSB_CFG1_MINSFT; 15791 15792 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor); 15793 } 15794 15795 static void tg3_read_sb_ver(struct tg3 *tp, u32 val) 15796 { 15797 u32 offset, major, minor, build; 15798 15799 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1); 15800 15801 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1) 15802 return; 15803 15804 switch (val & TG3_EEPROM_SB_REVISION_MASK) { 15805 case TG3_EEPROM_SB_REVISION_0: 15806 offset = TG3_EEPROM_SB_F1R0_EDH_OFF; 15807 break; 15808 case TG3_EEPROM_SB_REVISION_2: 15809 offset = TG3_EEPROM_SB_F1R2_EDH_OFF; 15810 break; 15811 case TG3_EEPROM_SB_REVISION_3: 15812 offset = TG3_EEPROM_SB_F1R3_EDH_OFF; 15813 break; 15814 case TG3_EEPROM_SB_REVISION_4: 15815 offset = TG3_EEPROM_SB_F1R4_EDH_OFF; 15816 break; 15817 case TG3_EEPROM_SB_REVISION_5: 15818 offset = TG3_EEPROM_SB_F1R5_EDH_OFF; 15819 break; 15820 case TG3_EEPROM_SB_REVISION_6: 15821 offset = TG3_EEPROM_SB_F1R6_EDH_OFF; 15822 break; 15823 default: 15824 return; 15825 } 15826 15827 if (tg3_nvram_read(tp, offset, &val)) 15828 return; 15829 15830 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >> 15831 TG3_EEPROM_SB_EDH_BLD_SHFT; 15832 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >> 15833 TG3_EEPROM_SB_EDH_MAJ_SHFT; 15834 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK; 15835 15836 if (minor > 99 || build > 26) 15837 return; 15838 15839 offset = strlen(tp->fw_ver); 15840 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset, 15841 " v%d.%02d", major, minor); 15842 15843 if (build > 0) { 15844 offset = strlen(tp->fw_ver); 15845 if (offset < TG3_VER_SIZE - 1) 15846 tp->fw_ver[offset] = 'a' + build - 1; 15847 } 15848 } 15849 15850 static void tg3_read_mgmtfw_ver(struct tg3 *tp) 15851 { 15852 u32 val, offset, start; 15853 int i, vlen; 15854 15855 for (offset = TG3_NVM_DIR_START; 15856 offset < TG3_NVM_DIR_END; 15857 offset += TG3_NVM_DIRENT_SIZE) { 15858 if (tg3_nvram_read(tp, offset, &val)) 15859 return; 
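/*
 * Directory scan sketch: each TG3_NVM_DIRENT_SIZE-byte entry in the
 * NVRAM directory starts with a type word, and the loop stops at the
 * first entry typed TG3_NVM_DIRTYPE_ASFINI (the ASF management
 * firmware image); reaching TG3_NVM_DIR_END instead means no such
 * image exists and there is no management firmware version to report.
 */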
		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}

static void tg3_probe_ncsi(struct tg3 *tp)
{
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
		tg3_flag_set(tp, APE_HAS_NCSI);
}

static void tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_flag(tp, APE_HAS_NCSI))
		fwtype = "NCSI";
	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
		fwtype = "SMASH";
	else
		fwtype = "DASH";

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}

static void tg3_read_otp_ver(struct tg3 *tp)
{
	u32 val, val2;

	if (tg3_asic_rev(tp) != ASIC_REV_5762)
		return;

	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
	    TG3_OTP_MAGIC0_VALID(val)) {
		u64 val64 = (u64) val << 32 | val2;
		u32 ver = 0;
		int i, vlen;

		for (i = 0; i < 7; i++) {
			if ((val64 & 0xff) == 0)
				break;
			ver = val64 & 0xff;
			val64 >>= 8;
		}
		vlen = strlen(tp->fw_ver);
		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
	}
}

static void tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		tg3_read_otp_ver(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);

	if (tg3_flag(tp, ENABLE_ASF)) {
		if (tg3_flag(tp, ENABLE_APE)) {
			tg3_probe_ncsi(tp);
			if (!vpd_vers)
				tg3_read_dash_ver(tp);
		} else if (!vpd_vers) {
			tg3_read_mgmtfw_ver(tp);
		}
	}

	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}

static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}

static const struct pci_device_id tg3_write_reorder_chipsets[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};

static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode; set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other.
	 */
	pci_dev_put(peer);

	return peer;
}

static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);

		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
	    tg3_asic_rev(tp) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}

static bool tg3_10_100_only_device(struct tg3 *tp,
				   const struct pci_device_id *ent)
{
	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;

	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
		return true;

	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
				return true;
		} else {
			return true;
		}
	}

	return false;
}

static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
{
	u32 misc_ctrl_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err;

	/* Force memory write invalidate off. If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size. The Broadcom driver has this
	 * workaround but turns MWI off at all times and so never uses
	 * it. This seems to suggest that the workaround is insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* Important! -- Make sure register accesses are byteswapped
	 * correctly. Also, for those chips that require it, make
	 * sure that indirect register accesses are enabled before
	 * the first operation.
16183 */ 16184 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, 16185 &misc_ctrl_reg); 16186 tp->misc_host_ctrl |= (misc_ctrl_reg & 16187 MISC_HOST_CTRL_CHIPREV); 16188 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, 16189 tp->misc_host_ctrl); 16190 16191 tg3_detect_asic_rev(tp, misc_ctrl_reg); 16192 16193 /* If we have 5702/03 A1 or A2 on certain ICH chipsets, 16194 * we need to disable memory and use config. cycles 16195 * only to access all registers. The 5702/03 chips 16196 * can mistakenly decode the special cycles from the 16197 * ICH chipsets as memory write cycles, causing corruption 16198 * of register and memory space. Only certain ICH bridges 16199 * will drive special cycles with non-zero data during the 16200 * address phase which can fall within the 5703's address 16201 * range. This is not an ICH bug as the PCI spec allows 16202 * non-zero address during special cycles. However, only 16203 * these ICH bridges are known to drive non-zero addresses 16204 * during special cycles. 16205 * 16206 * Since special cycles do not cross PCI bridges, we only 16207 * enable this workaround if the 5703 is on the secondary 16208 * bus of these ICH bridges. 16209 */ 16210 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) || 16211 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) { 16212 static struct tg3_dev_id { 16213 u32 vendor; 16214 u32 device; 16215 u32 rev; 16216 } ich_chipsets[] = { 16217 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8, 16218 PCI_ANY_ID }, 16219 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8, 16220 PCI_ANY_ID }, 16221 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11, 16222 0xa }, 16223 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6, 16224 PCI_ANY_ID }, 16225 { }, 16226 }; 16227 struct tg3_dev_id *pci_id = &ich_chipsets[0]; 16228 struct pci_dev *bridge = NULL; 16229 16230 while (pci_id->vendor != 0) { 16231 bridge = pci_get_device(pci_id->vendor, pci_id->device, 16232 bridge); 16233 if (!bridge) { 16234 pci_id++; 16235 continue; 16236 } 16237 if (pci_id->rev != PCI_ANY_ID) { 16238 if (bridge->revision > pci_id->rev) 16239 continue; 16240 } 16241 if (bridge->subordinate && 16242 (bridge->subordinate->number == 16243 tp->pdev->bus->number)) { 16244 tg3_flag_set(tp, ICH_WORKAROUND); 16245 pci_dev_put(bridge); 16246 break; 16247 } 16248 } 16249 } 16250 16251 if (tg3_asic_rev(tp) == ASIC_REV_5701) { 16252 static struct tg3_dev_id { 16253 u32 vendor; 16254 u32 device; 16255 } bridge_chipsets[] = { 16256 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 }, 16257 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 }, 16258 { }, 16259 }; 16260 struct tg3_dev_id *pci_id = &bridge_chipsets[0]; 16261 struct pci_dev *bridge = NULL; 16262 16263 while (pci_id->vendor != 0) { 16264 bridge = pci_get_device(pci_id->vendor, 16265 pci_id->device, 16266 bridge); 16267 if (!bridge) { 16268 pci_id++; 16269 continue; 16270 } 16271 if (bridge->subordinate && 16272 (bridge->subordinate->number <= 16273 tp->pdev->bus->number) && 16274 (bridge->subordinate->busn_res.end >= 16275 tp->pdev->bus->number)) { 16276 tg3_flag_set(tp, 5701_DMA_BUG); 16277 pci_dev_put(bridge); 16278 break; 16279 } 16280 } 16281 } 16282 16283 /* The EPB bridge inside 5714, 5715, and 5780 cannot support 16284 * DMA addresses > 40-bit. This bridge may have other additional 16285 * 57xx devices behind it in some 4-port NIC designs for example. 16286 * Any tg3 device found behind the bridge will also need the 40-bit 16287 * DMA workaround. 
	 */
	if (tg3_flag(tp, 5780_CLASS)) {
		tg3_flag_set(tp, 40BIT_DMA_BUG);
		tp->msi_cap = tp->pdev->msi_cap;
	} else {
		struct pci_dev *bridge = NULL;

		do {
			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
						PCI_DEVICE_ID_SERVERWORKS_EPB,
						bridge);
			if (bridge && bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->busn_res.end >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 40BIT_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		} while (bridge);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tp->pdev_peer = tg3_find_peer(tp);

	/* Determine TSO capabilities */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
		; /* Do nothing. HW bug. */
	else if (tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, HW_TSO_3);
	else if (tg3_flag(tp, 5755_PLUS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5906)
		tg3_flag_set(tp, HW_TSO_2);
	else if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, HW_TSO_1);
		tg3_flag_set(tp, TSO_BUG);
		if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
		    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
			tg3_flag_clear(tp, TSO_BUG);
	} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		   tg3_asic_rev(tp) != ASIC_REV_5701 &&
		   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		tg3_flag_set(tp, FW_TSO);
		tg3_flag_set(tp, TSO_BUG);
		if (tg3_asic_rev(tp) == ASIC_REV_5705)
			tp->fw_needed = FIRMWARE_TG3TSO5;
		else
			tp->fw_needed = FIRMWARE_TG3TSO;
	}

	/* Selectively allow TSO based on operating conditions */
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3) ||
	    tg3_flag(tp, FW_TSO)) {
		/* For firmware TSO, assume ASF is disabled.
		 * We'll disable TSO later if we discover ASF
		 * is enabled in tg3_get_eeprom_hw_cfg().
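		 * (See the FW_TSO check following that call.)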
		 */
		tg3_flag_set(tp, TSO_CAPABLE);
	} else {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
		tp->fw_needed = FIRMWARE_TG3;

	if (tg3_asic_rev(tp) == ASIC_REV_57766)
		tp->fw_needed = FIRMWARE_TG357766;

	tp->irq_max = 1;

	if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, SUPPORT_MSI);
		if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
		    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
		    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
		     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
		     tp->pdev_peer == tp->pdev))
			tg3_flag_clear(tp, SUPPORT_MSI);

		if (tg3_flag(tp, 5755_PLUS) ||
		    tg3_asic_rev(tp) == ASIC_REV_5906) {
			tg3_flag_set(tp, 1SHOT_MSI);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			tg3_flag_set(tp, SUPPORT_MSIX);
			tp->irq_max = TG3_IRQ_MAX_VECS;
		}
	}

	tp->txq_max = 1;
	tp->rxq_max = 1;
	if (tp->irq_max > 1) {
		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);

		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			tp->txq_max = tp->irq_max - 1;
	}

	if (tg3_flag(tp, 5755_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5906)
		tg3_flag_set(tp, SHORT_DMA_BUG);

	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, LRG_PROD_RING_CAP);

	if (tg3_flag(tp, 57765_PLUS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
		tg3_flag_set(tp, USE_JUMBO_BDFLAG);

	if (!tg3_flag(tp, 5705_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_flag(tp, USE_JUMBO_BDFLAG))
		tg3_flag_set(tp, JUMBO_CAPABLE);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);

	if (pci_is_pcie(tp->pdev)) {
		u16 lnkctl;

		tg3_flag_set(tp, PCI_EXPRESS);

		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
				tg3_flag_clear(tp, HW_TSO_2);
				tg3_flag_clear(tp, TSO_CAPABLE);
			}
			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
				tg3_flag_set(tp, CLKREQ_BUG);
		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
			tg3_flag_set(tp, L1PLLPD_EN);
		}
	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		/* BCM5785 devices are effectively PCIe devices, and should
		 * follow PCIe codepaths, but do not have a PCIe capabilities
		 * section.
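		 * Flag them as PCI Express manually so they take those paths.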
		 */
		tg3_flag_set(tp, PCI_EXPRESS);
	} else if (!tg3_flag(tp, 5705_PLUS) ||
		   tg3_flag(tp, 5780_CLASS)) {
		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
		if (!tp->pcix_cap) {
			dev_err(&tp->pdev->dev,
				"Cannot find PCI-X capability, aborting\n");
			return -EIO;
		}

		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
			tg3_flag_set(tp, PCIX_MODE);
	}

	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause serious problems. We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
	if (pci_dev_present(tg3_write_reorder_chipsets) &&
	    !tg3_flag(tp, PCI_EXPRESS))
		tg3_flag_set(tp, MBOX_WRITE_REORDER);

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
			     &tp->pci_cacheline_sz);
	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
			     &tp->pci_lat_timer);
	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Important! -- It is critical that the PCI-X hw workaround
	 * situation is decided before the first MMIO register access.
	 */
	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to work around a bug.
		 */
		tg3_flag_set(tp, TXD_MBOX_HWBUG);

		/* If we are in PCI-X mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (tg3_flag(tp, PCIX_MODE)) {
			u32 pm_reg;

			tg3_flag_set(tp, PCIX_TARGET_HWBUG);

			/* The chip can have its power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev,
					      tp->pdev->pm_cap + PCI_PM_CTRL,
					      &pm_reg);
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev,
					       tp->pdev->pm_cap + PCI_PM_CTRL,
					       pm_reg);

			/* Also, force SERR#/PERR# in PCI command.
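			 * This enables parity error response and SERR#
			 * reporting.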
			 */
			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
		}
	}

	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tg3_flag_set(tp, PCI_HIGH_SPEED);
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tg3_flag_set(tp, PCI_32BIT);

	/* Chip-specific fixup from Broadcom driver */
	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
	}

	/* Default fast path register access methods */
	tp->read32 = tg3_read32;
	tp->write32 = tg3_write32;
	tp->read32_mbox = tg3_read32;
	tp->write32_mbox = tg3_write32;
	tp->write32_tx_mbox = tg3_write32;
	tp->write32_rx_mbox = tg3_write32;

	/* Various workaround register access methods */
	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
		tp->write32 = tg3_write_indirect_reg32;
	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
		 (tg3_flag(tp, PCI_EXPRESS) &&
		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
		/*
		 * Back to back register writes can cause problems on these
		 * chips; the workaround is to read back all reg writes
		 * except those to mailbox regs.
		 *
		 * See tg3_write_indirect_reg32().
		 */
		tp->write32 = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		if (tg3_flag(tp, MBOX_WRITE_REORDER))
			tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, ICH_WORKAROUND)) {
		tp->read32 = tg3_read_indirect_reg32;
		tp->write32 = tg3_write_indirect_reg32;
		tp->read32_mbox = tg3_read_indirect_mbox;
		tp->write32_mbox = tg3_write_indirect_mbox;
		tp->write32_tx_mbox = tg3_write_indirect_mbox;
		tp->write32_rx_mbox = tg3_write_indirect_mbox;

		iounmap(tp->regs);
		tp->regs = NULL;

		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd &= ~PCI_COMMAND_MEMORY;
		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	}
	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tp->read32_mbox = tg3_read32_mbox_5906;
		tp->write32_mbox = tg3_write32_mbox_5906;
		tp->write32_tx_mbox = tg3_write32_mbox_5906;
		tp->write32_rx_mbox = tg3_write32_mbox_5906;
	}

	if (tp->write32 == tg3_write_indirect_reg32 ||
	    (tg3_flag(tp, PCIX_MODE) &&
	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	      tg3_asic_rev(tp) == ASIC_REV_5701)))
		tg3_flag_set(tp, SRAM_USE_CONFIG);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed. Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
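	 * Re-enable it here before any SRAM access.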
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tg3_flag(tp, PCIX_MODE)) {
			pci_read_config_dword(tp->pdev,
					      tp->pcix_cap + PCI_X_STATUS,
					      &val);
			tp->pci_fn = val & 0x7;
		}
	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
		   tg3_asic_rev(tp) == ASIC_REV_5720) {
		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
			val = tr32(TG3_CPMU_STATUS);

		if (tg3_asic_rev(tp) == ASIC_REV_5717)
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
		else
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
				     TG3_CPMU_STATUS_FSHFT_5719;
	}

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		tp->write32_tx_mbox = tg3_write_flush_reg32;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLAG_IS_NIC flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
	 */
	tg3_get_eeprom_hw_cfg(tp);

	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
				 PCISTATE_ALLOW_APE_SHMEM_WR |
				 PCISTATE_ALLOW_APE_PSPACE_WR;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
				       pci_state_reg);

		tg3_ape_lock_init(tp);
		tp->ape_hb_interval =
			msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
	}

	/* Set up tp->grc_local_ctrl before calling
	 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
	 * will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_flag(tp, EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_CLASS))
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* Turn off the debug UART. */
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
		if (tg3_flag(tp, IS_NIC))
			/* Keep VMain power.
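			 * by driving GPIO0 high (OE0 | OUTPUT0).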
			 */
			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					      GRC_LCLCTRL_GPIO_OUTPUT0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5762)
		tp->grc_local_ctrl |=
			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;

	/* Switch out of Vaux if it is a NIC */
	tg3_pwrsrc_switch_to_vmain(tp);

	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, JUMBO_RING_ENABLE);

	/* Determine WakeOnLan speed to use. */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
		tg3_flag_clear(tp, WOL_SPEED_100MB);
	} else {
		tg3_flag_set(tp, WOL_SPEED_100MB);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906)
		tp->phy_flags |= TG3_PHYFLG_IS_FET;

	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;

	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;

	if (tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
		    tg3_asic_rev(tp) == ASIC_REV_5761) {
			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
		} else
			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		tp->phy_otp = tg3_read_otp_phycfg(tp);
		if (tp->phy_otp == 0)
			tp->phy_otp = TG3_OTP_DEFAULT;
	}

	if (tg3_flag(tp, CPMU_PRESENT))
		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
	else
		tp->mi_mode = MAC_MI_MODE_BASE;

	tp->coalesce_mode = 0;
	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

	/* Set these bits to enable statistics workaround. */
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		tg3_flag_set(tp, USE_PHYLIB);

	err = tg3_mdio_init(tp);
	if (err)
		return err;

	/* Initialize data/descriptor byte/word swapping.
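	 * Only the swap bits this chip actually uses are kept from the
	 * GRC_MODE value read back below; tp->grc_mode supplies the rest.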
	 */
	val = tr32(GRC_MODE);
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
			GRC_MODE_WORD_SWAP_B2HRX_DATA |
			GRC_MODE_B2HRX_ENABLE |
			GRC_MODE_HTX2B_ENABLE |
			GRC_MODE_HOST_STACKUP);
	else
		val &= GRC_MODE_HOST_STACKUP;

	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);

	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
	tw32(TG3PCI_REG_BASE_ADDR, 0);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
			void __iomem *sram_base;

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly. If the return
			 * value is bad, force enable the PCIX workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
		}
	}

	udelay(50);
	tg3_nvram_init(tp);

	/* If the device has an NVRAM, no need to load patch firmware */
	if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
	    !tg3_flag(tp, NO_NVRAM))
		tp->fw_needed = NULL;

	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tg3_flag_set(tp, IS_5788);

	if (!tg3_flag(tp, IS_5788) &&
	    tg3_asic_rev(tp) != ASIC_REV_5700)
		tg3_flag_set(tp, TAGGED_STATUS);
	if (tg3_flag(tp, TAGGED_STATUS)) {
		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
				      HOSTCC_MODE_CLRTICK_TXBD);

		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
				       tp->misc_host_ctrl);
	}

	/* Preserve the APE MAC_MODE bits */
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	else
		tp->mac_mode = 0;

	if (tg3_10_100_only_device(tp, ent))
		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;

	err = tg3_phy_probe(tp);
	if (err) {
		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
		/* ... but do not return immediately ... */
		tg3_mdio_fini(tp);
	}

	tg3_read_vpd(tp);
	tg3_read_fw_ver(tp);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		else
			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	}

	/* 5700 {AX,BX} chips have a broken status block link
	 * change bit implementation, so we must use the
	 * status register in those cases.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5700)
		tg3_flag_set(tp, USE_LINKCHG_REG);
	else
		tg3_flag_clear(tp, USE_LINKCHG_REG);

	/* The led_ctrl is set during tg3_phy_probe; here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		tg3_flag_set(tp, USE_LINKCHG_REG);
	}

	/* For all SERDES we poll the MAC status register. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		tg3_flag_set(tp, POLL_SERDES);
	else
		tg3_flag_clear(tp, POLL_SERDES);

	if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
		tg3_flag_set(tp, POLL_CPMU_LINK);

	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
	    tg3_flag(tp, PCIX_MODE)) {
		tp->rx_offset = NET_SKB_PAD;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		tp->rx_copy_thresh = ~(u16)0;
#endif
	}

	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;

	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;

	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to work around hw errata.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5755)
		tp->rx_std_max_post = 8;

	if (tg3_flag(tp, ASPM_WORKAROUND))
		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
				     PCIE_PWR_MGMT_L1_THRESH_MSK;

	return err;
}

static int tg3_get_device_address(struct tg3 *tp, u8 *addr)
{
	u32 hi, lo, mac_offset;
	int addr_ok = 0;
	int err;

	if (!eth_platform_get_mac_address(&tp->pdev->dev, addr))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		err = ssb_gige_get_macaddr(tp->pdev, addr);
		if (!err && is_valid_ether_addr(addr))
			return 0;
	}

	mac_offset = 0x7c;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {
		addr[0] = (hi >> 8) & 0xff;
		addr[1] = (hi >> 0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		addr[2] = (lo >> 24) & 0xff;
		addr[3] = (lo >> 16) & 0xff;
		addr[4] = (lo >> 8) & 0xff;
		addr[5] = (lo >> 0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(addr);
	}
	if (!addr_ok) {
		/* Next, try NVRAM.
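		 * using the chip-specific mac_offset selected above.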
		 */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			memcpy(&addr[0], ((char *)&hi) + 2, 2);
			memcpy(&addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			addr[5] = lo & 0xff;
			addr[4] = (lo >> 8) & 0xff;
			addr[3] = (lo >> 16) & 0xff;
			addr[2] = (lo >> 24) & 0xff;
			addr[1] = hi & 0xff;
			addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(addr))
		return -EINVAL;
	return 0;
}

#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects. We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			fallthrough;
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			fallthrough;
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			fallthrough;
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			fallthrough;
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			fallthrough;
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}

static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
			   int size, bool to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}

#define TEST_BUFFER_SIZE	0x2000

static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};

static int tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
		    tg3_asic_rev(tp) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
		    tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    tg3_asic_rev(tp) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (tg3_asic_rev(tp) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}
	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory without all of the byte
		 * enables turned on. This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning. In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success.
*/ 17394 ret = 0; 17395 break; 17396 } 17397 } 17398 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != 17399 DMA_RWCTRL_WRITE_BNDRY_16) { 17400 /* DMA test passed without adjusting DMA boundary, 17401 * now look for chipsets that are known to expose the 17402 * DMA bug without failing the test. 17403 */ 17404 if (pci_dev_present(tg3_dma_wait_state_chipsets)) { 17405 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; 17406 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; 17407 } else { 17408 /* Safe to use the calculated DMA boundary. */ 17409 tp->dma_rwctrl = saved_dma_rwctrl; 17410 } 17411 17412 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 17413 } 17414 17415 out: 17416 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma); 17417 out_nofree: 17418 return ret; 17419 } 17420 17421 static void tg3_init_bufmgr_config(struct tg3 *tp) 17422 { 17423 if (tg3_flag(tp, 57765_PLUS)) { 17424 tp->bufmgr_config.mbuf_read_dma_low_water = 17425 DEFAULT_MB_RDMA_LOW_WATER_5705; 17426 tp->bufmgr_config.mbuf_mac_rx_low_water = 17427 DEFAULT_MB_MACRX_LOW_WATER_57765; 17428 tp->bufmgr_config.mbuf_high_water = 17429 DEFAULT_MB_HIGH_WATER_57765; 17430 17431 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = 17432 DEFAULT_MB_RDMA_LOW_WATER_5705; 17433 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = 17434 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765; 17435 tp->bufmgr_config.mbuf_high_water_jumbo = 17436 DEFAULT_MB_HIGH_WATER_JUMBO_57765; 17437 } else if (tg3_flag(tp, 5705_PLUS)) { 17438 tp->bufmgr_config.mbuf_read_dma_low_water = 17439 DEFAULT_MB_RDMA_LOW_WATER_5705; 17440 tp->bufmgr_config.mbuf_mac_rx_low_water = 17441 DEFAULT_MB_MACRX_LOW_WATER_5705; 17442 tp->bufmgr_config.mbuf_high_water = 17443 DEFAULT_MB_HIGH_WATER_5705; 17444 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 17445 tp->bufmgr_config.mbuf_mac_rx_low_water = 17446 DEFAULT_MB_MACRX_LOW_WATER_5906; 17447 tp->bufmgr_config.mbuf_high_water = 17448 DEFAULT_MB_HIGH_WATER_5906; 17449 } 17450 17451 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = 17452 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780; 17453 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = 17454 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780; 17455 tp->bufmgr_config.mbuf_high_water_jumbo = 17456 DEFAULT_MB_HIGH_WATER_JUMBO_5780; 17457 } else { 17458 tp->bufmgr_config.mbuf_read_dma_low_water = 17459 DEFAULT_MB_RDMA_LOW_WATER; 17460 tp->bufmgr_config.mbuf_mac_rx_low_water = 17461 DEFAULT_MB_MACRX_LOW_WATER; 17462 tp->bufmgr_config.mbuf_high_water = 17463 DEFAULT_MB_HIGH_WATER; 17464 17465 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = 17466 DEFAULT_MB_RDMA_LOW_WATER_JUMBO; 17467 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = 17468 DEFAULT_MB_MACRX_LOW_WATER_JUMBO; 17469 tp->bufmgr_config.mbuf_high_water_jumbo = 17470 DEFAULT_MB_HIGH_WATER_JUMBO; 17471 } 17472 17473 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER; 17474 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER; 17475 } 17476 17477 static char *tg3_phy_string(struct tg3 *tp) 17478 { 17479 switch (tp->phy_id & TG3_PHY_ID_MASK) { 17480 case TG3_PHY_ID_BCM5400: return "5400"; 17481 case TG3_PHY_ID_BCM5401: return "5401"; 17482 case TG3_PHY_ID_BCM5411: return "5411"; 17483 case TG3_PHY_ID_BCM5701: return "5701"; 17484 case TG3_PHY_ID_BCM5703: return "5703"; 17485 case TG3_PHY_ID_BCM5704: return "5704"; 17486 case TG3_PHY_ID_BCM5705: return "5705"; 17487 case TG3_PHY_ID_BCM5750: return "5750"; 17488 case TG3_PHY_ID_BCM5752: return "5752"; 17489 case TG3_PHY_ID_BCM5714: return "5714"; 17490 case TG3_PHY_ID_BCM5780: return "5780"; 17491 case 
TG3_PHY_ID_BCM5755: return "5755"; 17492 case TG3_PHY_ID_BCM5787: return "5787"; 17493 case TG3_PHY_ID_BCM5784: return "5784"; 17494 case TG3_PHY_ID_BCM5756: return "5722/5756"; 17495 case TG3_PHY_ID_BCM5906: return "5906"; 17496 case TG3_PHY_ID_BCM5761: return "5761"; 17497 case TG3_PHY_ID_BCM5718C: return "5718C"; 17498 case TG3_PHY_ID_BCM5718S: return "5718S"; 17499 case TG3_PHY_ID_BCM57765: return "57765"; 17500 case TG3_PHY_ID_BCM5719C: return "5719C"; 17501 case TG3_PHY_ID_BCM5720C: return "5720C"; 17502 case TG3_PHY_ID_BCM5762: return "5762C"; 17503 case TG3_PHY_ID_BCM8002: return "8002/serdes"; 17504 case 0: return "serdes"; 17505 default: return "unknown"; 17506 } 17507 } 17508 17509 static char *tg3_bus_string(struct tg3 *tp, char *str) 17510 { 17511 if (tg3_flag(tp, PCI_EXPRESS)) { 17512 strcpy(str, "PCI Express"); 17513 return str; 17514 } else if (tg3_flag(tp, PCIX_MODE)) { 17515 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f; 17516 17517 strcpy(str, "PCIX:"); 17518 17519 if ((clock_ctrl == 7) || 17520 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) == 17521 GRC_MISC_CFG_BOARD_ID_5704CIOBE)) 17522 strcat(str, "133MHz"); 17523 else if (clock_ctrl == 0) 17524 strcat(str, "33MHz"); 17525 else if (clock_ctrl == 2) 17526 strcat(str, "50MHz"); 17527 else if (clock_ctrl == 4) 17528 strcat(str, "66MHz"); 17529 else if (clock_ctrl == 6) 17530 strcat(str, "100MHz"); 17531 } else { 17532 strcpy(str, "PCI:"); 17533 if (tg3_flag(tp, PCI_HIGH_SPEED)) 17534 strcat(str, "66MHz"); 17535 else 17536 strcat(str, "33MHz"); 17537 } 17538 if (tg3_flag(tp, PCI_32BIT)) 17539 strcat(str, ":32-bit"); 17540 else 17541 strcat(str, ":64-bit"); 17542 return str; 17543 } 17544 17545 static void tg3_init_coal(struct tg3 *tp) 17546 { 17547 struct ethtool_coalesce *ec = &tp->coal; 17548 17549 memset(ec, 0, sizeof(*ec)); 17550 ec->cmd = ETHTOOL_GCOALESCE; 17551 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS; 17552 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS; 17553 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES; 17554 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES; 17555 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT; 17556 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT; 17557 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT; 17558 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT; 17559 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS; 17560 17561 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD | 17562 HOSTCC_MODE_CLRTICK_TXBD)) { 17563 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS; 17564 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS; 17565 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS; 17566 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS; 17567 } 17568 17569 if (tg3_flag(tp, 5705_PLUS)) { 17570 ec->rx_coalesce_usecs_irq = 0; 17571 ec->tx_coalesce_usecs_irq = 0; 17572 ec->stats_block_coalesce_usecs = 0; 17573 } 17574 } 17575 17576 static int tg3_init_one(struct pci_dev *pdev, 17577 const struct pci_device_id *ent) 17578 { 17579 struct net_device *dev; 17580 struct tg3 *tp; 17581 int i, err; 17582 u32 sndmbx, rcvmbx, intmbx; 17583 char str[40]; 17584 u64 dma_mask, persist_dma_mask; 17585 netdev_features_t features = 0; 17586 u8 addr[ETH_ALEN] __aligned(2); 17587 17588 err = pci_enable_device(pdev); 17589 if (err) { 17590 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 17591 return err; 17592 } 17593 17594 err = pci_request_regions(pdev, DRV_MODULE_NAME); 17595 if (err) { 17596 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); 
17597 goto err_out_disable_pdev; 17598 } 17599 17600 pci_set_master(pdev); 17601 17602 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS); 17603 if (!dev) { 17604 err = -ENOMEM; 17605 goto err_out_free_res; 17606 } 17607 17608 SET_NETDEV_DEV(dev, &pdev->dev); 17609 17610 tp = netdev_priv(dev); 17611 tp->pdev = pdev; 17612 tp->dev = dev; 17613 tp->rx_mode = TG3_DEF_RX_MODE; 17614 tp->tx_mode = TG3_DEF_TX_MODE; 17615 tp->irq_sync = 1; 17616 tp->pcierr_recovery = false; 17617 17618 if (tg3_debug > 0) 17619 tp->msg_enable = tg3_debug; 17620 else 17621 tp->msg_enable = TG3_DEF_MSG_ENABLE; 17622 17623 if (pdev_is_ssb_gige_core(pdev)) { 17624 tg3_flag_set(tp, IS_SSB_CORE); 17625 if (ssb_gige_must_flush_posted_writes(pdev)) 17626 tg3_flag_set(tp, FLUSH_POSTED_WRITES); 17627 if (ssb_gige_one_dma_at_once(pdev)) 17628 tg3_flag_set(tp, ONE_DMA_AT_ONCE); 17629 if (ssb_gige_have_roboswitch(pdev)) { 17630 tg3_flag_set(tp, USE_PHYLIB); 17631 tg3_flag_set(tp, ROBOSWITCH); 17632 } 17633 if (ssb_gige_is_rgmii(pdev)) 17634 tg3_flag_set(tp, RGMII_MODE); 17635 } 17636 17637 /* The word/byte swap controls here control register access byte 17638 * swapping. DMA data byte swapping is controlled in the GRC_MODE 17639 * setting below. 17640 */ 17641 tp->misc_host_ctrl = 17642 MISC_HOST_CTRL_MASK_PCI_INT | 17643 MISC_HOST_CTRL_WORD_SWAP | 17644 MISC_HOST_CTRL_INDIR_ACCESS | 17645 MISC_HOST_CTRL_PCISTATE_RW; 17646 17647 /* The NONFRM (non-frame) byte/word swap controls take effect 17648 * on descriptor entries, anything which isn't packet data. 17649 * 17650 * The StrongARM chips on the board (one for tx, one for rx) 17651 * are running in big-endian mode. 17652 */ 17653 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA | 17654 GRC_MODE_WSWAP_NONFRM_DATA); 17655 #ifdef __BIG_ENDIAN 17656 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA; 17657 #endif 17658 spin_lock_init(&tp->lock); 17659 spin_lock_init(&tp->indirect_lock); 17660 INIT_WORK(&tp->reset_task, tg3_reset_task); 17661 17662 tp->regs = pci_ioremap_bar(pdev, BAR_0); 17663 if (!tp->regs) { 17664 dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); 17665 err = -ENOMEM; 17666 goto err_out_free_dev; 17667 } 17668 17669 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || 17670 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E || 17671 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S || 17672 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE || 17673 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || 17674 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C || 17675 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || 17676 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || 17677 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 || 17678 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 || 17679 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 || 17680 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 || 17681 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 || 17682 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 || 17683 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) { 17684 tg3_flag_set(tp, ENABLE_APE); 17685 tp->aperegs = pci_ioremap_bar(pdev, BAR_2); 17686 if (!tp->aperegs) { 17687 dev_err(&pdev->dev, 17688 "Cannot map APE registers, aborting\n"); 17689 err = -ENOMEM; 17690 goto err_out_iounmap; 17691 } 17692 } 17693 17694 tp->rx_pending = TG3_DEF_RX_RING_PENDING; 17695 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING; 17696 17697 dev->ethtool_ops = &tg3_ethtool_ops; 17698 dev->watchdog_timeo = TG3_TX_TIMEOUT; 17699 dev->netdev_ops = &tg3_netdev_ops; 17700 dev->irq = 
	err = tg3_get_invariants(tp, ent);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
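	/* Summary of the mask selection below:
	 *
	 *   5788-class parts:     32-bit streaming and coherent masks
	 *   40BIT_DMA_BUG parts:  40-bit masks (with CONFIG_HIGHMEM the
	 *                         streaming mask is 64-bit and addresses
	 *                         are rechecked per packet as noted above)
	 *   everything else:      64-bit masks
	 */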
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = dma_set_mask(&pdev->dev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = dma_set_coherent_mask(&pdev->dev,
						    persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
		    tg3_asic_rev(tp) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
			 NETIF_F_HW_VLAN_CTAG_RX;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow
	 * INT-PHY loopback for the remaining devices.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;
	dev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 60 - 9000 or 1500, depending on hardware */
	dev->min_mtu = TG3_MIN_MTU;
	dev->max_mtu = TG3_MAX_MTU(tp);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp, addr);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}
	eth_hw_addr_set(dev, addr);

	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		intmbx += 0x8;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we set
		 * up above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}
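	/*
	 * Sketch of what the loop above produces in the MSI-X case: the
	 * interrupt and rx-return mailboxes advance by one 8-byte slot
	 * (0x8) per vector, while the +0xc/-0x4 alternation walks the tx
	 * producer mailbox through both 4-byte halves of successive
	 * 8-byte slots; the driver only ever writes 32 bits of each
	 * producer index.
	 */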
	/*
	 * Reset the chip in case the UNDI or EFI driver did not shut it
	 * down cleanly.  Otherwise the DMA self test below will enable
	 * the WDMAC and we'll see (spurious) pending DMA on the PCI bus
	 * at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tg3_full_lock(tp, 0);
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, PTP_CAPABLE);

	tg3_timer_init(tp);

	tg3_carrier_off(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	if (tg3_flag(tp, PTP_CAPABLE)) {
		tg3_ptp_init(tp);
		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
						   &tp->pdev->dev);
		if (IS_ERR(tp->ptp_clock))
			tp->ptp_clock = NULL;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tg3_chip_rev_id(tp),
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
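	/* Save the fully initialised config space; tg3_io_slot_reset()
	 * restores this snapshot via pci_restore_state() after an
	 * AER/EEH-triggered reset.
	 */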
	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	return err;
}

static void tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		tg3_ptp_fini(tp);

		release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
	}
}

#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, true);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

unlock:
	rtnl_unlock();
	return err;
}

static int tg3_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp,
			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

unlock:
	rtnl_unlock();
	return err;
}
#endif /* CONFIG_PM_SLEEP */
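/* SIMPLE_DEV_PM_OPS() only references tg3_suspend/tg3_resume when
 * CONFIG_PM_SLEEP is enabled, matching the #ifdef around the two
 * functions above.
 */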
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);

static void tg3_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);

	tg3_reset_task_cancel(tp);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		dev_close(dev);

	if (system_state == SYSTEM_POWER_OFF)
		tg3_power_down(tp);

	rtnl_unlock();

	pci_disable_device(pdev);
}

/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	rtnl_lock();

	/* Could be a second call, or maybe we don't have a netdev yet */
	if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
		goto done;

	/* We needn't recover from a permanent error */
	if (state == pci_channel_io_frozen)
		tp->pcierr_recovery = true;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure) {
		if (netdev) {
			tg3_napi_enable(tp);
			dev_close(netdev);
		}
		err = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_disable_device(pdev);
	}

	rtnl_unlock();

	return err;
}

/**
 * tg3_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netdev || !netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
		tg3_napi_enable(tp);
		dev_close(netdev);
	}
	rtnl_unlock();

	return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netdev || !netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	tg3_phy_start(tp);

done:
	tp->pcierr_recovery = false;
	rtnl_unlock();
}
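/* AER/EEH recovery flow implemented by the handlers above:
 * ->error_detected() quiesces the device and asks the PCI core for a
 * slot reset, ->slot_reset() re-enables the device and restores its
 * config space, and ->resume() restarts the hardware and traffic once
 * the core declares recovery complete.
 */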
static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= &tg3_pm_ops,
	.shutdown	= tg3_shutdown,
};

module_pci_driver(tg3_driver);
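/* module_pci_driver() above expands to the module_init()/module_exit()
 * pair that registers and unregisters tg3_driver with the PCI core on
 * module load and unload.
 */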