1 /* 2 * Copyright 2010-2011 Calxeda, Inc. 3 * 4 * This program is free software; you can redistribute it and/or modify it 5 * under the terms and conditions of the GNU General Public License, 6 * version 2, as published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope it will be useful, but WITHOUT 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * more details. 12 * 13 * You should have received a copy of the GNU General Public License along with 14 * this program. If not, see <http://www.gnu.org/licenses/>. 15 */ 16 #include <linux/module.h> 17 #include <linux/init.h> 18 #include <linux/kernel.h> 19 #include <linux/circ_buf.h> 20 #include <linux/interrupt.h> 21 #include <linux/etherdevice.h> 22 #include <linux/platform_device.h> 23 #include <linux/skbuff.h> 24 #include <linux/ethtool.h> 25 #include <linux/if.h> 26 #include <linux/crc32.h> 27 #include <linux/dma-mapping.h> 28 #include <linux/slab.h> 29 30 /* XGMAC Register definitions */ 31 #define XGMAC_CONTROL 0x00000000 /* MAC Configuration */ 32 #define XGMAC_FRAME_FILTER 0x00000004 /* MAC Frame Filter */ 33 #define XGMAC_FLOW_CTRL 0x00000018 /* MAC Flow Control */ 34 #define XGMAC_VLAN_TAG 0x0000001C /* VLAN Tags */ 35 #define XGMAC_VERSION 0x00000020 /* Version */ 36 #define XGMAC_VLAN_INCL 0x00000024 /* VLAN tag for tx frames */ 37 #define XGMAC_LPI_CTRL 0x00000028 /* LPI Control and Status */ 38 #define XGMAC_LPI_TIMER 0x0000002C /* LPI Timers Control */ 39 #define XGMAC_TX_PACE 0x00000030 /* Transmit Pace and Stretch */ 40 #define XGMAC_VLAN_HASH 0x00000034 /* VLAN Hash Table */ 41 #define XGMAC_DEBUG 0x00000038 /* Debug */ 42 #define XGMAC_INT_STAT 0x0000003C /* Interrupt and Control */ 43 #define XGMAC_ADDR_HIGH(reg) (0x00000040 + ((reg) * 8)) 44 #define XGMAC_ADDR_LOW(reg) (0x00000044 + ((reg) * 8)) 45 #define XGMAC_HASH(n) (0x00000300 + (n) * 4) /* HASH table regs */ 46 #define XGMAC_NUM_HASH 16 47 #define XGMAC_OMR 0x00000400 48 #define XGMAC_REMOTE_WAKE 0x00000700 /* Remote Wake-Up Frm Filter */ 49 #define XGMAC_PMT 0x00000704 /* PMT Control and Status */ 50 #define XGMAC_MMC_CTRL 0x00000800 /* XGMAC MMC Control */ 51 #define XGMAC_MMC_INTR_RX 0x00000804 /* Recieve Interrupt */ 52 #define XGMAC_MMC_INTR_TX 0x00000808 /* Transmit Interrupt */ 53 #define XGMAC_MMC_INTR_MASK_RX 0x0000080c /* Recieve Interrupt Mask */ 54 #define XGMAC_MMC_INTR_MASK_TX 0x00000810 /* Transmit Interrupt Mask */ 55 56 /* Hardware TX Statistics Counters */ 57 #define XGMAC_MMC_TXOCTET_GB_LO 0x00000814 58 #define XGMAC_MMC_TXOCTET_GB_HI 0x00000818 59 #define XGMAC_MMC_TXFRAME_GB_LO 0x0000081C 60 #define XGMAC_MMC_TXFRAME_GB_HI 0x00000820 61 #define XGMAC_MMC_TXBCFRAME_G 0x00000824 62 #define XGMAC_MMC_TXMCFRAME_G 0x0000082C 63 #define XGMAC_MMC_TXUCFRAME_GB 0x00000864 64 #define XGMAC_MMC_TXMCFRAME_GB 0x0000086C 65 #define XGMAC_MMC_TXBCFRAME_GB 0x00000874 66 #define XGMAC_MMC_TXUNDERFLOW 0x0000087C 67 #define XGMAC_MMC_TXOCTET_G_LO 0x00000884 68 #define XGMAC_MMC_TXOCTET_G_HI 0x00000888 69 #define XGMAC_MMC_TXFRAME_G_LO 0x0000088C 70 #define XGMAC_MMC_TXFRAME_G_HI 0x00000890 71 #define XGMAC_MMC_TXPAUSEFRAME 0x00000894 72 #define XGMAC_MMC_TXVLANFRAME 0x0000089C 73 74 /* Hardware RX Statistics Counters */ 75 #define XGMAC_MMC_RXFRAME_GB_LO 0x00000900 76 #define XGMAC_MMC_RXFRAME_GB_HI 0x00000904 77 #define XGMAC_MMC_RXOCTET_GB_LO 0x00000908 78 #define XGMAC_MMC_RXOCTET_GB_HI 0x0000090C 79 #define XGMAC_MMC_RXOCTET_G_LO 0x00000910 80 #define XGMAC_MMC_RXOCTET_G_HI 0x00000914 81 #define XGMAC_MMC_RXBCFRAME_G 0x00000918 82 #define XGMAC_MMC_RXMCFRAME_G 0x00000920 83 #define XGMAC_MMC_RXCRCERR 0x00000928 84 #define XGMAC_MMC_RXRUNT 0x00000930 85 #define XGMAC_MMC_RXJABBER 0x00000934 86 #define XGMAC_MMC_RXUCFRAME_G 0x00000970 87 #define XGMAC_MMC_RXLENGTHERR 0x00000978 88 #define XGMAC_MMC_RXPAUSEFRAME 0x00000988 89 #define XGMAC_MMC_RXOVERFLOW 0x00000990 90 #define XGMAC_MMC_RXVLANFRAME 0x00000998 91 #define XGMAC_MMC_RXWATCHDOG 0x000009a0 92 93 /* DMA Control and Status Registers */ 94 #define XGMAC_DMA_BUS_MODE 0x00000f00 /* Bus Mode */ 95 #define XGMAC_DMA_TX_POLL 0x00000f04 /* Transmit Poll Demand */ 96 #define XGMAC_DMA_RX_POLL 0x00000f08 /* Received Poll Demand */ 97 #define XGMAC_DMA_RX_BASE_ADDR 0x00000f0c /* Receive List Base */ 98 #define XGMAC_DMA_TX_BASE_ADDR 0x00000f10 /* Transmit List Base */ 99 #define XGMAC_DMA_STATUS 0x00000f14 /* Status Register */ 100 #define XGMAC_DMA_CONTROL 0x00000f18 /* Ctrl (Operational Mode) */ 101 #define XGMAC_DMA_INTR_ENA 0x00000f1c /* Interrupt Enable */ 102 #define XGMAC_DMA_MISS_FRAME_CTR 0x00000f20 /* Missed Frame Counter */ 103 #define XGMAC_DMA_RI_WDOG_TIMER 0x00000f24 /* RX Intr Watchdog Timer */ 104 #define XGMAC_DMA_AXI_BUS 0x00000f28 /* AXI Bus Mode */ 105 #define XGMAC_DMA_AXI_STATUS 0x00000f2C /* AXI Status */ 106 #define XGMAC_DMA_HW_FEATURE 0x00000f58 /* Enabled Hardware Features */ 107 108 #define XGMAC_ADDR_AE 0x80000000 109 #define XGMAC_MAX_FILTER_ADDR 31 110 111 /* PMT Control and Status */ 112 #define XGMAC_PMT_POINTER_RESET 0x80000000 113 #define XGMAC_PMT_GLBL_UNICAST 0x00000200 114 #define XGMAC_PMT_WAKEUP_RX_FRM 0x00000040 115 #define XGMAC_PMT_MAGIC_PKT 0x00000020 116 #define XGMAC_PMT_WAKEUP_FRM_EN 0x00000004 117 #define XGMAC_PMT_MAGIC_PKT_EN 0x00000002 118 #define XGMAC_PMT_POWERDOWN 0x00000001 119 120 #define XGMAC_CONTROL_SPD 0x40000000 /* Speed control */ 121 #define XGMAC_CONTROL_SPD_MASK 0x60000000 122 #define XGMAC_CONTROL_SPD_1G 0x60000000 123 #define XGMAC_CONTROL_SPD_2_5G 0x40000000 124 #define XGMAC_CONTROL_SPD_10G 0x00000000 125 #define XGMAC_CONTROL_SARC 0x10000000 /* Source Addr Insert/Replace */ 126 #define XGMAC_CONTROL_SARK_MASK 0x18000000 127 #define XGMAC_CONTROL_CAR 0x04000000 /* CRC Addition/Replacement */ 128 #define XGMAC_CONTROL_CAR_MASK 0x06000000 129 #define XGMAC_CONTROL_DP 0x01000000 /* Disable Padding */ 130 #define XGMAC_CONTROL_WD 0x00800000 /* Disable Watchdog on rx */ 131 #define XGMAC_CONTROL_JD 0x00400000 /* Jabber disable */ 132 #define XGMAC_CONTROL_JE 0x00100000 /* Jumbo frame */ 133 #define XGMAC_CONTROL_LM 0x00001000 /* Loop-back mode */ 134 #define XGMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */ 135 #define XGMAC_CONTROL_ACS 0x00000080 /* Automatic Pad/FCS Strip */ 136 #define XGMAC_CONTROL_DDIC 0x00000010 /* Disable Deficit Idle Count */ 137 #define XGMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */ 138 #define XGMAC_CONTROL_RE 0x00000004 /* Receiver Enable */ 139 140 /* XGMAC Frame Filter defines */ 141 #define XGMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */ 142 #define XGMAC_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */ 143 #define XGMAC_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */ 144 #define XGMAC_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */ 145 #define XGMAC_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */ 146 #define XGMAC_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */ 147 #define XGMAC_FRAME_FILTER_SAIF 0x00000100 /* Inverse Filtering */ 148 #define XGMAC_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */ 149 #define XGMAC_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */ 150 #define XGMAC_FRAME_FILTER_VHF 0x00000800 /* VLAN Hash Filter */ 151 #define XGMAC_FRAME_FILTER_VPF 0x00001000 /* VLAN Perfect Filter */ 152 #define XGMAC_FRAME_FILTER_RA 0x80000000 /* Receive all mode */ 153 154 /* XGMAC FLOW CTRL defines */ 155 #define XGMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */ 156 #define XGMAC_FLOW_CTRL_PT_SHIFT 16 157 #define XGMAC_FLOW_CTRL_DZQP 0x00000080 /* Disable Zero-Quanta Phase */ 158 #define XGMAC_FLOW_CTRL_PLT 0x00000020 /* Pause Low Threshhold */ 159 #define XGMAC_FLOW_CTRL_PLT_MASK 0x00000030 /* PLT MASK */ 160 #define XGMAC_FLOW_CTRL_UP 0x00000008 /* Unicast Pause Frame Detect */ 161 #define XGMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */ 162 #define XGMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */ 163 #define XGMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */ 164 165 /* XGMAC_INT_STAT reg */ 166 #define XGMAC_INT_STAT_PMT 0x0080 /* PMT Interrupt Status */ 167 #define XGMAC_INT_STAT_LPI 0x0040 /* LPI Interrupt Status */ 168 169 /* DMA Bus Mode register defines */ 170 #define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */ 171 #define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */ 172 #define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */ 173 #define DMA_BUS_MODE_ATDS 0x00000080 /* Alternate Descriptor Size */ 174 175 /* Programmable burst length */ 176 #define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */ 177 #define DMA_BUS_MODE_PBL_SHIFT 8 178 #define DMA_BUS_MODE_FB 0x00010000 /* Fixed burst */ 179 #define DMA_BUS_MODE_RPBL_MASK 0x003e0000 /* Rx-Programmable Burst Len */ 180 #define DMA_BUS_MODE_RPBL_SHIFT 17 181 #define DMA_BUS_MODE_USP 0x00800000 182 #define DMA_BUS_MODE_8PBL 0x01000000 183 #define DMA_BUS_MODE_AAL 0x02000000 184 185 /* DMA Bus Mode register defines */ 186 #define DMA_BUS_PR_RATIO_MASK 0x0000c000 /* Rx/Tx priority ratio */ 187 #define DMA_BUS_PR_RATIO_SHIFT 14 188 #define DMA_BUS_FB 0x00010000 /* Fixed Burst */ 189 190 /* DMA Control register defines */ 191 #define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */ 192 #define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */ 193 #define DMA_CONTROL_DFF 0x01000000 /* Disable flush of rx frames */ 194 195 /* DMA Normal interrupt */ 196 #define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */ 197 #define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */ 198 #define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */ 199 #define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */ 200 #define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */ 201 #define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */ 202 #define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */ 203 #define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */ 204 #define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */ 205 #define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */ 206 #define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */ 207 #define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */ 208 #define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavail */ 209 #define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */ 210 #define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */ 211 212 #define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \ 213 DMA_INTR_ENA_TUE) 214 215 #define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \ 216 DMA_INTR_ENA_RWE | DMA_INTR_ENA_RSE | \ 217 DMA_INTR_ENA_RUE | DMA_INTR_ENA_UNE | \ 218 DMA_INTR_ENA_OVE | DMA_INTR_ENA_TJE | \ 219 DMA_INTR_ENA_TSE) 220 221 /* DMA default interrupt mask */ 222 #define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL) 223 224 /* DMA Status register defines */ 225 #define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */ 226 #define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */ 227 #define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */ 228 #define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */ 229 #define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */ 230 #define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */ 231 #define DMA_STATUS_TS_SHIFT 20 232 #define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */ 233 #define DMA_STATUS_RS_SHIFT 17 234 #define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */ 235 #define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */ 236 #define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */ 237 #define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */ 238 #define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */ 239 #define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */ 240 #define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */ 241 #define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */ 242 #define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */ 243 #define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */ 244 #define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */ 245 #define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */ 246 #define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavail */ 247 #define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */ 248 #define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */ 249 250 /* Common MAC defines */ 251 #define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */ 252 #define MAC_ENABLE_RX 0x00000004 /* Receiver Enable */ 253 254 /* XGMAC Operation Mode Register */ 255 #define XGMAC_OMR_TSF 0x00200000 /* TX FIFO Store and Forward */ 256 #define XGMAC_OMR_FTF 0x00100000 /* Flush Transmit FIFO */ 257 #define XGMAC_OMR_TTC 0x00020000 /* Transmit Threshhold Ctrl */ 258 #define XGMAC_OMR_TTC_MASK 0x00030000 259 #define XGMAC_OMR_RFD 0x00006000 /* FC Deactivation Threshhold */ 260 #define XGMAC_OMR_RFD_MASK 0x00007000 /* FC Deact Threshhold MASK */ 261 #define XGMAC_OMR_RFA 0x00000600 /* FC Activation Threshhold */ 262 #define XGMAC_OMR_RFA_MASK 0x00000E00 /* FC Act Threshhold MASK */ 263 #define XGMAC_OMR_EFC 0x00000100 /* Enable Hardware FC */ 264 #define XGMAC_OMR_FEF 0x00000080 /* Forward Error Frames */ 265 #define XGMAC_OMR_DT 0x00000040 /* Drop TCP/IP csum Errors */ 266 #define XGMAC_OMR_RSF 0x00000020 /* RX FIFO Store and Forward */ 267 #define XGMAC_OMR_RTC_256 0x00000018 /* RX Threshhold Ctrl */ 268 #define XGMAC_OMR_RTC_MASK 0x00000018 /* RX Threshhold Ctrl MASK */ 269 270 /* XGMAC HW Features Register */ 271 #define DMA_HW_FEAT_TXCOESEL 0x00010000 /* TX Checksum offload */ 272 273 #define XGMAC_MMC_CTRL_CNT_FRZ 0x00000008 274 275 /* XGMAC Descriptor Defines */ 276 #define MAX_DESC_BUF_SZ (0x2000 - 8) 277 278 #define RXDESC_EXT_STATUS 0x00000001 279 #define RXDESC_CRC_ERR 0x00000002 280 #define RXDESC_RX_ERR 0x00000008 281 #define RXDESC_RX_WDOG 0x00000010 282 #define RXDESC_FRAME_TYPE 0x00000020 283 #define RXDESC_GIANT_FRAME 0x00000080 284 #define RXDESC_LAST_SEG 0x00000100 285 #define RXDESC_FIRST_SEG 0x00000200 286 #define RXDESC_VLAN_FRAME 0x00000400 287 #define RXDESC_OVERFLOW_ERR 0x00000800 288 #define RXDESC_LENGTH_ERR 0x00001000 289 #define RXDESC_SA_FILTER_FAIL 0x00002000 290 #define RXDESC_DESCRIPTOR_ERR 0x00004000 291 #define RXDESC_ERROR_SUMMARY 0x00008000 292 #define RXDESC_FRAME_LEN_OFFSET 16 293 #define RXDESC_FRAME_LEN_MASK 0x3fff0000 294 #define RXDESC_DA_FILTER_FAIL 0x40000000 295 296 #define RXDESC1_END_RING 0x00008000 297 298 #define RXDESC_IP_PAYLOAD_MASK 0x00000003 299 #define RXDESC_IP_PAYLOAD_UDP 0x00000001 300 #define RXDESC_IP_PAYLOAD_TCP 0x00000002 301 #define RXDESC_IP_PAYLOAD_ICMP 0x00000003 302 #define RXDESC_IP_HEADER_ERR 0x00000008 303 #define RXDESC_IP_PAYLOAD_ERR 0x00000010 304 #define RXDESC_IPV4_PACKET 0x00000040 305 #define RXDESC_IPV6_PACKET 0x00000080 306 #define TXDESC_UNDERFLOW_ERR 0x00000001 307 #define TXDESC_JABBER_TIMEOUT 0x00000002 308 #define TXDESC_LOCAL_FAULT 0x00000004 309 #define TXDESC_REMOTE_FAULT 0x00000008 310 #define TXDESC_VLAN_FRAME 0x00000010 311 #define TXDESC_FRAME_FLUSHED 0x00000020 312 #define TXDESC_IP_HEADER_ERR 0x00000040 313 #define TXDESC_PAYLOAD_CSUM_ERR 0x00000080 314 #define TXDESC_ERROR_SUMMARY 0x00008000 315 #define TXDESC_SA_CTRL_INSERT 0x00040000 316 #define TXDESC_SA_CTRL_REPLACE 0x00080000 317 #define TXDESC_2ND_ADDR_CHAINED 0x00100000 318 #define TXDESC_END_RING 0x00200000 319 #define TXDESC_CSUM_IP 0x00400000 320 #define TXDESC_CSUM_IP_PAYLD 0x00800000 321 #define TXDESC_CSUM_ALL 0x00C00000 322 #define TXDESC_CRC_EN_REPLACE 0x01000000 323 #define TXDESC_CRC_EN_APPEND 0x02000000 324 #define TXDESC_DISABLE_PAD 0x04000000 325 #define TXDESC_FIRST_SEG 0x10000000 326 #define TXDESC_LAST_SEG 0x20000000 327 #define TXDESC_INTERRUPT 0x40000000 328 329 #define DESC_OWN 0x80000000 330 #define DESC_BUFFER1_SZ_MASK 0x00001fff 331 #define DESC_BUFFER2_SZ_MASK 0x1fff0000 332 #define DESC_BUFFER2_SZ_OFFSET 16 333 334 struct xgmac_dma_desc { 335 __le32 flags; 336 __le32 buf_size; 337 __le32 buf1_addr; /* Buffer 1 Address Pointer */ 338 __le32 buf2_addr; /* Buffer 2 Address Pointer */ 339 __le32 ext_status; 340 __le32 res[3]; 341 }; 342 343 struct xgmac_extra_stats { 344 /* Transmit errors */ 345 unsigned long tx_jabber; 346 unsigned long tx_frame_flushed; 347 unsigned long tx_payload_error; 348 unsigned long tx_ip_header_error; 349 unsigned long tx_local_fault; 350 unsigned long tx_remote_fault; 351 /* Receive errors */ 352 unsigned long rx_watchdog; 353 unsigned long rx_da_filter_fail; 354 unsigned long rx_sa_filter_fail; 355 unsigned long rx_payload_error; 356 unsigned long rx_ip_header_error; 357 /* Tx/Rx IRQ errors */ 358 unsigned long tx_undeflow; 359 unsigned long tx_process_stopped; 360 unsigned long rx_buf_unav; 361 unsigned long rx_process_stopped; 362 unsigned long tx_early; 363 unsigned long fatal_bus_error; 364 }; 365 366 struct xgmac_priv { 367 struct xgmac_dma_desc *dma_rx; 368 struct sk_buff **rx_skbuff; 369 unsigned int rx_tail; 370 unsigned int rx_head; 371 372 struct xgmac_dma_desc *dma_tx; 373 struct sk_buff **tx_skbuff; 374 unsigned int tx_head; 375 unsigned int tx_tail; 376 377 void __iomem *base; 378 struct sk_buff_head rx_recycle; 379 unsigned int dma_buf_sz; 380 dma_addr_t dma_rx_phy; 381 dma_addr_t dma_tx_phy; 382 383 struct net_device *dev; 384 struct device *device; 385 struct napi_struct napi; 386 387 struct xgmac_extra_stats xstats; 388 389 spinlock_t stats_lock; 390 int pmt_irq; 391 char rx_pause; 392 char tx_pause; 393 int wolopts; 394 }; 395 396 /* XGMAC Configuration Settings */ 397 #define MAX_MTU 9000 398 #define PAUSE_TIME 0x400 399 400 #define DMA_RX_RING_SZ 256 401 #define DMA_TX_RING_SZ 128 402 /* minimum number of free TX descriptors required to wake up TX process */ 403 #define TX_THRESH (DMA_TX_RING_SZ/4) 404 405 /* DMA descriptor ring helpers */ 406 #define dma_ring_incr(n, s) (((n) + 1) & ((s) - 1)) 407 #define dma_ring_space(h, t, s) CIRC_SPACE(h, t, s) 408 #define dma_ring_cnt(h, t, s) CIRC_CNT(h, t, s) 409 410 /* XGMAC Descriptor Access Helpers */ 411 static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz) 412 { 413 if (buf_sz > MAX_DESC_BUF_SZ) 414 p->buf_size = cpu_to_le32(MAX_DESC_BUF_SZ | 415 (buf_sz - MAX_DESC_BUF_SZ) << DESC_BUFFER2_SZ_OFFSET); 416 else 417 p->buf_size = cpu_to_le32(buf_sz); 418 } 419 420 static inline int desc_get_buf_len(struct xgmac_dma_desc *p) 421 { 422 u32 len = cpu_to_le32(p->flags); 423 return (len & DESC_BUFFER1_SZ_MASK) + 424 ((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET); 425 } 426 427 static inline void desc_init_rx_desc(struct xgmac_dma_desc *p, int ring_size, 428 int buf_sz) 429 { 430 struct xgmac_dma_desc *end = p + ring_size - 1; 431 432 memset(p, 0, sizeof(*p) * ring_size); 433 434 for (; p <= end; p++) 435 desc_set_buf_len(p, buf_sz); 436 437 end->buf_size |= cpu_to_le32(RXDESC1_END_RING); 438 } 439 440 static inline void desc_init_tx_desc(struct xgmac_dma_desc *p, u32 ring_size) 441 { 442 memset(p, 0, sizeof(*p) * ring_size); 443 p[ring_size - 1].flags = cpu_to_le32(TXDESC_END_RING); 444 } 445 446 static inline int desc_get_owner(struct xgmac_dma_desc *p) 447 { 448 return le32_to_cpu(p->flags) & DESC_OWN; 449 } 450 451 static inline void desc_set_rx_owner(struct xgmac_dma_desc *p) 452 { 453 /* Clear all fields and set the owner */ 454 p->flags = cpu_to_le32(DESC_OWN); 455 } 456 457 static inline void desc_set_tx_owner(struct xgmac_dma_desc *p, u32 flags) 458 { 459 u32 tmpflags = le32_to_cpu(p->flags); 460 tmpflags &= TXDESC_END_RING; 461 tmpflags |= flags | DESC_OWN; 462 p->flags = cpu_to_le32(tmpflags); 463 } 464 465 static inline int desc_get_tx_ls(struct xgmac_dma_desc *p) 466 { 467 return le32_to_cpu(p->flags) & TXDESC_LAST_SEG; 468 } 469 470 static inline u32 desc_get_buf_addr(struct xgmac_dma_desc *p) 471 { 472 return le32_to_cpu(p->buf1_addr); 473 } 474 475 static inline void desc_set_buf_addr(struct xgmac_dma_desc *p, 476 u32 paddr, int len) 477 { 478 p->buf1_addr = cpu_to_le32(paddr); 479 if (len > MAX_DESC_BUF_SZ) 480 p->buf2_addr = cpu_to_le32(paddr + MAX_DESC_BUF_SZ); 481 } 482 483 static inline void desc_set_buf_addr_and_size(struct xgmac_dma_desc *p, 484 u32 paddr, int len) 485 { 486 desc_set_buf_len(p, len); 487 desc_set_buf_addr(p, paddr, len); 488 } 489 490 static inline int desc_get_rx_frame_len(struct xgmac_dma_desc *p) 491 { 492 u32 data = le32_to_cpu(p->flags); 493 u32 len = (data & RXDESC_FRAME_LEN_MASK) >> RXDESC_FRAME_LEN_OFFSET; 494 if (data & RXDESC_FRAME_TYPE) 495 len -= ETH_FCS_LEN; 496 497 return len; 498 } 499 500 static void xgmac_dma_flush_tx_fifo(void __iomem *ioaddr) 501 { 502 int timeout = 1000; 503 u32 reg = readl(ioaddr + XGMAC_OMR); 504 writel(reg | XGMAC_OMR_FTF, ioaddr + XGMAC_OMR); 505 506 while ((timeout-- > 0) && readl(ioaddr + XGMAC_OMR) & XGMAC_OMR_FTF) 507 udelay(1); 508 } 509 510 static int desc_get_tx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p) 511 { 512 struct xgmac_extra_stats *x = &priv->xstats; 513 u32 status = le32_to_cpu(p->flags); 514 515 if (!(status & TXDESC_ERROR_SUMMARY)) 516 return 0; 517 518 netdev_dbg(priv->dev, "tx desc error = 0x%08x\n", status); 519 if (status & TXDESC_JABBER_TIMEOUT) 520 x->tx_jabber++; 521 if (status & TXDESC_FRAME_FLUSHED) 522 x->tx_frame_flushed++; 523 if (status & TXDESC_UNDERFLOW_ERR) 524 xgmac_dma_flush_tx_fifo(priv->base); 525 if (status & TXDESC_IP_HEADER_ERR) 526 x->tx_ip_header_error++; 527 if (status & TXDESC_LOCAL_FAULT) 528 x->tx_local_fault++; 529 if (status & TXDESC_REMOTE_FAULT) 530 x->tx_remote_fault++; 531 if (status & TXDESC_PAYLOAD_CSUM_ERR) 532 x->tx_payload_error++; 533 534 return -1; 535 } 536 537 static int desc_get_rx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p) 538 { 539 struct xgmac_extra_stats *x = &priv->xstats; 540 int ret = CHECKSUM_UNNECESSARY; 541 u32 status = le32_to_cpu(p->flags); 542 u32 ext_status = le32_to_cpu(p->ext_status); 543 544 if (status & RXDESC_DA_FILTER_FAIL) { 545 netdev_dbg(priv->dev, "XGMAC RX : Dest Address filter fail\n"); 546 x->rx_da_filter_fail++; 547 return -1; 548 } 549 550 /* Check if packet has checksum already */ 551 if ((status & RXDESC_FRAME_TYPE) && (status & RXDESC_EXT_STATUS) && 552 !(ext_status & RXDESC_IP_PAYLOAD_MASK)) 553 ret = CHECKSUM_NONE; 554 555 netdev_dbg(priv->dev, "rx status - frame type=%d, csum = %d, ext stat %08x\n", 556 (status & RXDESC_FRAME_TYPE) ? 1 : 0, ret, ext_status); 557 558 if (!(status & RXDESC_ERROR_SUMMARY)) 559 return ret; 560 561 /* Handle any errors */ 562 if (status & (RXDESC_DESCRIPTOR_ERR | RXDESC_OVERFLOW_ERR | 563 RXDESC_GIANT_FRAME | RXDESC_LENGTH_ERR | RXDESC_CRC_ERR)) 564 return -1; 565 566 if (status & RXDESC_EXT_STATUS) { 567 if (ext_status & RXDESC_IP_HEADER_ERR) 568 x->rx_ip_header_error++; 569 if (ext_status & RXDESC_IP_PAYLOAD_ERR) 570 x->rx_payload_error++; 571 netdev_dbg(priv->dev, "IP checksum error - stat %08x\n", 572 ext_status); 573 return CHECKSUM_NONE; 574 } 575 576 return ret; 577 } 578 579 static inline void xgmac_mac_enable(void __iomem *ioaddr) 580 { 581 u32 value = readl(ioaddr + XGMAC_CONTROL); 582 value |= MAC_ENABLE_RX | MAC_ENABLE_TX; 583 writel(value, ioaddr + XGMAC_CONTROL); 584 585 value = readl(ioaddr + XGMAC_DMA_CONTROL); 586 value |= DMA_CONTROL_ST | DMA_CONTROL_SR; 587 writel(value, ioaddr + XGMAC_DMA_CONTROL); 588 } 589 590 static inline void xgmac_mac_disable(void __iomem *ioaddr) 591 { 592 u32 value = readl(ioaddr + XGMAC_DMA_CONTROL); 593 value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR); 594 writel(value, ioaddr + XGMAC_DMA_CONTROL); 595 596 value = readl(ioaddr + XGMAC_CONTROL); 597 value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX); 598 writel(value, ioaddr + XGMAC_CONTROL); 599 } 600 601 static void xgmac_set_mac_addr(void __iomem *ioaddr, unsigned char *addr, 602 int num) 603 { 604 u32 data; 605 606 data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0); 607 writel(data, ioaddr + XGMAC_ADDR_HIGH(num)); 608 data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; 609 writel(data, ioaddr + XGMAC_ADDR_LOW(num)); 610 } 611 612 static void xgmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, 613 int num) 614 { 615 u32 hi_addr, lo_addr; 616 617 /* Read the MAC address from the hardware */ 618 hi_addr = readl(ioaddr + XGMAC_ADDR_HIGH(num)); 619 lo_addr = readl(ioaddr + XGMAC_ADDR_LOW(num)); 620 621 /* Extract the MAC address from the high and low words */ 622 addr[0] = lo_addr & 0xff; 623 addr[1] = (lo_addr >> 8) & 0xff; 624 addr[2] = (lo_addr >> 16) & 0xff; 625 addr[3] = (lo_addr >> 24) & 0xff; 626 addr[4] = hi_addr & 0xff; 627 addr[5] = (hi_addr >> 8) & 0xff; 628 } 629 630 static int xgmac_set_flow_ctrl(struct xgmac_priv *priv, int rx, int tx) 631 { 632 u32 reg; 633 unsigned int flow = 0; 634 635 priv->rx_pause = rx; 636 priv->tx_pause = tx; 637 638 if (rx || tx) { 639 if (rx) 640 flow |= XGMAC_FLOW_CTRL_RFE; 641 if (tx) 642 flow |= XGMAC_FLOW_CTRL_TFE; 643 644 flow |= XGMAC_FLOW_CTRL_PLT | XGMAC_FLOW_CTRL_UP; 645 flow |= (PAUSE_TIME << XGMAC_FLOW_CTRL_PT_SHIFT); 646 647 writel(flow, priv->base + XGMAC_FLOW_CTRL); 648 649 reg = readl(priv->base + XGMAC_OMR); 650 reg |= XGMAC_OMR_EFC; 651 writel(reg, priv->base + XGMAC_OMR); 652 } else { 653 writel(0, priv->base + XGMAC_FLOW_CTRL); 654 655 reg = readl(priv->base + XGMAC_OMR); 656 reg &= ~XGMAC_OMR_EFC; 657 writel(reg, priv->base + XGMAC_OMR); 658 } 659 660 return 0; 661 } 662 663 static void xgmac_rx_refill(struct xgmac_priv *priv) 664 { 665 struct xgmac_dma_desc *p; 666 dma_addr_t paddr; 667 668 while (dma_ring_space(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ) > 1) { 669 int entry = priv->rx_head; 670 struct sk_buff *skb; 671 672 p = priv->dma_rx + entry; 673 674 if (priv->rx_skbuff[entry] == NULL) { 675 skb = __skb_dequeue(&priv->rx_recycle); 676 if (skb == NULL) 677 skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz); 678 if (unlikely(skb == NULL)) 679 break; 680 681 priv->rx_skbuff[entry] = skb; 682 paddr = dma_map_single(priv->device, skb->data, 683 priv->dma_buf_sz, DMA_FROM_DEVICE); 684 desc_set_buf_addr(p, paddr, priv->dma_buf_sz); 685 } 686 687 netdev_dbg(priv->dev, "rx ring: head %d, tail %d\n", 688 priv->rx_head, priv->rx_tail); 689 690 priv->rx_head = dma_ring_incr(priv->rx_head, DMA_RX_RING_SZ); 691 desc_set_rx_owner(p); 692 } 693 } 694 695 /** 696 * init_xgmac_dma_desc_rings - init the RX/TX descriptor rings 697 * @dev: net device structure 698 * Description: this function initializes the DMA RX/TX descriptors 699 * and allocates the socket buffers. 700 */ 701 static int xgmac_dma_desc_rings_init(struct net_device *dev) 702 { 703 struct xgmac_priv *priv = netdev_priv(dev); 704 unsigned int bfsize; 705 706 /* Set the Buffer size according to the MTU; 707 * indeed, in case of jumbo we need to bump-up the buffer sizes. 708 */ 709 bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN + 64, 710 64); 711 712 netdev_dbg(priv->dev, "mtu [%d] bfsize [%d]\n", dev->mtu, bfsize); 713 714 priv->rx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_RX_RING_SZ, 715 GFP_KERNEL); 716 if (!priv->rx_skbuff) 717 return -ENOMEM; 718 719 priv->dma_rx = dma_alloc_coherent(priv->device, 720 DMA_RX_RING_SZ * 721 sizeof(struct xgmac_dma_desc), 722 &priv->dma_rx_phy, 723 GFP_KERNEL); 724 if (!priv->dma_rx) 725 goto err_dma_rx; 726 727 priv->tx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_TX_RING_SZ, 728 GFP_KERNEL); 729 if (!priv->tx_skbuff) 730 goto err_tx_skb; 731 732 priv->dma_tx = dma_alloc_coherent(priv->device, 733 DMA_TX_RING_SZ * 734 sizeof(struct xgmac_dma_desc), 735 &priv->dma_tx_phy, 736 GFP_KERNEL); 737 if (!priv->dma_tx) 738 goto err_dma_tx; 739 740 netdev_dbg(priv->dev, "DMA desc rings: virt addr (Rx %p, " 741 "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n", 742 priv->dma_rx, priv->dma_tx, 743 (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy); 744 745 priv->rx_tail = 0; 746 priv->rx_head = 0; 747 priv->dma_buf_sz = bfsize; 748 desc_init_rx_desc(priv->dma_rx, DMA_RX_RING_SZ, priv->dma_buf_sz); 749 xgmac_rx_refill(priv); 750 751 priv->tx_tail = 0; 752 priv->tx_head = 0; 753 desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ); 754 755 writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR); 756 writel(priv->dma_rx_phy, priv->base + XGMAC_DMA_RX_BASE_ADDR); 757 758 return 0; 759 760 err_dma_tx: 761 kfree(priv->tx_skbuff); 762 err_tx_skb: 763 dma_free_coherent(priv->device, 764 DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc), 765 priv->dma_rx, priv->dma_rx_phy); 766 err_dma_rx: 767 kfree(priv->rx_skbuff); 768 return -ENOMEM; 769 } 770 771 static void xgmac_free_rx_skbufs(struct xgmac_priv *priv) 772 { 773 int i; 774 struct xgmac_dma_desc *p; 775 776 if (!priv->rx_skbuff) 777 return; 778 779 for (i = 0; i < DMA_RX_RING_SZ; i++) { 780 if (priv->rx_skbuff[i] == NULL) 781 continue; 782 783 p = priv->dma_rx + i; 784 dma_unmap_single(priv->device, desc_get_buf_addr(p), 785 priv->dma_buf_sz, DMA_FROM_DEVICE); 786 dev_kfree_skb_any(priv->rx_skbuff[i]); 787 priv->rx_skbuff[i] = NULL; 788 } 789 } 790 791 static void xgmac_free_tx_skbufs(struct xgmac_priv *priv) 792 { 793 int i, f; 794 struct xgmac_dma_desc *p; 795 796 if (!priv->tx_skbuff) 797 return; 798 799 for (i = 0; i < DMA_TX_RING_SZ; i++) { 800 if (priv->tx_skbuff[i] == NULL) 801 continue; 802 803 p = priv->dma_tx + i; 804 dma_unmap_single(priv->device, desc_get_buf_addr(p), 805 desc_get_buf_len(p), DMA_TO_DEVICE); 806 807 for (f = 0; f < skb_shinfo(priv->tx_skbuff[i])->nr_frags; f++) { 808 p = priv->dma_tx + i++; 809 dma_unmap_page(priv->device, desc_get_buf_addr(p), 810 desc_get_buf_len(p), DMA_TO_DEVICE); 811 } 812 813 dev_kfree_skb_any(priv->tx_skbuff[i]); 814 priv->tx_skbuff[i] = NULL; 815 } 816 } 817 818 static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv) 819 { 820 /* Release the DMA TX/RX socket buffers */ 821 xgmac_free_rx_skbufs(priv); 822 xgmac_free_tx_skbufs(priv); 823 824 /* Free the consistent memory allocated for descriptor rings */ 825 if (priv->dma_tx) { 826 dma_free_coherent(priv->device, 827 DMA_TX_RING_SZ * sizeof(struct xgmac_dma_desc), 828 priv->dma_tx, priv->dma_tx_phy); 829 priv->dma_tx = NULL; 830 } 831 if (priv->dma_rx) { 832 dma_free_coherent(priv->device, 833 DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc), 834 priv->dma_rx, priv->dma_rx_phy); 835 priv->dma_rx = NULL; 836 } 837 kfree(priv->rx_skbuff); 838 priv->rx_skbuff = NULL; 839 kfree(priv->tx_skbuff); 840 priv->tx_skbuff = NULL; 841 } 842 843 /** 844 * xgmac_tx: 845 * @priv: private driver structure 846 * Description: it reclaims resources after transmission completes. 847 */ 848 static void xgmac_tx_complete(struct xgmac_priv *priv) 849 { 850 int i; 851 void __iomem *ioaddr = priv->base; 852 853 writel(DMA_STATUS_TU | DMA_STATUS_NIS, ioaddr + XGMAC_DMA_STATUS); 854 855 while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) { 856 unsigned int entry = priv->tx_tail; 857 struct sk_buff *skb = priv->tx_skbuff[entry]; 858 struct xgmac_dma_desc *p = priv->dma_tx + entry; 859 860 /* Check if the descriptor is owned by the DMA. */ 861 if (desc_get_owner(p)) 862 break; 863 864 /* Verify tx error by looking at the last segment */ 865 if (desc_get_tx_ls(p)) 866 desc_get_tx_status(priv, p); 867 868 netdev_dbg(priv->dev, "tx ring: curr %d, dirty %d\n", 869 priv->tx_head, priv->tx_tail); 870 871 dma_unmap_single(priv->device, desc_get_buf_addr(p), 872 desc_get_buf_len(p), DMA_TO_DEVICE); 873 874 priv->tx_skbuff[entry] = NULL; 875 priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ); 876 877 if (!skb) { 878 continue; 879 } 880 881 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 882 entry = priv->tx_tail = dma_ring_incr(priv->tx_tail, 883 DMA_TX_RING_SZ); 884 p = priv->dma_tx + priv->tx_tail; 885 886 dma_unmap_page(priv->device, desc_get_buf_addr(p), 887 desc_get_buf_len(p), DMA_TO_DEVICE); 888 } 889 890 /* 891 * If there's room in the queue (limit it to size) 892 * we add this skb back into the pool, 893 * if it's the right size. 894 */ 895 if ((skb_queue_len(&priv->rx_recycle) < 896 DMA_RX_RING_SZ) && 897 skb_recycle_check(skb, priv->dma_buf_sz)) 898 __skb_queue_head(&priv->rx_recycle, skb); 899 else 900 dev_kfree_skb(skb); 901 } 902 903 if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) > 904 TX_THRESH) 905 netif_wake_queue(priv->dev); 906 } 907 908 /** 909 * xgmac_tx_err: 910 * @priv: pointer to the private device structure 911 * Description: it cleans the descriptors and restarts the transmission 912 * in case of errors. 913 */ 914 static void xgmac_tx_err(struct xgmac_priv *priv) 915 { 916 u32 reg, value, inten; 917 918 netif_stop_queue(priv->dev); 919 920 inten = readl(priv->base + XGMAC_DMA_INTR_ENA); 921 writel(0, priv->base + XGMAC_DMA_INTR_ENA); 922 923 reg = readl(priv->base + XGMAC_DMA_CONTROL); 924 writel(reg & ~DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL); 925 do { 926 value = readl(priv->base + XGMAC_DMA_STATUS) & 0x700000; 927 } while (value && (value != 0x600000)); 928 929 xgmac_free_tx_skbufs(priv); 930 desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ); 931 priv->tx_tail = 0; 932 priv->tx_head = 0; 933 writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR); 934 writel(reg | DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL); 935 936 writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS, 937 priv->base + XGMAC_DMA_STATUS); 938 writel(inten, priv->base + XGMAC_DMA_INTR_ENA); 939 940 netif_wake_queue(priv->dev); 941 } 942 943 static int xgmac_hw_init(struct net_device *dev) 944 { 945 u32 value, ctrl; 946 int limit; 947 struct xgmac_priv *priv = netdev_priv(dev); 948 void __iomem *ioaddr = priv->base; 949 950 /* Save the ctrl register value */ 951 ctrl = readl(ioaddr + XGMAC_CONTROL) & XGMAC_CONTROL_SPD_MASK; 952 953 /* SW reset */ 954 value = DMA_BUS_MODE_SFT_RESET; 955 writel(value, ioaddr + XGMAC_DMA_BUS_MODE); 956 limit = 15000; 957 while (limit-- && 958 (readl(ioaddr + XGMAC_DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET)) 959 cpu_relax(); 960 if (limit < 0) 961 return -EBUSY; 962 963 value = (0x10 << DMA_BUS_MODE_PBL_SHIFT) | 964 (0x10 << DMA_BUS_MODE_RPBL_SHIFT) | 965 DMA_BUS_MODE_FB | DMA_BUS_MODE_ATDS | DMA_BUS_MODE_AAL; 966 writel(value, ioaddr + XGMAC_DMA_BUS_MODE); 967 968 /* Enable interrupts */ 969 writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS); 970 writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA); 971 972 /* XGMAC requires AXI bus init. This is a 'magic number' for now */ 973 writel(0x0077000E, ioaddr + XGMAC_DMA_AXI_BUS); 974 975 ctrl |= XGMAC_CONTROL_DDIC | XGMAC_CONTROL_JE | XGMAC_CONTROL_ACS | 976 XGMAC_CONTROL_CAR; 977 if (dev->features & NETIF_F_RXCSUM) 978 ctrl |= XGMAC_CONTROL_IPC; 979 writel(ctrl, ioaddr + XGMAC_CONTROL); 980 981 value = DMA_CONTROL_DFF; 982 writel(value, ioaddr + XGMAC_DMA_CONTROL); 983 984 /* Set the HW DMA mode and the COE */ 985 writel(XGMAC_OMR_TSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA | 986 XGMAC_OMR_RTC_256, 987 ioaddr + XGMAC_OMR); 988 989 /* Reset the MMC counters */ 990 writel(1, ioaddr + XGMAC_MMC_CTRL); 991 return 0; 992 } 993 994 /** 995 * xgmac_open - open entry point of the driver 996 * @dev : pointer to the device structure. 997 * Description: 998 * This function is the open entry point of the driver. 999 * Return value: 1000 * 0 on success and an appropriate (-)ve integer as defined in errno.h 1001 * file on failure. 1002 */ 1003 static int xgmac_open(struct net_device *dev) 1004 { 1005 int ret; 1006 struct xgmac_priv *priv = netdev_priv(dev); 1007 void __iomem *ioaddr = priv->base; 1008 1009 /* Check that the MAC address is valid. If its not, refuse 1010 * to bring the device up. The user must specify an 1011 * address using the following linux command: 1012 * ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx */ 1013 if (!is_valid_ether_addr(dev->dev_addr)) { 1014 eth_hw_addr_random(dev); 1015 netdev_dbg(priv->dev, "generated random MAC address %pM\n", 1016 dev->dev_addr); 1017 } 1018 1019 skb_queue_head_init(&priv->rx_recycle); 1020 memset(&priv->xstats, 0, sizeof(struct xgmac_extra_stats)); 1021 1022 /* Initialize the XGMAC and descriptors */ 1023 xgmac_hw_init(dev); 1024 xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0); 1025 xgmac_set_flow_ctrl(priv, priv->rx_pause, priv->tx_pause); 1026 1027 ret = xgmac_dma_desc_rings_init(dev); 1028 if (ret < 0) 1029 return ret; 1030 1031 /* Enable the MAC Rx/Tx */ 1032 xgmac_mac_enable(ioaddr); 1033 1034 napi_enable(&priv->napi); 1035 netif_start_queue(dev); 1036 1037 return 0; 1038 } 1039 1040 /** 1041 * xgmac_release - close entry point of the driver 1042 * @dev : device pointer. 1043 * Description: 1044 * This is the stop entry point of the driver. 1045 */ 1046 static int xgmac_stop(struct net_device *dev) 1047 { 1048 struct xgmac_priv *priv = netdev_priv(dev); 1049 1050 netif_stop_queue(dev); 1051 1052 if (readl(priv->base + XGMAC_DMA_INTR_ENA)) 1053 napi_disable(&priv->napi); 1054 1055 writel(0, priv->base + XGMAC_DMA_INTR_ENA); 1056 skb_queue_purge(&priv->rx_recycle); 1057 1058 /* Disable the MAC core */ 1059 xgmac_mac_disable(priv->base); 1060 1061 /* Release and free the Rx/Tx resources */ 1062 xgmac_free_dma_desc_rings(priv); 1063 1064 return 0; 1065 } 1066 1067 /** 1068 * xgmac_xmit: 1069 * @skb : the socket buffer 1070 * @dev : device pointer 1071 * Description : Tx entry point of the driver. 1072 */ 1073 static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev) 1074 { 1075 struct xgmac_priv *priv = netdev_priv(dev); 1076 unsigned int entry; 1077 int i; 1078 int nfrags = skb_shinfo(skb)->nr_frags; 1079 struct xgmac_dma_desc *desc, *first; 1080 unsigned int desc_flags; 1081 unsigned int len; 1082 dma_addr_t paddr; 1083 1084 if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) < 1085 (nfrags + 1)) { 1086 writel(DMA_INTR_DEFAULT_MASK | DMA_INTR_ENA_TIE, 1087 priv->base + XGMAC_DMA_INTR_ENA); 1088 netif_stop_queue(dev); 1089 return NETDEV_TX_BUSY; 1090 } 1091 1092 desc_flags = (skb->ip_summed == CHECKSUM_PARTIAL) ? 1093 TXDESC_CSUM_ALL : 0; 1094 entry = priv->tx_head; 1095 desc = priv->dma_tx + entry; 1096 first = desc; 1097 1098 len = skb_headlen(skb); 1099 paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE); 1100 if (dma_mapping_error(priv->device, paddr)) { 1101 dev_kfree_skb(skb); 1102 return -EIO; 1103 } 1104 priv->tx_skbuff[entry] = skb; 1105 desc_set_buf_addr_and_size(desc, paddr, len); 1106 1107 for (i = 0; i < nfrags; i++) { 1108 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1109 1110 len = frag->size; 1111 1112 paddr = skb_frag_dma_map(priv->device, frag, 0, len, 1113 DMA_TO_DEVICE); 1114 if (dma_mapping_error(priv->device, paddr)) { 1115 dev_kfree_skb(skb); 1116 return -EIO; 1117 } 1118 1119 entry = dma_ring_incr(entry, DMA_TX_RING_SZ); 1120 desc = priv->dma_tx + entry; 1121 priv->tx_skbuff[entry] = NULL; 1122 1123 desc_set_buf_addr_and_size(desc, paddr, len); 1124 if (i < (nfrags - 1)) 1125 desc_set_tx_owner(desc, desc_flags); 1126 } 1127 1128 /* Interrupt on completition only for the latest segment */ 1129 if (desc != first) 1130 desc_set_tx_owner(desc, desc_flags | 1131 TXDESC_LAST_SEG | TXDESC_INTERRUPT); 1132 else 1133 desc_flags |= TXDESC_LAST_SEG | TXDESC_INTERRUPT; 1134 1135 /* Set owner on first desc last to avoid race condition */ 1136 wmb(); 1137 desc_set_tx_owner(first, desc_flags | TXDESC_FIRST_SEG); 1138 1139 priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ); 1140 1141 writel(1, priv->base + XGMAC_DMA_TX_POLL); 1142 1143 return NETDEV_TX_OK; 1144 } 1145 1146 static int xgmac_rx(struct xgmac_priv *priv, int limit) 1147 { 1148 unsigned int entry; 1149 unsigned int count = 0; 1150 struct xgmac_dma_desc *p; 1151 1152 while (count < limit) { 1153 int ip_checksum; 1154 struct sk_buff *skb; 1155 int frame_len; 1156 1157 writel(DMA_STATUS_RI | DMA_STATUS_NIS, 1158 priv->base + XGMAC_DMA_STATUS); 1159 1160 entry = priv->rx_tail; 1161 p = priv->dma_rx + entry; 1162 if (desc_get_owner(p)) 1163 break; 1164 1165 count++; 1166 priv->rx_tail = dma_ring_incr(priv->rx_tail, DMA_RX_RING_SZ); 1167 1168 /* read the status of the incoming frame */ 1169 ip_checksum = desc_get_rx_status(priv, p); 1170 if (ip_checksum < 0) 1171 continue; 1172 1173 skb = priv->rx_skbuff[entry]; 1174 if (unlikely(!skb)) { 1175 netdev_err(priv->dev, "Inconsistent Rx descriptor chain\n"); 1176 break; 1177 } 1178 priv->rx_skbuff[entry] = NULL; 1179 1180 frame_len = desc_get_rx_frame_len(p); 1181 netdev_dbg(priv->dev, "RX frame size %d, COE status: %d\n", 1182 frame_len, ip_checksum); 1183 1184 skb_put(skb, frame_len); 1185 dma_unmap_single(priv->device, desc_get_buf_addr(p), 1186 frame_len, DMA_FROM_DEVICE); 1187 1188 skb->protocol = eth_type_trans(skb, priv->dev); 1189 skb->ip_summed = ip_checksum; 1190 if (ip_checksum == CHECKSUM_NONE) 1191 netif_receive_skb(skb); 1192 else 1193 napi_gro_receive(&priv->napi, skb); 1194 } 1195 1196 xgmac_rx_refill(priv); 1197 1198 writel(1, priv->base + XGMAC_DMA_RX_POLL); 1199 1200 return count; 1201 } 1202 1203 /** 1204 * xgmac_poll - xgmac poll method (NAPI) 1205 * @napi : pointer to the napi structure. 1206 * @budget : maximum number of packets that the current CPU can receive from 1207 * all interfaces. 1208 * Description : 1209 * This function implements the the reception process. 1210 * Also it runs the TX completion thread 1211 */ 1212 static int xgmac_poll(struct napi_struct *napi, int budget) 1213 { 1214 struct xgmac_priv *priv = container_of(napi, 1215 struct xgmac_priv, napi); 1216 int work_done = 0; 1217 1218 xgmac_tx_complete(priv); 1219 work_done = xgmac_rx(priv, budget); 1220 1221 if (work_done < budget) { 1222 napi_complete(napi); 1223 writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA); 1224 } 1225 return work_done; 1226 } 1227 1228 /** 1229 * xgmac_tx_timeout 1230 * @dev : Pointer to net device structure 1231 * Description: this function is called when a packet transmission fails to 1232 * complete within a reasonable tmrate. The driver will mark the error in the 1233 * netdev structure and arrange for the device to be reset to a sane state 1234 * in order to transmit a new packet. 1235 */ 1236 static void xgmac_tx_timeout(struct net_device *dev) 1237 { 1238 struct xgmac_priv *priv = netdev_priv(dev); 1239 1240 /* Clear Tx resources and restart transmitting again */ 1241 xgmac_tx_err(priv); 1242 } 1243 1244 /** 1245 * xgmac_set_rx_mode - entry point for multicast addressing 1246 * @dev : pointer to the device structure 1247 * Description: 1248 * This function is a driver entry point which gets called by the kernel 1249 * whenever multicast addresses must be enabled/disabled. 1250 * Return value: 1251 * void. 1252 */ 1253 static void xgmac_set_rx_mode(struct net_device *dev) 1254 { 1255 int i; 1256 struct xgmac_priv *priv = netdev_priv(dev); 1257 void __iomem *ioaddr = priv->base; 1258 unsigned int value = 0; 1259 u32 hash_filter[XGMAC_NUM_HASH]; 1260 int reg = 1; 1261 struct netdev_hw_addr *ha; 1262 bool use_hash = false; 1263 1264 netdev_dbg(priv->dev, "# mcasts %d, # unicast %d\n", 1265 netdev_mc_count(dev), netdev_uc_count(dev)); 1266 1267 if (dev->flags & IFF_PROMISC) { 1268 writel(XGMAC_FRAME_FILTER_PR, ioaddr + XGMAC_FRAME_FILTER); 1269 return; 1270 } 1271 1272 memset(hash_filter, 0, sizeof(hash_filter)); 1273 1274 if (netdev_uc_count(dev) > XGMAC_MAX_FILTER_ADDR) { 1275 use_hash = true; 1276 value |= XGMAC_FRAME_FILTER_HUC | XGMAC_FRAME_FILTER_HPF; 1277 } 1278 netdev_for_each_uc_addr(ha, dev) { 1279 if (use_hash) { 1280 u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23; 1281 1282 /* The most significant 4 bits determine the register to 1283 * use (H/L) while the other 5 bits determine the bit 1284 * within the register. */ 1285 hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); 1286 } else { 1287 xgmac_set_mac_addr(ioaddr, ha->addr, reg); 1288 reg++; 1289 } 1290 } 1291 1292 if (dev->flags & IFF_ALLMULTI) { 1293 value |= XGMAC_FRAME_FILTER_PM; 1294 goto out; 1295 } 1296 1297 if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) { 1298 use_hash = true; 1299 value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF; 1300 } 1301 netdev_for_each_mc_addr(ha, dev) { 1302 if (use_hash) { 1303 u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23; 1304 1305 /* The most significant 4 bits determine the register to 1306 * use (H/L) while the other 5 bits determine the bit 1307 * within the register. */ 1308 hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); 1309 } else { 1310 xgmac_set_mac_addr(ioaddr, ha->addr, reg); 1311 reg++; 1312 } 1313 } 1314 1315 out: 1316 for (i = 0; i < XGMAC_NUM_HASH; i++) 1317 writel(hash_filter[i], ioaddr + XGMAC_HASH(i)); 1318 1319 writel(value, ioaddr + XGMAC_FRAME_FILTER); 1320 } 1321 1322 /** 1323 * xgmac_change_mtu - entry point to change MTU size for the device. 1324 * @dev : device pointer. 1325 * @new_mtu : the new MTU size for the device. 1326 * Description: the Maximum Transfer Unit (MTU) is used by the network layer 1327 * to drive packet transmission. Ethernet has an MTU of 1500 octets 1328 * (ETH_DATA_LEN). This value can be changed with ifconfig. 1329 * Return value: 1330 * 0 on success and an appropriate (-)ve integer as defined in errno.h 1331 * file on failure. 1332 */ 1333 static int xgmac_change_mtu(struct net_device *dev, int new_mtu) 1334 { 1335 struct xgmac_priv *priv = netdev_priv(dev); 1336 int old_mtu; 1337 1338 if ((new_mtu < 46) || (new_mtu > MAX_MTU)) { 1339 netdev_err(priv->dev, "invalid MTU, max MTU is: %d\n", MAX_MTU); 1340 return -EINVAL; 1341 } 1342 1343 old_mtu = dev->mtu; 1344 dev->mtu = new_mtu; 1345 1346 /* return early if the buffer sizes will not change */ 1347 if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN) 1348 return 0; 1349 if (old_mtu == new_mtu) 1350 return 0; 1351 1352 /* Stop everything, get ready to change the MTU */ 1353 if (!netif_running(dev)) 1354 return 0; 1355 1356 /* Bring the interface down and then back up */ 1357 xgmac_stop(dev); 1358 return xgmac_open(dev); 1359 } 1360 1361 static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id) 1362 { 1363 u32 intr_status; 1364 struct net_device *dev = (struct net_device *)dev_id; 1365 struct xgmac_priv *priv = netdev_priv(dev); 1366 void __iomem *ioaddr = priv->base; 1367 1368 intr_status = readl(ioaddr + XGMAC_INT_STAT); 1369 if (intr_status & XGMAC_INT_STAT_PMT) { 1370 netdev_dbg(priv->dev, "received Magic frame\n"); 1371 /* clear the PMT bits 5 and 6 by reading the PMT */ 1372 readl(ioaddr + XGMAC_PMT); 1373 } 1374 return IRQ_HANDLED; 1375 } 1376 1377 static irqreturn_t xgmac_interrupt(int irq, void *dev_id) 1378 { 1379 u32 intr_status; 1380 bool tx_err = false; 1381 struct net_device *dev = (struct net_device *)dev_id; 1382 struct xgmac_priv *priv = netdev_priv(dev); 1383 struct xgmac_extra_stats *x = &priv->xstats; 1384 1385 /* read the status register (CSR5) */ 1386 intr_status = readl(priv->base + XGMAC_DMA_STATUS); 1387 intr_status &= readl(priv->base + XGMAC_DMA_INTR_ENA); 1388 writel(intr_status, priv->base + XGMAC_DMA_STATUS); 1389 1390 /* It displays the DMA process states (CSR5 register) */ 1391 /* ABNORMAL interrupts */ 1392 if (unlikely(intr_status & DMA_STATUS_AIS)) { 1393 if (intr_status & DMA_STATUS_TJT) { 1394 netdev_err(priv->dev, "transmit jabber\n"); 1395 x->tx_jabber++; 1396 } 1397 if (intr_status & DMA_STATUS_RU) 1398 x->rx_buf_unav++; 1399 if (intr_status & DMA_STATUS_RPS) { 1400 netdev_err(priv->dev, "receive process stopped\n"); 1401 x->rx_process_stopped++; 1402 } 1403 if (intr_status & DMA_STATUS_ETI) { 1404 netdev_err(priv->dev, "transmit early interrupt\n"); 1405 x->tx_early++; 1406 } 1407 if (intr_status & DMA_STATUS_TPS) { 1408 netdev_err(priv->dev, "transmit process stopped\n"); 1409 x->tx_process_stopped++; 1410 tx_err = true; 1411 } 1412 if (intr_status & DMA_STATUS_FBI) { 1413 netdev_err(priv->dev, "fatal bus error\n"); 1414 x->fatal_bus_error++; 1415 tx_err = true; 1416 } 1417 1418 if (tx_err) 1419 xgmac_tx_err(priv); 1420 } 1421 1422 /* TX/RX NORMAL interrupts */ 1423 if (intr_status & (DMA_STATUS_RI | DMA_STATUS_TU)) { 1424 writel(DMA_INTR_ABNORMAL, priv->base + XGMAC_DMA_INTR_ENA); 1425 napi_schedule(&priv->napi); 1426 } 1427 1428 return IRQ_HANDLED; 1429 } 1430 1431 #ifdef CONFIG_NET_POLL_CONTROLLER 1432 /* Polling receive - used by NETCONSOLE and other diagnostic tools 1433 * to allow network I/O with interrupts disabled. */ 1434 static void xgmac_poll_controller(struct net_device *dev) 1435 { 1436 disable_irq(dev->irq); 1437 xgmac_interrupt(dev->irq, dev); 1438 enable_irq(dev->irq); 1439 } 1440 #endif 1441 1442 static struct rtnl_link_stats64 * 1443 xgmac_get_stats64(struct net_device *dev, 1444 struct rtnl_link_stats64 *storage) 1445 { 1446 struct xgmac_priv *priv = netdev_priv(dev); 1447 void __iomem *base = priv->base; 1448 u32 count; 1449 1450 spin_lock_bh(&priv->stats_lock); 1451 writel(XGMAC_MMC_CTRL_CNT_FRZ, base + XGMAC_MMC_CTRL); 1452 1453 storage->rx_bytes = readl(base + XGMAC_MMC_RXOCTET_G_LO); 1454 storage->rx_bytes |= (u64)(readl(base + XGMAC_MMC_RXOCTET_G_HI)) << 32; 1455 1456 storage->rx_packets = readl(base + XGMAC_MMC_RXFRAME_GB_LO); 1457 storage->multicast = readl(base + XGMAC_MMC_RXMCFRAME_G); 1458 storage->rx_crc_errors = readl(base + XGMAC_MMC_RXCRCERR); 1459 storage->rx_length_errors = readl(base + XGMAC_MMC_RXLENGTHERR); 1460 storage->rx_missed_errors = readl(base + XGMAC_MMC_RXOVERFLOW); 1461 1462 storage->tx_bytes = readl(base + XGMAC_MMC_TXOCTET_G_LO); 1463 storage->tx_bytes |= (u64)(readl(base + XGMAC_MMC_TXOCTET_G_HI)) << 32; 1464 1465 count = readl(base + XGMAC_MMC_TXFRAME_GB_LO); 1466 storage->tx_errors = count - readl(base + XGMAC_MMC_TXFRAME_G_LO); 1467 storage->tx_packets = count; 1468 storage->tx_fifo_errors = readl(base + XGMAC_MMC_TXUNDERFLOW); 1469 1470 writel(0, base + XGMAC_MMC_CTRL); 1471 spin_unlock_bh(&priv->stats_lock); 1472 return storage; 1473 } 1474 1475 static int xgmac_set_mac_address(struct net_device *dev, void *p) 1476 { 1477 struct xgmac_priv *priv = netdev_priv(dev); 1478 void __iomem *ioaddr = priv->base; 1479 struct sockaddr *addr = p; 1480 1481 if (!is_valid_ether_addr(addr->sa_data)) 1482 return -EADDRNOTAVAIL; 1483 1484 dev->addr_assign_type &= ~NET_ADDR_RANDOM; 1485 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 1486 1487 xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0); 1488 1489 return 0; 1490 } 1491 1492 static int xgmac_set_features(struct net_device *dev, netdev_features_t features) 1493 { 1494 u32 ctrl; 1495 struct xgmac_priv *priv = netdev_priv(dev); 1496 void __iomem *ioaddr = priv->base; 1497 u32 changed = dev->features ^ features; 1498 1499 if (!(changed & NETIF_F_RXCSUM)) 1500 return 0; 1501 1502 ctrl = readl(ioaddr + XGMAC_CONTROL); 1503 if (features & NETIF_F_RXCSUM) 1504 ctrl |= XGMAC_CONTROL_IPC; 1505 else 1506 ctrl &= ~XGMAC_CONTROL_IPC; 1507 writel(ctrl, ioaddr + XGMAC_CONTROL); 1508 1509 return 0; 1510 } 1511 1512 static const struct net_device_ops xgmac_netdev_ops = { 1513 .ndo_open = xgmac_open, 1514 .ndo_start_xmit = xgmac_xmit, 1515 .ndo_stop = xgmac_stop, 1516 .ndo_change_mtu = xgmac_change_mtu, 1517 .ndo_set_rx_mode = xgmac_set_rx_mode, 1518 .ndo_tx_timeout = xgmac_tx_timeout, 1519 .ndo_get_stats64 = xgmac_get_stats64, 1520 #ifdef CONFIG_NET_POLL_CONTROLLER 1521 .ndo_poll_controller = xgmac_poll_controller, 1522 #endif 1523 .ndo_set_mac_address = xgmac_set_mac_address, 1524 .ndo_set_features = xgmac_set_features, 1525 }; 1526 1527 static int xgmac_ethtool_getsettings(struct net_device *dev, 1528 struct ethtool_cmd *cmd) 1529 { 1530 cmd->autoneg = 0; 1531 cmd->duplex = DUPLEX_FULL; 1532 ethtool_cmd_speed_set(cmd, 10000); 1533 cmd->supported = 0; 1534 cmd->advertising = 0; 1535 cmd->transceiver = XCVR_INTERNAL; 1536 return 0; 1537 } 1538 1539 static void xgmac_get_pauseparam(struct net_device *netdev, 1540 struct ethtool_pauseparam *pause) 1541 { 1542 struct xgmac_priv *priv = netdev_priv(netdev); 1543 1544 pause->rx_pause = priv->rx_pause; 1545 pause->tx_pause = priv->tx_pause; 1546 } 1547 1548 static int xgmac_set_pauseparam(struct net_device *netdev, 1549 struct ethtool_pauseparam *pause) 1550 { 1551 struct xgmac_priv *priv = netdev_priv(netdev); 1552 1553 if (pause->autoneg) 1554 return -EINVAL; 1555 1556 return xgmac_set_flow_ctrl(priv, pause->rx_pause, pause->tx_pause); 1557 } 1558 1559 struct xgmac_stats { 1560 char stat_string[ETH_GSTRING_LEN]; 1561 int stat_offset; 1562 bool is_reg; 1563 }; 1564 1565 #define XGMAC_STAT(m) \ 1566 { #m, offsetof(struct xgmac_priv, xstats.m), false } 1567 #define XGMAC_HW_STAT(m, reg_offset) \ 1568 { #m, reg_offset, true } 1569 1570 static const struct xgmac_stats xgmac_gstrings_stats[] = { 1571 XGMAC_STAT(tx_frame_flushed), 1572 XGMAC_STAT(tx_payload_error), 1573 XGMAC_STAT(tx_ip_header_error), 1574 XGMAC_STAT(tx_local_fault), 1575 XGMAC_STAT(tx_remote_fault), 1576 XGMAC_STAT(tx_early), 1577 XGMAC_STAT(tx_process_stopped), 1578 XGMAC_STAT(tx_jabber), 1579 XGMAC_STAT(rx_buf_unav), 1580 XGMAC_STAT(rx_process_stopped), 1581 XGMAC_STAT(rx_payload_error), 1582 XGMAC_STAT(rx_ip_header_error), 1583 XGMAC_STAT(rx_da_filter_fail), 1584 XGMAC_STAT(rx_sa_filter_fail), 1585 XGMAC_STAT(fatal_bus_error), 1586 XGMAC_HW_STAT(rx_watchdog, XGMAC_MMC_RXWATCHDOG), 1587 XGMAC_HW_STAT(tx_vlan, XGMAC_MMC_TXVLANFRAME), 1588 XGMAC_HW_STAT(rx_vlan, XGMAC_MMC_RXVLANFRAME), 1589 XGMAC_HW_STAT(tx_pause, XGMAC_MMC_TXPAUSEFRAME), 1590 XGMAC_HW_STAT(rx_pause, XGMAC_MMC_RXPAUSEFRAME), 1591 }; 1592 #define XGMAC_STATS_LEN ARRAY_SIZE(xgmac_gstrings_stats) 1593 1594 static void xgmac_get_ethtool_stats(struct net_device *dev, 1595 struct ethtool_stats *dummy, 1596 u64 *data) 1597 { 1598 struct xgmac_priv *priv = netdev_priv(dev); 1599 void *p = priv; 1600 int i; 1601 1602 for (i = 0; i < XGMAC_STATS_LEN; i++) { 1603 if (xgmac_gstrings_stats[i].is_reg) 1604 *data++ = readl(priv->base + 1605 xgmac_gstrings_stats[i].stat_offset); 1606 else 1607 *data++ = *(u32 *)(p + 1608 xgmac_gstrings_stats[i].stat_offset); 1609 } 1610 } 1611 1612 static int xgmac_get_sset_count(struct net_device *netdev, int sset) 1613 { 1614 switch (sset) { 1615 case ETH_SS_STATS: 1616 return XGMAC_STATS_LEN; 1617 default: 1618 return -EINVAL; 1619 } 1620 } 1621 1622 static void xgmac_get_strings(struct net_device *dev, u32 stringset, 1623 u8 *data) 1624 { 1625 int i; 1626 u8 *p = data; 1627 1628 switch (stringset) { 1629 case ETH_SS_STATS: 1630 for (i = 0; i < XGMAC_STATS_LEN; i++) { 1631 memcpy(p, xgmac_gstrings_stats[i].stat_string, 1632 ETH_GSTRING_LEN); 1633 p += ETH_GSTRING_LEN; 1634 } 1635 break; 1636 default: 1637 WARN_ON(1); 1638 break; 1639 } 1640 } 1641 1642 static void xgmac_get_wol(struct net_device *dev, 1643 struct ethtool_wolinfo *wol) 1644 { 1645 struct xgmac_priv *priv = netdev_priv(dev); 1646 1647 if (device_can_wakeup(priv->device)) { 1648 wol->supported = WAKE_MAGIC | WAKE_UCAST; 1649 wol->wolopts = priv->wolopts; 1650 } 1651 } 1652 1653 static int xgmac_set_wol(struct net_device *dev, 1654 struct ethtool_wolinfo *wol) 1655 { 1656 struct xgmac_priv *priv = netdev_priv(dev); 1657 u32 support = WAKE_MAGIC | WAKE_UCAST; 1658 1659 if (!device_can_wakeup(priv->device)) 1660 return -ENOTSUPP; 1661 1662 if (wol->wolopts & ~support) 1663 return -EINVAL; 1664 1665 priv->wolopts = wol->wolopts; 1666 1667 if (wol->wolopts) { 1668 device_set_wakeup_enable(priv->device, 1); 1669 enable_irq_wake(dev->irq); 1670 } else { 1671 device_set_wakeup_enable(priv->device, 0); 1672 disable_irq_wake(dev->irq); 1673 } 1674 1675 return 0; 1676 } 1677 1678 static const struct ethtool_ops xgmac_ethtool_ops = { 1679 .get_settings = xgmac_ethtool_getsettings, 1680 .get_link = ethtool_op_get_link, 1681 .get_pauseparam = xgmac_get_pauseparam, 1682 .set_pauseparam = xgmac_set_pauseparam, 1683 .get_ethtool_stats = xgmac_get_ethtool_stats, 1684 .get_strings = xgmac_get_strings, 1685 .get_wol = xgmac_get_wol, 1686 .set_wol = xgmac_set_wol, 1687 .get_sset_count = xgmac_get_sset_count, 1688 }; 1689 1690 /** 1691 * xgmac_probe 1692 * @pdev: platform device pointer 1693 * Description: the driver is initialized through platform_device. 1694 */ 1695 static int xgmac_probe(struct platform_device *pdev) 1696 { 1697 int ret = 0; 1698 struct resource *res; 1699 struct net_device *ndev = NULL; 1700 struct xgmac_priv *priv = NULL; 1701 u32 uid; 1702 1703 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1704 if (!res) 1705 return -ENODEV; 1706 1707 if (!request_mem_region(res->start, resource_size(res), pdev->name)) 1708 return -EBUSY; 1709 1710 ndev = alloc_etherdev(sizeof(struct xgmac_priv)); 1711 if (!ndev) { 1712 ret = -ENOMEM; 1713 goto err_alloc; 1714 } 1715 1716 SET_NETDEV_DEV(ndev, &pdev->dev); 1717 priv = netdev_priv(ndev); 1718 platform_set_drvdata(pdev, ndev); 1719 ether_setup(ndev); 1720 ndev->netdev_ops = &xgmac_netdev_ops; 1721 SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops); 1722 spin_lock_init(&priv->stats_lock); 1723 1724 priv->device = &pdev->dev; 1725 priv->dev = ndev; 1726 priv->rx_pause = 1; 1727 priv->tx_pause = 1; 1728 1729 priv->base = ioremap(res->start, resource_size(res)); 1730 if (!priv->base) { 1731 netdev_err(ndev, "ioremap failed\n"); 1732 ret = -ENOMEM; 1733 goto err_io; 1734 } 1735 1736 uid = readl(priv->base + XGMAC_VERSION); 1737 netdev_info(ndev, "h/w version is 0x%x\n", uid); 1738 1739 writel(0, priv->base + XGMAC_DMA_INTR_ENA); 1740 ndev->irq = platform_get_irq(pdev, 0); 1741 if (ndev->irq == -ENXIO) { 1742 netdev_err(ndev, "No irq resource\n"); 1743 ret = ndev->irq; 1744 goto err_irq; 1745 } 1746 1747 ret = request_irq(ndev->irq, xgmac_interrupt, 0, 1748 dev_name(&pdev->dev), ndev); 1749 if (ret < 0) { 1750 netdev_err(ndev, "Could not request irq %d - ret %d)\n", 1751 ndev->irq, ret); 1752 goto err_irq; 1753 } 1754 1755 priv->pmt_irq = platform_get_irq(pdev, 1); 1756 if (priv->pmt_irq == -ENXIO) { 1757 netdev_err(ndev, "No pmt irq resource\n"); 1758 ret = priv->pmt_irq; 1759 goto err_pmt_irq; 1760 } 1761 1762 ret = request_irq(priv->pmt_irq, xgmac_pmt_interrupt, 0, 1763 dev_name(&pdev->dev), ndev); 1764 if (ret < 0) { 1765 netdev_err(ndev, "Could not request irq %d - ret %d)\n", 1766 priv->pmt_irq, ret); 1767 goto err_pmt_irq; 1768 } 1769 1770 device_set_wakeup_capable(&pdev->dev, 1); 1771 if (device_can_wakeup(priv->device)) 1772 priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */ 1773 1774 ndev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA; 1775 if (readl(priv->base + XGMAC_DMA_HW_FEATURE) & DMA_HW_FEAT_TXCOESEL) 1776 ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 1777 NETIF_F_RXCSUM; 1778 ndev->features |= ndev->hw_features; 1779 ndev->priv_flags |= IFF_UNICAST_FLT; 1780 1781 /* Get the MAC address */ 1782 xgmac_get_mac_addr(priv->base, ndev->dev_addr, 0); 1783 if (!is_valid_ether_addr(ndev->dev_addr)) 1784 netdev_warn(ndev, "MAC address %pM not valid", 1785 ndev->dev_addr); 1786 1787 netif_napi_add(ndev, &priv->napi, xgmac_poll, 64); 1788 ret = register_netdev(ndev); 1789 if (ret) 1790 goto err_reg; 1791 1792 return 0; 1793 1794 err_reg: 1795 netif_napi_del(&priv->napi); 1796 free_irq(priv->pmt_irq, ndev); 1797 err_pmt_irq: 1798 free_irq(ndev->irq, ndev); 1799 err_irq: 1800 iounmap(priv->base); 1801 err_io: 1802 free_netdev(ndev); 1803 err_alloc: 1804 release_mem_region(res->start, resource_size(res)); 1805 platform_set_drvdata(pdev, NULL); 1806 return ret; 1807 } 1808 1809 /** 1810 * xgmac_dvr_remove 1811 * @pdev: platform device pointer 1812 * Description: this function resets the TX/RX processes, disables the MAC RX/TX 1813 * changes the link status, releases the DMA descriptor rings, 1814 * unregisters the MDIO bus and unmaps the allocated memory. 1815 */ 1816 static int xgmac_remove(struct platform_device *pdev) 1817 { 1818 struct net_device *ndev = platform_get_drvdata(pdev); 1819 struct xgmac_priv *priv = netdev_priv(ndev); 1820 struct resource *res; 1821 1822 xgmac_mac_disable(priv->base); 1823 1824 /* Free the IRQ lines */ 1825 free_irq(ndev->irq, ndev); 1826 free_irq(priv->pmt_irq, ndev); 1827 1828 platform_set_drvdata(pdev, NULL); 1829 unregister_netdev(ndev); 1830 netif_napi_del(&priv->napi); 1831 1832 iounmap(priv->base); 1833 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1834 release_mem_region(res->start, resource_size(res)); 1835 1836 free_netdev(ndev); 1837 1838 return 0; 1839 } 1840 1841 #ifdef CONFIG_PM_SLEEP 1842 static void xgmac_pmt(void __iomem *ioaddr, unsigned long mode) 1843 { 1844 unsigned int pmt = 0; 1845 1846 if (mode & WAKE_MAGIC) 1847 pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT; 1848 if (mode & WAKE_UCAST) 1849 pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST; 1850 1851 writel(pmt, ioaddr + XGMAC_PMT); 1852 } 1853 1854 static int xgmac_suspend(struct device *dev) 1855 { 1856 struct net_device *ndev = platform_get_drvdata(to_platform_device(dev)); 1857 struct xgmac_priv *priv = netdev_priv(ndev); 1858 u32 value; 1859 1860 if (!ndev || !netif_running(ndev)) 1861 return 0; 1862 1863 netif_device_detach(ndev); 1864 napi_disable(&priv->napi); 1865 writel(0, priv->base + XGMAC_DMA_INTR_ENA); 1866 1867 if (device_may_wakeup(priv->device)) { 1868 /* Stop TX/RX DMA Only */ 1869 value = readl(priv->base + XGMAC_DMA_CONTROL); 1870 value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR); 1871 writel(value, priv->base + XGMAC_DMA_CONTROL); 1872 1873 xgmac_pmt(priv->base, priv->wolopts); 1874 } else 1875 xgmac_mac_disable(priv->base); 1876 1877 return 0; 1878 } 1879 1880 static int xgmac_resume(struct device *dev) 1881 { 1882 struct net_device *ndev = platform_get_drvdata(to_platform_device(dev)); 1883 struct xgmac_priv *priv = netdev_priv(ndev); 1884 void __iomem *ioaddr = priv->base; 1885 1886 if (!netif_running(ndev)) 1887 return 0; 1888 1889 xgmac_pmt(ioaddr, 0); 1890 1891 /* Enable the MAC and DMA */ 1892 xgmac_mac_enable(ioaddr); 1893 writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS); 1894 writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA); 1895 1896 netif_device_attach(ndev); 1897 napi_enable(&priv->napi); 1898 1899 return 0; 1900 } 1901 1902 static SIMPLE_DEV_PM_OPS(xgmac_pm_ops, xgmac_suspend, xgmac_resume); 1903 #define XGMAC_PM_OPS (&xgmac_pm_ops) 1904 #else 1905 #define XGMAC_PM_OPS NULL 1906 #endif /* CONFIG_PM_SLEEP */ 1907 1908 static const struct of_device_id xgmac_of_match[] = { 1909 { .compatible = "calxeda,hb-xgmac", }, 1910 {}, 1911 }; 1912 MODULE_DEVICE_TABLE(of, xgmac_of_match); 1913 1914 static struct platform_driver xgmac_driver = { 1915 .driver = { 1916 .name = "calxedaxgmac", 1917 .of_match_table = xgmac_of_match, 1918 }, 1919 .probe = xgmac_probe, 1920 .remove = xgmac_remove, 1921 .driver.pm = XGMAC_PM_OPS, 1922 }; 1923 1924 module_platform_driver(xgmac_driver); 1925 1926 MODULE_AUTHOR("Calxeda, Inc."); 1927 MODULE_DESCRIPTION("Calxeda 10G XGMAC driver"); 1928 MODULE_LICENSE("GPL v2"); 1929