/*
 * Copyright 2010-2011 Calxeda, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/circ_buf.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/crc32.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io.h>		/* readl/writel/ioremap */
#include <linux/delay.h>	/* udelay */

/* XGMAC Register definitions */
#define XGMAC_CONTROL		0x00000000	/* MAC Configuration */
#define XGMAC_FRAME_FILTER	0x00000004	/* MAC Frame Filter */
#define XGMAC_FLOW_CTRL		0x00000018	/* MAC Flow Control */
#define XGMAC_VLAN_TAG		0x0000001C	/* VLAN Tags */
#define XGMAC_VERSION		0x00000020	/* Version */
#define XGMAC_VLAN_INCL		0x00000024	/* VLAN tag for tx frames */
#define XGMAC_LPI_CTRL		0x00000028	/* LPI Control and Status */
#define XGMAC_LPI_TIMER		0x0000002C	/* LPI Timers Control */
#define XGMAC_TX_PACE		0x00000030	/* Transmit Pace and Stretch */
#define XGMAC_VLAN_HASH		0x00000034	/* VLAN Hash Table */
#define XGMAC_DEBUG		0x00000038	/* Debug */
#define XGMAC_INT_STAT		0x0000003C	/* Interrupt and Control */
#define XGMAC_ADDR_HIGH(reg)	(0x00000040 + ((reg) * 8))
#define XGMAC_ADDR_LOW(reg)	(0x00000044 + ((reg) * 8))
#define XGMAC_HASH(n)		(0x00000300 + (n) * 4)	/* HASH table regs */
#define XGMAC_NUM_HASH		16
#define XGMAC_OMR		0x00000400
#define XGMAC_REMOTE_WAKE	0x00000700	/* Remote Wake-Up Frm Filter */
#define XGMAC_PMT		0x00000704	/* PMT Control and Status */
#define XGMAC_MMC_CTRL		0x00000800	/* XGMAC MMC Control */
#define XGMAC_MMC_INTR_RX	0x00000804	/* Receive Interrupt */
#define XGMAC_MMC_INTR_TX	0x00000808	/* Transmit Interrupt */
#define XGMAC_MMC_INTR_MASK_RX	0x0000080c	/* Receive Interrupt Mask */
#define XGMAC_MMC_INTR_MASK_TX	0x00000810	/* Transmit Interrupt Mask */

/* Hardware TX Statistics Counters */
#define XGMAC_MMC_TXOCTET_GB_LO	0x00000814
#define XGMAC_MMC_TXOCTET_GB_HI	0x00000818
#define XGMAC_MMC_TXFRAME_GB_LO	0x0000081C
#define XGMAC_MMC_TXFRAME_GB_HI	0x00000820
#define XGMAC_MMC_TXBCFRAME_G	0x00000824
#define XGMAC_MMC_TXMCFRAME_G	0x0000082C
#define XGMAC_MMC_TXUCFRAME_GB	0x00000864
#define XGMAC_MMC_TXMCFRAME_GB	0x0000086C
#define XGMAC_MMC_TXBCFRAME_GB	0x00000874
#define XGMAC_MMC_TXUNDERFLOW	0x0000087C
#define XGMAC_MMC_TXOCTET_G_LO	0x00000884
#define XGMAC_MMC_TXOCTET_G_HI	0x00000888
#define XGMAC_MMC_TXFRAME_G_LO	0x0000088C
#define XGMAC_MMC_TXFRAME_G_HI	0x00000890
#define XGMAC_MMC_TXPAUSEFRAME	0x00000894
#define XGMAC_MMC_TXVLANFRAME	0x0000089C

/* Hardware RX Statistics Counters */
#define XGMAC_MMC_RXFRAME_GB_LO	0x00000900
#define XGMAC_MMC_RXFRAME_GB_HI	0x00000904
#define XGMAC_MMC_RXOCTET_GB_LO	0x00000908
#define XGMAC_MMC_RXOCTET_GB_HI	0x0000090C
#define XGMAC_MMC_RXOCTET_G_LO	0x00000910
#define XGMAC_MMC_RXOCTET_G_HI	0x00000914
#define XGMAC_MMC_RXBCFRAME_G	0x00000918
#define XGMAC_MMC_RXMCFRAME_G	0x00000920
#define XGMAC_MMC_RXCRCERR	0x00000928
#define XGMAC_MMC_RXRUNT	0x00000930
#define XGMAC_MMC_RXJABBER	0x00000934
#define XGMAC_MMC_RXUCFRAME_G	0x00000970
#define XGMAC_MMC_RXLENGTHERR	0x00000978
#define XGMAC_MMC_RXPAUSEFRAME	0x00000988
#define XGMAC_MMC_RXOVERFLOW	0x00000990
#define XGMAC_MMC_RXVLANFRAME	0x00000998
#define XGMAC_MMC_RXWATCHDOG	0x000009a0

/* DMA Control and Status Registers */
#define XGMAC_DMA_BUS_MODE	0x00000f00	/* Bus Mode */
#define XGMAC_DMA_TX_POLL	0x00000f04	/* Transmit Poll Demand */
#define XGMAC_DMA_RX_POLL	0x00000f08	/* Received Poll Demand */
#define XGMAC_DMA_RX_BASE_ADDR	0x00000f0c	/* Receive List Base */
#define XGMAC_DMA_TX_BASE_ADDR	0x00000f10	/* Transmit List Base */
#define XGMAC_DMA_STATUS	0x00000f14	/* Status Register */
#define XGMAC_DMA_CONTROL	0x00000f18	/* Ctrl (Operational Mode) */
#define XGMAC_DMA_INTR_ENA	0x00000f1c	/* Interrupt Enable */
#define XGMAC_DMA_MISS_FRAME_CTR 0x00000f20	/* Missed Frame Counter */
#define XGMAC_DMA_RI_WDOG_TIMER	0x00000f24	/* RX Intr Watchdog Timer */
#define XGMAC_DMA_AXI_BUS	0x00000f28	/* AXI Bus Mode */
#define XGMAC_DMA_AXI_STATUS	0x00000f2C	/* AXI Status */
#define XGMAC_DMA_HW_FEATURE	0x00000f58	/* Enabled Hardware Features */

#define XGMAC_ADDR_AE		0x80000000

/* PMT Control and Status */
#define XGMAC_PMT_POINTER_RESET	0x80000000
#define XGMAC_PMT_GLBL_UNICAST	0x00000200
#define XGMAC_PMT_WAKEUP_RX_FRM	0x00000040
#define XGMAC_PMT_MAGIC_PKT	0x00000020
#define XGMAC_PMT_WAKEUP_FRM_EN	0x00000004
#define XGMAC_PMT_MAGIC_PKT_EN	0x00000002
#define XGMAC_PMT_POWERDOWN	0x00000001

#define XGMAC_CONTROL_SPD	0x40000000	/* Speed control */
#define XGMAC_CONTROL_SPD_MASK	0x60000000
#define XGMAC_CONTROL_SPD_1G	0x60000000
#define XGMAC_CONTROL_SPD_2_5G	0x40000000
#define XGMAC_CONTROL_SPD_10G	0x00000000
#define XGMAC_CONTROL_SARC	0x10000000	/* Source Addr Insert/Replace */
#define XGMAC_CONTROL_SARK_MASK	0x18000000
#define XGMAC_CONTROL_CAR	0x04000000	/* CRC Addition/Replacement */
#define XGMAC_CONTROL_CAR_MASK	0x06000000
#define XGMAC_CONTROL_DP	0x01000000	/* Disable Padding */
#define XGMAC_CONTROL_WD	0x00800000	/* Disable Watchdog on rx */
#define XGMAC_CONTROL_JD	0x00400000	/* Jabber disable */
#define XGMAC_CONTROL_JE	0x00100000	/* Jumbo frame */
#define XGMAC_CONTROL_LM	0x00001000	/* Loop-back mode */
#define XGMAC_CONTROL_IPC	0x00000400	/* Checksum Offload */
#define XGMAC_CONTROL_ACS	0x00000080	/* Automatic Pad/FCS Strip */
#define XGMAC_CONTROL_DDIC	0x00000010	/* Disable Deficit Idle Count */
#define XGMAC_CONTROL_TE	0x00000008	/* Transmitter Enable */
#define XGMAC_CONTROL_RE	0x00000004	/* Receiver Enable */

/* XGMAC Frame Filter defines */
#define XGMAC_FRAME_FILTER_PR	0x00000001	/* Promiscuous Mode */
#define XGMAC_FRAME_FILTER_HUC	0x00000002	/* Hash Unicast */
#define XGMAC_FRAME_FILTER_HMC	0x00000004	/* Hash Multicast */
#define XGMAC_FRAME_FILTER_DAIF	0x00000008	/* DA Inverse Filtering */
#define XGMAC_FRAME_FILTER_PM	0x00000010	/* Pass all multicast */
#define XGMAC_FRAME_FILTER_DBF	0x00000020	/* Disable Broadcast frames */
#define XGMAC_FRAME_FILTER_SAIF	0x00000100	/* Inverse Filtering */
#define XGMAC_FRAME_FILTER_SAF	0x00000200	/* Source Address Filter */
#define XGMAC_FRAME_FILTER_HPF	0x00000400	/* Hash or perfect Filter */
#define XGMAC_FRAME_FILTER_VHF	0x00000800	/* VLAN Hash Filter */
#define XGMAC_FRAME_FILTER_VPF	0x00001000	/* VLAN Perfect Filter */
#define XGMAC_FRAME_FILTER_RA	0x80000000	/* Receive all mode */

/* XGMAC FLOW CTRL defines */
#define XGMAC_FLOW_CTRL_PT_MASK	0xffff0000	/* Pause Time Mask */
#define XGMAC_FLOW_CTRL_PT_SHIFT 16
#define XGMAC_FLOW_CTRL_DZQP	0x00000080	/* Disable Zero-Quanta Phase */
#define XGMAC_FLOW_CTRL_PLT	0x00000020	/* Pause Low Threshold */
#define XGMAC_FLOW_CTRL_PLT_MASK 0x00000030	/* PLT MASK */
#define XGMAC_FLOW_CTRL_UP	0x00000008	/* Unicast Pause Frame Detect */
#define XGMAC_FLOW_CTRL_RFE	0x00000004	/* Rx Flow Control Enable */
#define XGMAC_FLOW_CTRL_TFE	0x00000002	/* Tx Flow Control Enable */
#define XGMAC_FLOW_CTRL_FCB_BPA	0x00000001	/* Flow Control Busy ... */

/* XGMAC_INT_STAT reg */
#define XGMAC_INT_STAT_PMTIM	0x00800000	/* PMT Interrupt Mask */
#define XGMAC_INT_STAT_PMT	0x0080		/* PMT Interrupt Status */
#define XGMAC_INT_STAT_LPI	0x0040		/* LPI Interrupt Status */

/* DMA Bus Mode register defines */
#define DMA_BUS_MODE_SFT_RESET	0x00000001	/* Software Reset */
#define DMA_BUS_MODE_DSL_MASK	0x0000007c	/* Descriptor Skip Length */
#define DMA_BUS_MODE_DSL_SHIFT	2		/* (in DWORDS) */
#define DMA_BUS_MODE_ATDS	0x00000080	/* Alternate Descriptor Size */

/* Programmable burst length */
#define DMA_BUS_MODE_PBL_MASK	0x00003f00	/* Programmable Burst Len */
#define DMA_BUS_MODE_PBL_SHIFT	8
#define DMA_BUS_MODE_FB		0x00010000	/* Fixed burst */
#define DMA_BUS_MODE_RPBL_MASK	0x003e0000	/* Rx-Programmable Burst Len */
#define DMA_BUS_MODE_RPBL_SHIFT	17
#define DMA_BUS_MODE_USP	0x00800000
#define DMA_BUS_MODE_8PBL	0x01000000
#define DMA_BUS_MODE_AAL	0x02000000

/* DMA Bus Mode register defines */
#define DMA_BUS_PR_RATIO_MASK	0x0000c000	/* Rx/Tx priority ratio */
#define DMA_BUS_PR_RATIO_SHIFT	14
#define DMA_BUS_FB		0x00010000	/* Fixed Burst */

/* DMA Control register defines */
#define DMA_CONTROL_ST		0x00002000	/* Start/Stop Transmission */
#define DMA_CONTROL_SR		0x00000002	/* Start/Stop Receive */
#define DMA_CONTROL_DFF		0x01000000	/* Disable flush of rx frames */
#define DMA_CONTROL_OSF		0x00000004	/* Operate on 2nd tx frame */

/* DMA Normal interrupt */
#define DMA_INTR_ENA_NIE	0x00010000	/* Normal Summary */
#define DMA_INTR_ENA_AIE	0x00008000	/* Abnormal Summary */
#define DMA_INTR_ENA_ERE	0x00004000	/* Early Receive */
#define DMA_INTR_ENA_FBE	0x00002000	/* Fatal Bus Error */
#define DMA_INTR_ENA_ETE	0x00000400	/* Early Transmit */
#define DMA_INTR_ENA_RWE	0x00000200	/* Receive Watchdog */
#define DMA_INTR_ENA_RSE	0x00000100	/* Receive Stopped */
#define DMA_INTR_ENA_RUE	0x00000080	/* Receive Buffer Unavailable */
#define DMA_INTR_ENA_RIE	0x00000040	/* Receive Interrupt */
#define DMA_INTR_ENA_UNE	0x00000020	/* Tx Underflow */
#define DMA_INTR_ENA_OVE	0x00000010	/* Receive Overflow */
#define DMA_INTR_ENA_TJE	0x00000008	/* Transmit Jabber */
#define DMA_INTR_ENA_TUE	0x00000004	/* Transmit Buffer Unavail */
#define DMA_INTR_ENA_TSE	0x00000002	/* Transmit Stopped */
#define DMA_INTR_ENA_TIE	0x00000001	/* Transmit Interrupt */
#define DMA_INTR_NORMAL	(DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
			 DMA_INTR_ENA_TUE | DMA_INTR_ENA_TIE)

#define DMA_INTR_ABNORMAL	(DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
				 DMA_INTR_ENA_RWE | DMA_INTR_ENA_RSE | \
				 DMA_INTR_ENA_RUE | DMA_INTR_ENA_UNE | \
				 DMA_INTR_ENA_OVE | DMA_INTR_ENA_TJE | \
				 DMA_INTR_ENA_TSE)

/* DMA default interrupt mask */
#define DMA_INTR_DEFAULT_MASK	(DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)

/* DMA Status register defines */
#define DMA_STATUS_GMI		0x08000000	/* MMC interrupt */
#define DMA_STATUS_GLI		0x04000000	/* GMAC Line interface int */
#define DMA_STATUS_EB_MASK	0x00380000	/* Error Bits Mask */
#define DMA_STATUS_EB_TX_ABORT	0x00080000	/* Error Bits - TX Abort */
#define DMA_STATUS_EB_RX_ABORT	0x00100000	/* Error Bits - RX Abort */
#define DMA_STATUS_TS_MASK	0x00700000	/* Transmit Process State */
#define DMA_STATUS_TS_SHIFT	20
#define DMA_STATUS_RS_MASK	0x000e0000	/* Receive Process State */
#define DMA_STATUS_RS_SHIFT	17
#define DMA_STATUS_NIS		0x00010000	/* Normal Interrupt Summary */
#define DMA_STATUS_AIS		0x00008000	/* Abnormal Interrupt Summary */
#define DMA_STATUS_ERI		0x00004000	/* Early Receive Interrupt */
#define DMA_STATUS_FBI		0x00002000	/* Fatal Bus Error Interrupt */
#define DMA_STATUS_ETI		0x00000400	/* Early Transmit Interrupt */
#define DMA_STATUS_RWT		0x00000200	/* Receive Watchdog Timeout */
#define DMA_STATUS_RPS		0x00000100	/* Receive Process Stopped */
#define DMA_STATUS_RU		0x00000080	/* Receive Buffer Unavailable */
#define DMA_STATUS_RI		0x00000040	/* Receive Interrupt */
#define DMA_STATUS_UNF		0x00000020	/* Transmit Underflow */
#define DMA_STATUS_OVF		0x00000010	/* Receive Overflow */
#define DMA_STATUS_TJT		0x00000008	/* Transmit Jabber Timeout */
#define DMA_STATUS_TU		0x00000004	/* Transmit Buffer Unavail */
#define DMA_STATUS_TPS		0x00000002	/* Transmit Process Stopped */
#define DMA_STATUS_TI		0x00000001	/* Transmit Interrupt */

/* Common MAC defines */
#define MAC_ENABLE_TX		0x00000008	/* Transmitter Enable */
#define MAC_ENABLE_RX		0x00000004	/* Receiver Enable */

/* XGMAC Operation Mode Register */
#define XGMAC_OMR_TSF		0x00200000	/* TX FIFO Store and Forward */
#define XGMAC_OMR_FTF		0x00100000	/* Flush Transmit FIFO */
#define XGMAC_OMR_TTC		0x00020000	/* Transmit Threshold Ctrl */
#define XGMAC_OMR_TTC_MASK	0x00030000
#define XGMAC_OMR_RFD		0x00006000	/* FC Deactivation Threshold */
#define XGMAC_OMR_RFD_MASK	0x00007000	/* FC Deact Threshold MASK */
#define XGMAC_OMR_RFA		0x00000600	/* FC Activation Threshold */
#define XGMAC_OMR_RFA_MASK	0x00000E00	/* FC Act Threshold MASK */
#define XGMAC_OMR_EFC		0x00000100	/* Enable Hardware FC */
#define XGMAC_OMR_FEF		0x00000080	/* Forward Error Frames */
#define XGMAC_OMR_DT		0x00000040	/* Drop TCP/IP csum Errors */
#define XGMAC_OMR_RSF		0x00000020	/* RX FIFO Store and Forward */
#define XGMAC_OMR_RTC_256	0x00000018	/* RX Threshold Ctrl */
#define XGMAC_OMR_RTC_MASK	0x00000018	/* RX Threshold Ctrl MASK */

/* XGMAC HW Features Register */
#define DMA_HW_FEAT_TXCOESEL	0x00010000	/* TX Checksum offload */

#define XGMAC_MMC_CTRL_CNT_FRZ	0x00000008

/* XGMAC Descriptor Defines */
#define MAX_DESC_BUF_SZ		(0x2000 - 8)
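
/*
 * Each descriptor carries two buffer pointers of up to MAX_DESC_BUF_SZ
 * (8184) bytes each.  For example, desc_set_buf_len() below programs a
 * 9000-byte jumbo buffer as buf1 = 8184 bytes plus buf2 = the remaining
 * 816 bytes.
 */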
#define RXDESC_EXT_STATUS	0x00000001
#define RXDESC_CRC_ERR		0x00000002
#define RXDESC_RX_ERR		0x00000008
#define RXDESC_RX_WDOG		0x00000010
#define RXDESC_FRAME_TYPE	0x00000020
#define RXDESC_GIANT_FRAME	0x00000080
#define RXDESC_LAST_SEG		0x00000100
#define RXDESC_FIRST_SEG	0x00000200
#define RXDESC_VLAN_FRAME	0x00000400
#define RXDESC_OVERFLOW_ERR	0x00000800
#define RXDESC_LENGTH_ERR	0x00001000
#define RXDESC_SA_FILTER_FAIL	0x00002000
#define RXDESC_DESCRIPTOR_ERR	0x00004000
#define RXDESC_ERROR_SUMMARY	0x00008000
#define RXDESC_FRAME_LEN_OFFSET	16
#define RXDESC_FRAME_LEN_MASK	0x3fff0000
#define RXDESC_DA_FILTER_FAIL	0x40000000

#define RXDESC1_END_RING	0x00008000

#define RXDESC_IP_PAYLOAD_MASK	0x00000003
#define RXDESC_IP_PAYLOAD_UDP	0x00000001
#define RXDESC_IP_PAYLOAD_TCP	0x00000002
#define RXDESC_IP_PAYLOAD_ICMP	0x00000003
#define RXDESC_IP_HEADER_ERR	0x00000008
#define RXDESC_IP_PAYLOAD_ERR	0x00000010
#define RXDESC_IPV4_PACKET	0x00000040
#define RXDESC_IPV6_PACKET	0x00000080
#define TXDESC_UNDERFLOW_ERR	0x00000001
#define TXDESC_JABBER_TIMEOUT	0x00000002
#define TXDESC_LOCAL_FAULT	0x00000004
#define TXDESC_REMOTE_FAULT	0x00000008
#define TXDESC_VLAN_FRAME	0x00000010
#define TXDESC_FRAME_FLUSHED	0x00000020
#define TXDESC_IP_HEADER_ERR	0x00000040
#define TXDESC_PAYLOAD_CSUM_ERR	0x00000080
#define TXDESC_ERROR_SUMMARY	0x00008000
#define TXDESC_SA_CTRL_INSERT	0x00040000
#define TXDESC_SA_CTRL_REPLACE	0x00080000
#define TXDESC_2ND_ADDR_CHAINED	0x00100000
#define TXDESC_END_RING		0x00200000
#define TXDESC_CSUM_IP		0x00400000
#define TXDESC_CSUM_IP_PAYLD	0x00800000
#define TXDESC_CSUM_ALL		0x00C00000
#define TXDESC_CRC_EN_REPLACE	0x01000000
#define TXDESC_CRC_EN_APPEND	0x02000000
#define TXDESC_DISABLE_PAD	0x04000000
#define TXDESC_FIRST_SEG	0x10000000
#define TXDESC_LAST_SEG		0x20000000
#define TXDESC_INTERRUPT	0x40000000

#define DESC_OWN		0x80000000
#define DESC_BUFFER1_SZ_MASK	0x00001fff
#define DESC_BUFFER2_SZ_MASK	0x1fff0000
#define DESC_BUFFER2_SZ_OFFSET	16

struct xgmac_dma_desc {
	__le32 flags;
	__le32 buf_size;
	__le32 buf1_addr;		/* Buffer 1 Address Pointer */
	__le32 buf2_addr;		/* Buffer 2 Address Pointer */
	__le32 ext_status;
	__le32 res[3];
};
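
/*
 * Descriptors are shared with the hardware: setting DESC_OWN in 'flags'
 * hands a descriptor to the DMA engine, and the driver must not touch it
 * again until the hardware clears the bit.  This is why xgmac_xmit() sets
 * the owner bit on the first segment last, behind a write barrier.
 */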
struct xgmac_extra_stats {
	/* Transmit errors */
	unsigned long tx_jabber;
	unsigned long tx_frame_flushed;
	unsigned long tx_payload_error;
	unsigned long tx_ip_header_error;
	unsigned long tx_local_fault;
	unsigned long tx_remote_fault;
	/* Receive errors */
	unsigned long rx_watchdog;
	unsigned long rx_da_filter_fail;
	unsigned long rx_payload_error;
	unsigned long rx_ip_header_error;
	/* Tx/Rx IRQ errors */
	unsigned long tx_process_stopped;
	unsigned long rx_buf_unav;
	unsigned long rx_process_stopped;
	unsigned long tx_early;
	unsigned long fatal_bus_error;
};

struct xgmac_priv {
	struct xgmac_dma_desc *dma_rx;
	struct sk_buff **rx_skbuff;
	unsigned int rx_tail;
	unsigned int rx_head;

	struct xgmac_dma_desc *dma_tx;
	struct sk_buff **tx_skbuff;
	unsigned int tx_head;
	unsigned int tx_tail;
	int tx_irq_cnt;

	void __iomem *base;
	unsigned int dma_buf_sz;
	dma_addr_t dma_rx_phy;
	dma_addr_t dma_tx_phy;

	struct net_device *dev;
	struct device *device;
	struct napi_struct napi;

	int max_macs;
	struct xgmac_extra_stats xstats;

	spinlock_t stats_lock;
	int pmt_irq;
	char rx_pause;
	char tx_pause;
	int wolopts;
	struct work_struct tx_timeout_work;
};

/* XGMAC Configuration Settings */
#define MAX_MTU			9000
#define PAUSE_TIME		0x400

#define DMA_RX_RING_SZ		256
#define DMA_TX_RING_SZ		128
/* minimum number of free TX descriptors required to wake up TX process */
#define TX_THRESH		(DMA_TX_RING_SZ/4)

/* DMA descriptor ring helpers */
#define dma_ring_incr(n, s)	(((n) + 1) & ((s) - 1))
#define dma_ring_space(h, t, s)	CIRC_SPACE(h, t, s)
#define dma_ring_cnt(h, t, s)	CIRC_CNT(h, t, s)

#define tx_dma_ring_space(p) \
	dma_ring_space((p)->tx_head, (p)->tx_tail, DMA_TX_RING_SZ)
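
/*
 * Note that dma_ring_incr() masks with (size - 1), so DMA_RX_RING_SZ and
 * DMA_TX_RING_SZ must be powers of two.  CIRC_SPACE()/CIRC_CNT() follow
 * the usual circ_buf convention of leaving one slot unused so that a full
 * ring can be distinguished from an empty one.
 */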
/* XGMAC Descriptor Access Helpers */
static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
{
	if (buf_sz > MAX_DESC_BUF_SZ)
		p->buf_size = cpu_to_le32(MAX_DESC_BUF_SZ |
			(buf_sz - MAX_DESC_BUF_SZ) << DESC_BUFFER2_SZ_OFFSET);
	else
		p->buf_size = cpu_to_le32(buf_sz);
}

static inline int desc_get_buf_len(struct xgmac_dma_desc *p)
{
	u32 len = le32_to_cpu(p->buf_size);
	return (len & DESC_BUFFER1_SZ_MASK) +
		((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET);
}

static inline void desc_init_rx_desc(struct xgmac_dma_desc *p, int ring_size,
				     int buf_sz)
{
	struct xgmac_dma_desc *end = p + ring_size - 1;

	memset(p, 0, sizeof(*p) * ring_size);

	for (; p <= end; p++)
		desc_set_buf_len(p, buf_sz);

	end->buf_size |= cpu_to_le32(RXDESC1_END_RING);
}

static inline void desc_init_tx_desc(struct xgmac_dma_desc *p, u32 ring_size)
{
	memset(p, 0, sizeof(*p) * ring_size);
	p[ring_size - 1].flags = cpu_to_le32(TXDESC_END_RING);
}

static inline int desc_get_owner(struct xgmac_dma_desc *p)
{
	return le32_to_cpu(p->flags) & DESC_OWN;
}

static inline void desc_set_rx_owner(struct xgmac_dma_desc *p)
{
	/* Clear all fields and set the owner */
	p->flags = cpu_to_le32(DESC_OWN);
}

static inline void desc_set_tx_owner(struct xgmac_dma_desc *p, u32 flags)
{
	u32 tmpflags = le32_to_cpu(p->flags);
	tmpflags &= TXDESC_END_RING;
	tmpflags |= flags | DESC_OWN;
	p->flags = cpu_to_le32(tmpflags);
}

static inline void desc_clear_tx_owner(struct xgmac_dma_desc *p)
{
	u32 tmpflags = le32_to_cpu(p->flags);
	tmpflags &= TXDESC_END_RING;
	p->flags = cpu_to_le32(tmpflags);
}

static inline int desc_get_tx_ls(struct xgmac_dma_desc *p)
{
	return le32_to_cpu(p->flags) & TXDESC_LAST_SEG;
}

static inline int desc_get_tx_fs(struct xgmac_dma_desc *p)
{
	return le32_to_cpu(p->flags) & TXDESC_FIRST_SEG;
}

static inline u32 desc_get_buf_addr(struct xgmac_dma_desc *p)
{
	return le32_to_cpu(p->buf1_addr);
}

static inline void desc_set_buf_addr(struct xgmac_dma_desc *p,
				     u32 paddr, int len)
{
	p->buf1_addr = cpu_to_le32(paddr);
	if (len > MAX_DESC_BUF_SZ)
		p->buf2_addr = cpu_to_le32(paddr + MAX_DESC_BUF_SZ);
}

static inline void desc_set_buf_addr_and_size(struct xgmac_dma_desc *p,
					      u32 paddr, int len)
{
	desc_set_buf_len(p, len);
	desc_set_buf_addr(p, paddr, len);
}

static inline int desc_get_rx_frame_len(struct xgmac_dma_desc *p)
{
	u32 data = le32_to_cpu(p->flags);
	u32 len = (data & RXDESC_FRAME_LEN_MASK) >> RXDESC_FRAME_LEN_OFFSET;
	if (data & RXDESC_FRAME_TYPE)
		len -= ETH_FCS_LEN;

	return len;
}
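
/*
 * Flush the TX FIFO after an underflow.  The poll below is bounded at
 * roughly 1ms (1000 x udelay(1)) on the assumption that the hardware
 * clears XGMAC_OMR_FTF promptly once the flush completes.
 */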
static void xgmac_dma_flush_tx_fifo(void __iomem *ioaddr)
{
	int timeout = 1000;
	u32 reg = readl(ioaddr + XGMAC_OMR);
	writel(reg | XGMAC_OMR_FTF, ioaddr + XGMAC_OMR);

	while ((timeout-- > 0) && readl(ioaddr + XGMAC_OMR) & XGMAC_OMR_FTF)
		udelay(1);
}

static int desc_get_tx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
{
	struct xgmac_extra_stats *x = &priv->xstats;
	u32 status = le32_to_cpu(p->flags);

	if (!(status & TXDESC_ERROR_SUMMARY))
		return 0;

	netdev_dbg(priv->dev, "tx desc error = 0x%08x\n", status);
	if (status & TXDESC_JABBER_TIMEOUT)
		x->tx_jabber++;
	if (status & TXDESC_FRAME_FLUSHED)
		x->tx_frame_flushed++;
	if (status & TXDESC_UNDERFLOW_ERR)
		xgmac_dma_flush_tx_fifo(priv->base);
	if (status & TXDESC_IP_HEADER_ERR)
		x->tx_ip_header_error++;
	if (status & TXDESC_LOCAL_FAULT)
		x->tx_local_fault++;
	if (status & TXDESC_REMOTE_FAULT)
		x->tx_remote_fault++;
	if (status & TXDESC_PAYLOAD_CSUM_ERR)
		x->tx_payload_error++;

	return -1;
}

static int desc_get_rx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
{
	struct xgmac_extra_stats *x = &priv->xstats;
	int ret = CHECKSUM_UNNECESSARY;
	u32 status = le32_to_cpu(p->flags);
	u32 ext_status = le32_to_cpu(p->ext_status);

	if (status & RXDESC_DA_FILTER_FAIL) {
		netdev_dbg(priv->dev, "XGMAC RX : Dest Address filter fail\n");
		x->rx_da_filter_fail++;
		return -1;
	}

	/* All frames should fit into a single buffer */
	if (!(status & RXDESC_FIRST_SEG) || !(status & RXDESC_LAST_SEG))
		return -1;

	/* Check if packet has checksum already */
	if ((status & RXDESC_FRAME_TYPE) && (status & RXDESC_EXT_STATUS) &&
	    !(ext_status & RXDESC_IP_PAYLOAD_MASK))
		ret = CHECKSUM_NONE;

	netdev_dbg(priv->dev, "rx status - frame type=%d, csum = %d, ext stat %08x\n",
		   (status & RXDESC_FRAME_TYPE) ? 1 : 0, ret, ext_status);

	if (!(status & RXDESC_ERROR_SUMMARY))
		return ret;

	/* Handle any errors */
	if (status & (RXDESC_DESCRIPTOR_ERR | RXDESC_OVERFLOW_ERR |
		      RXDESC_GIANT_FRAME | RXDESC_LENGTH_ERR | RXDESC_CRC_ERR))
		return -1;

	if (status & RXDESC_EXT_STATUS) {
		if (ext_status & RXDESC_IP_HEADER_ERR)
			x->rx_ip_header_error++;
		if (ext_status & RXDESC_IP_PAYLOAD_ERR)
			x->rx_payload_error++;
		netdev_dbg(priv->dev, "IP checksum error - stat %08x\n",
			   ext_status);
		return CHECKSUM_NONE;
	}

	return ret;
}

static inline void xgmac_mac_enable(void __iomem *ioaddr)
{
	u32 value = readl(ioaddr + XGMAC_CONTROL);
	value |= MAC_ENABLE_RX | MAC_ENABLE_TX;
	writel(value, ioaddr + XGMAC_CONTROL);

	value = readl(ioaddr + XGMAC_DMA_CONTROL);
	value |= DMA_CONTROL_ST | DMA_CONTROL_SR;
	writel(value, ioaddr + XGMAC_DMA_CONTROL);
}

static inline void xgmac_mac_disable(void __iomem *ioaddr)
{
	u32 value = readl(ioaddr + XGMAC_DMA_CONTROL);
	value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR);
	writel(value, ioaddr + XGMAC_DMA_CONTROL);

	value = readl(ioaddr + XGMAC_CONTROL);
	value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX);
	writel(value, ioaddr + XGMAC_CONTROL);
}

static void xgmac_set_mac_addr(void __iomem *ioaddr, unsigned char *addr,
			       int num)
{
	u32 data;

	if (addr) {
		data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0);
		writel(data, ioaddr + XGMAC_ADDR_HIGH(num));
		data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
		writel(data, ioaddr + XGMAC_ADDR_LOW(num));
	} else {
		writel(0, ioaddr + XGMAC_ADDR_HIGH(num));
		writel(0, ioaddr + XGMAC_ADDR_LOW(num));
	}
}

static void xgmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
			       int num)
{
	u32 hi_addr, lo_addr;

	/* Read the MAC address from the hardware */
	hi_addr = readl(ioaddr + XGMAC_ADDR_HIGH(num));
	lo_addr = readl(ioaddr + XGMAC_ADDR_LOW(num));

	/* Extract the MAC address from the high and low words */
	addr[0] = lo_addr & 0xff;
	addr[1] = (lo_addr >> 8) & 0xff;
	addr[2] = (lo_addr >> 16) & 0xff;
	addr[3] = (lo_addr >> 24) & 0xff;
	addr[4] = hi_addr & 0xff;
	addr[5] = (hi_addr >> 8) & 0xff;
}

static int xgmac_set_flow_ctrl(struct xgmac_priv *priv, int rx, int tx)
{
	u32 reg;
	unsigned int flow = 0;

	priv->rx_pause = rx;
	priv->tx_pause = tx;

	if (rx || tx) {
		if (rx)
			flow |= XGMAC_FLOW_CTRL_RFE;
		if (tx)
			flow |= XGMAC_FLOW_CTRL_TFE;

		flow |= XGMAC_FLOW_CTRL_PLT | XGMAC_FLOW_CTRL_UP;
		flow |= (PAUSE_TIME << XGMAC_FLOW_CTRL_PT_SHIFT);

		writel(flow, priv->base + XGMAC_FLOW_CTRL);

		reg = readl(priv->base + XGMAC_OMR);
		reg |= XGMAC_OMR_EFC;
		writel(reg, priv->base + XGMAC_OMR);
	} else {
		writel(0, priv->base + XGMAC_FLOW_CTRL);

		reg = readl(priv->base + XGMAC_OMR);
		reg &= ~XGMAC_OMR_EFC;
		writel(reg, priv->base + XGMAC_OMR);
	}

	return 0;
}

static void xgmac_rx_refill(struct xgmac_priv *priv)
{
	struct xgmac_dma_desc *p;
	dma_addr_t paddr;
	int bufsz = priv->dev->mtu + ETH_HLEN + ETH_FCS_LEN;

	while (dma_ring_space(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ) > 1) {
		int entry = priv->rx_head;
		struct sk_buff *skb;

		p = priv->dma_rx + entry;

		if (priv->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb_ip_align(priv->dev, bufsz);
			if (unlikely(skb == NULL))
				break;

			paddr = dma_map_single(priv->device, skb->data,
					       priv->dma_buf_sz - NET_IP_ALIGN,
					       DMA_FROM_DEVICE);
			if (dma_mapping_error(priv->device, paddr)) {
				dev_kfree_skb_any(skb);
				break;
			}
			priv->rx_skbuff[entry] = skb;
			desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
		}

		netdev_dbg(priv->dev, "rx ring: head %d, tail %d\n",
			   priv->rx_head, priv->rx_tail);

		priv->rx_head = dma_ring_incr(priv->rx_head, DMA_RX_RING_SZ);
		desc_set_rx_owner(p);
	}
}

/**
 * xgmac_dma_desc_rings_init - init the RX/TX descriptor rings
 * @dev: net device structure
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers.
 */
static int xgmac_dma_desc_rings_init(struct net_device *dev)
{
	struct xgmac_priv *priv = netdev_priv(dev);
	unsigned int bfsize;

	/* Set the Buffer size according to the MTU;
	 * The total buffer size including any IP offset must be a multiple
	 * of 8 bytes.
	 */
	bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN, 8);

	netdev_dbg(priv->dev, "mtu [%d] bfsize [%d]\n", dev->mtu, bfsize);

	priv->rx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_RX_RING_SZ,
				  GFP_KERNEL);
	if (!priv->rx_skbuff)
		return -ENOMEM;

	priv->dma_rx = dma_alloc_coherent(priv->device,
					  DMA_RX_RING_SZ *
					  sizeof(struct xgmac_dma_desc),
					  &priv->dma_rx_phy,
					  GFP_KERNEL);
	if (!priv->dma_rx)
		goto err_dma_rx;

	priv->tx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_TX_RING_SZ,
				  GFP_KERNEL);
	if (!priv->tx_skbuff)
		goto err_tx_skb;

	priv->dma_tx = dma_alloc_coherent(priv->device,
					  DMA_TX_RING_SZ *
					  sizeof(struct xgmac_dma_desc),
					  &priv->dma_tx_phy,
					  GFP_KERNEL);
	if (!priv->dma_tx)
		goto err_dma_tx;

	netdev_dbg(priv->dev, "DMA desc rings: virt addr (Rx %p, "
	    "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n",
	    priv->dma_rx, priv->dma_tx,
	    (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy);

	priv->rx_tail = 0;
	priv->rx_head = 0;
	priv->dma_buf_sz = bfsize;
	desc_init_rx_desc(priv->dma_rx, DMA_RX_RING_SZ, priv->dma_buf_sz);
	xgmac_rx_refill(priv);

	priv->tx_tail = 0;
	priv->tx_head = 0;
	desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);

	writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR);
	writel(priv->dma_rx_phy, priv->base + XGMAC_DMA_RX_BASE_ADDR);

	return 0;

err_dma_tx:
	kfree(priv->tx_skbuff);
err_tx_skb:
	dma_free_coherent(priv->device,
			  DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc),
			  priv->dma_rx, priv->dma_rx_phy);
err_dma_rx:
	kfree(priv->rx_skbuff);
	return -ENOMEM;
}

static void xgmac_free_rx_skbufs(struct xgmac_priv *priv)
{
	int i;
	struct xgmac_dma_desc *p;

	if (!priv->rx_skbuff)
		return;

	for (i = 0; i < DMA_RX_RING_SZ; i++) {
		struct sk_buff *skb = priv->rx_skbuff[i];
		if (skb == NULL)
			continue;

		p = priv->dma_rx + i;
		dma_unmap_single(priv->device, desc_get_buf_addr(p),
				 priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		priv->rx_skbuff[i] = NULL;
	}
}
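
/*
 * The head of a TX skb is mapped with dma_map_single() and its fragments
 * with skb_frag_dma_map() (see xgmac_xmit()), so the teardown and
 * completion paths below check the first-segment bit to decide between
 * dma_unmap_single() and dma_unmap_page().
 */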
static void xgmac_free_tx_skbufs(struct xgmac_priv *priv)
{
	int i;
	struct xgmac_dma_desc *p;

	if (!priv->tx_skbuff)
		return;

	for (i = 0; i < DMA_TX_RING_SZ; i++) {
		if (priv->tx_skbuff[i] == NULL)
			continue;

		p = priv->dma_tx + i;
		if (desc_get_tx_fs(p))
			dma_unmap_single(priv->device, desc_get_buf_addr(p),
					 desc_get_buf_len(p), DMA_TO_DEVICE);
		else
			dma_unmap_page(priv->device, desc_get_buf_addr(p),
				       desc_get_buf_len(p), DMA_TO_DEVICE);

		if (desc_get_tx_ls(p))
			dev_kfree_skb_any(priv->tx_skbuff[i]);
		priv->tx_skbuff[i] = NULL;
	}
}

static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv)
{
	/* Release the DMA TX/RX socket buffers */
	xgmac_free_rx_skbufs(priv);
	xgmac_free_tx_skbufs(priv);

	/* Free the consistent memory allocated for descriptor rings */
	if (priv->dma_tx) {
		dma_free_coherent(priv->device,
				  DMA_TX_RING_SZ * sizeof(struct xgmac_dma_desc),
				  priv->dma_tx, priv->dma_tx_phy);
		priv->dma_tx = NULL;
	}
	if (priv->dma_rx) {
		dma_free_coherent(priv->device,
				  DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc),
				  priv->dma_rx, priv->dma_rx_phy);
		priv->dma_rx = NULL;
	}
	kfree(priv->rx_skbuff);
	priv->rx_skbuff = NULL;
	kfree(priv->tx_skbuff);
	priv->tx_skbuff = NULL;
}

/**
 * xgmac_tx_complete:
 * @priv: private driver structure
 * Description: it reclaims resources after transmission completes.
 */
static void xgmac_tx_complete(struct xgmac_priv *priv)
{
	while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) {
		unsigned int entry = priv->tx_tail;
		struct sk_buff *skb = priv->tx_skbuff[entry];
		struct xgmac_dma_desc *p = priv->dma_tx + entry;

		/* Check if the descriptor is owned by the DMA. */
		if (desc_get_owner(p))
			break;

		netdev_dbg(priv->dev, "tx ring: curr %d, dirty %d\n",
			   priv->tx_head, priv->tx_tail);

		if (desc_get_tx_fs(p))
			dma_unmap_single(priv->device, desc_get_buf_addr(p),
					 desc_get_buf_len(p), DMA_TO_DEVICE);
		else
			dma_unmap_page(priv->device, desc_get_buf_addr(p),
				       desc_get_buf_len(p), DMA_TO_DEVICE);

		/* Check tx error on the last segment */
		if (desc_get_tx_ls(p)) {
			desc_get_tx_status(priv, p);
			dev_kfree_skb(skb);
		}

		priv->tx_skbuff[entry] = NULL;
		priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
	}

	/* Ensure tx_tail is visible to xgmac_xmit */
	smp_mb();
	if (unlikely(netif_queue_stopped(priv->dev) &&
	    (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)))
		netif_wake_queue(priv->dev);
}

static void xgmac_tx_timeout_work(struct work_struct *work)
{
	u32 reg, value;
	struct xgmac_priv *priv =
		container_of(work, struct xgmac_priv, tx_timeout_work);

	napi_disable(&priv->napi);

	writel(0, priv->base + XGMAC_DMA_INTR_ENA);

	netif_tx_lock(priv->dev);

	reg = readl(priv->base + XGMAC_DMA_CONTROL);
	writel(reg & ~DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
	do {
		/* Poll the TX state machine (DMA_STATUS_TS_MASK) until the
		 * transmit engine stops running or parks in its suspended
		 * state. */
		value = readl(priv->base + XGMAC_DMA_STATUS) & 0x700000;
	} while (value && (value != 0x600000));

	xgmac_free_tx_skbufs(priv);
	desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);
	priv->tx_tail = 0;
	priv->tx_head = 0;
	writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR);
	writel(reg | DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);

	writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS,
		priv->base + XGMAC_DMA_STATUS);

	netif_tx_unlock(priv->dev);
	netif_wake_queue(priv->dev);

	napi_enable(&priv->napi);

	/* Enable interrupts */
	writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_STATUS);
	writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
}

static int xgmac_hw_init(struct net_device *dev)
{
	u32 value, ctrl;
	int limit;
	struct xgmac_priv *priv = netdev_priv(dev);
	void __iomem *ioaddr = priv->base;

	/* Save the ctrl register value */
	ctrl = readl(ioaddr + XGMAC_CONTROL) & XGMAC_CONTROL_SPD_MASK;

	/* SW reset */
	value = DMA_BUS_MODE_SFT_RESET;
	writel(value, ioaddr + XGMAC_DMA_BUS_MODE);
	limit = 15000;
	while (limit-- &&
		(readl(ioaddr + XGMAC_DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
		cpu_relax();
	if (limit < 0)
		return -EBUSY;

	value = (0x10 << DMA_BUS_MODE_PBL_SHIFT) |
		(0x10 << DMA_BUS_MODE_RPBL_SHIFT) |
		DMA_BUS_MODE_FB | DMA_BUS_MODE_ATDS | DMA_BUS_MODE_AAL;
	writel(value, ioaddr + XGMAC_DMA_BUS_MODE);

	writel(0, ioaddr + XGMAC_DMA_INTR_ENA);

	/* Mask power mgt interrupt */
	writel(XGMAC_INT_STAT_PMTIM, ioaddr + XGMAC_INT_STAT);

	/* XGMAC requires AXI bus init. This is a 'magic number' for now */
	writel(0x0077000E, ioaddr + XGMAC_DMA_AXI_BUS);

	ctrl |= XGMAC_CONTROL_DDIC | XGMAC_CONTROL_JE | XGMAC_CONTROL_ACS |
		XGMAC_CONTROL_CAR;
	if (dev->features & NETIF_F_RXCSUM)
		ctrl |= XGMAC_CONTROL_IPC;
	writel(ctrl, ioaddr + XGMAC_CONTROL);

	writel(DMA_CONTROL_OSF, ioaddr + XGMAC_DMA_CONTROL);

	/* Set the HW DMA mode and the COE */
	writel(XGMAC_OMR_TSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA |
		XGMAC_OMR_RTC_256,
		ioaddr + XGMAC_OMR);

	/* Reset the MMC counters */
	writel(1, ioaddr + XGMAC_MMC_CTRL);
	return 0;
}

/**
 * xgmac_open - open entry point of the driver
 * @dev : pointer to the device structure.
 * Description:
 * This function is the open entry point of the driver.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */
static int xgmac_open(struct net_device *dev)
{
	int ret;
	struct xgmac_priv *priv = netdev_priv(dev);
	void __iomem *ioaddr = priv->base;

	/* Check that the MAC address is valid.  If it is not, generate a
	 * random one, which the user can later override, e.g.:
	 * ip link set dev eth0 address xx:xx:xx:xx:xx:xx */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		eth_hw_addr_random(dev);
		netdev_dbg(priv->dev, "generated random MAC address %pM\n",
			dev->dev_addr);
	}

	memset(&priv->xstats, 0, sizeof(struct xgmac_extra_stats));

	/* Initialize the XGMAC and descriptors */
	xgmac_hw_init(dev);
	xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
	xgmac_set_flow_ctrl(priv, priv->rx_pause, priv->tx_pause);

	ret = xgmac_dma_desc_rings_init(dev);
	if (ret < 0)
		return ret;

	/* Enable the MAC Rx/Tx */
	xgmac_mac_enable(ioaddr);

	napi_enable(&priv->napi);
	netif_start_queue(dev);

	/* Enable interrupts */
	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);

	return 0;
}

/**
 * xgmac_stop - close entry point of the driver
 * @dev : device pointer.
 * Description:
 * This is the stop entry point of the driver.
 */
static int xgmac_stop(struct net_device *dev)
{
	struct xgmac_priv *priv = netdev_priv(dev);

	if (readl(priv->base + XGMAC_DMA_INTR_ENA))
		napi_disable(&priv->napi);

	writel(0, priv->base + XGMAC_DMA_INTR_ENA);

	netif_tx_disable(dev);

	/* Disable the MAC core */
	xgmac_mac_disable(priv->base);

	/* Release and free the Rx/Tx resources */
	xgmac_free_dma_desc_rings(priv);

	return 0;
}
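
/*
 * TX completion interrupts are coalesced: xgmac_xmit() only requests an
 * interrupt (TXDESC_INTERRUPT) on every (DMA_TX_RING_SZ/4)th packet, and
 * the DMA "transmit buffer unavailable" interrupt picks up the slack,
 * which should keep the interrupt rate down under load.
 */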
/**
 * xgmac_xmit:
 * @skb : the socket buffer
 * @dev : device pointer
 * Description : Tx entry point of the driver.
 */
static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xgmac_priv *priv = netdev_priv(dev);
	unsigned int entry;
	int i;
	u32 irq_flag;
	int nfrags = skb_shinfo(skb)->nr_frags;
	struct xgmac_dma_desc *desc, *first;
	unsigned int desc_flags;
	unsigned int len;
	dma_addr_t paddr;

	priv->tx_irq_cnt = (priv->tx_irq_cnt + 1) & (DMA_TX_RING_SZ/4 - 1);
	irq_flag = priv->tx_irq_cnt ? 0 : TXDESC_INTERRUPT;

	desc_flags = (skb->ip_summed == CHECKSUM_PARTIAL) ?
		TXDESC_CSUM_ALL : 0;
	entry = priv->tx_head;
	desc = priv->dma_tx + entry;
	first = desc;

	len = skb_headlen(skb);
	paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(priv->device, paddr)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	priv->tx_skbuff[entry] = skb;
	desc_set_buf_addr_and_size(desc, paddr, len);

	for (i = 0; i < nfrags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		len = frag->size;

		paddr = skb_frag_dma_map(priv->device, frag, 0, len,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, paddr))
			goto dma_err;

		entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
		desc = priv->dma_tx + entry;
		priv->tx_skbuff[entry] = skb;

		desc_set_buf_addr_and_size(desc, paddr, len);
		if (i < (nfrags - 1))
			desc_set_tx_owner(desc, desc_flags);
	}

	/* Interrupt on completion only for the last segment */
	if (desc != first)
		desc_set_tx_owner(desc, desc_flags |
			TXDESC_LAST_SEG | irq_flag);
	else
		desc_flags |= TXDESC_LAST_SEG | irq_flag;

	/* Set owner on first desc last to avoid race condition */
	wmb();
	desc_set_tx_owner(first, desc_flags | TXDESC_FIRST_SEG);

	writel(1, priv->base + XGMAC_DMA_TX_POLL);

	priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);

	/* Ensure tx_head update is visible to tx completion */
	smp_mb();
	if (unlikely(tx_dma_ring_space(priv) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		/* Ensure netif_stop_queue is visible to tx completion */
		smp_mb();
		if (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)
			netif_start_queue(dev);
	}
	return NETDEV_TX_OK;

dma_err:
	entry = priv->tx_head;
	for ( ; i > 0; i--) {
		entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
		desc = priv->dma_tx + entry;
		priv->tx_skbuff[entry] = NULL;
		dma_unmap_page(priv->device, desc_get_buf_addr(desc),
			       desc_get_buf_len(desc), DMA_TO_DEVICE);
		desc_clear_tx_owner(desc);
	}
	desc = first;
	dma_unmap_single(priv->device, desc_get_buf_addr(desc),
			 desc_get_buf_len(desc), DMA_TO_DEVICE);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int xgmac_rx(struct xgmac_priv *priv, int limit)
{
	unsigned int entry;
	unsigned int count = 0;
	struct xgmac_dma_desc *p;

	while (count < limit) {
		int ip_checksum;
		struct sk_buff *skb;
		int frame_len;

		if (!dma_ring_cnt(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ))
			break;

		entry = priv->rx_tail;
		p = priv->dma_rx + entry;
		if (desc_get_owner(p))
			break;

		count++;
		priv->rx_tail = dma_ring_incr(priv->rx_tail, DMA_RX_RING_SZ);

		/* read the status of the incoming frame */
		ip_checksum = desc_get_rx_status(priv, p);
		if (ip_checksum < 0)
			continue;

		skb = priv->rx_skbuff[entry];
		if (unlikely(!skb)) {
			netdev_err(priv->dev, "Inconsistent Rx descriptor chain\n");
			break;
		}
		priv->rx_skbuff[entry] = NULL;

		frame_len = desc_get_rx_frame_len(p);
		netdev_dbg(priv->dev, "RX frame size %d, COE status: %d\n",
			frame_len, ip_checksum);

		skb_put(skb, frame_len);
		dma_unmap_single(priv->device, desc_get_buf_addr(p),
				 priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE);

		skb->protocol = eth_type_trans(skb, priv->dev);
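		/* Frames whose checksum the hardware verified go through
		 * GRO; anything else takes the plain receive path. */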
		skb->ip_summed = ip_checksum;
		if (ip_checksum == CHECKSUM_NONE)
			netif_receive_skb(skb);
		else
			napi_gro_receive(&priv->napi, skb);
	}

	xgmac_rx_refill(priv);

	return count;
}

/**
 * xgmac_poll - xgmac poll method (NAPI)
 * @napi : pointer to the napi structure.
 * @budget : maximum number of packets that the current CPU can receive from
 *	  all interfaces.
 * Description :
 * This function implements the reception process.
 * Also it runs the TX completion thread
 */
static int xgmac_poll(struct napi_struct *napi, int budget)
{
	struct xgmac_priv *priv = container_of(napi,
				       struct xgmac_priv, napi);
	int work_done = 0;

	xgmac_tx_complete(priv);
	work_done = xgmac_rx(priv, budget);

	if (work_done < budget) {
		napi_complete(napi);
		__raw_writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
	}
	return work_done;
}

/**
 * xgmac_tx_timeout
 * @dev : Pointer to net device structure
 * Description: this function is called when a packet transmission fails to
 * complete within a reasonable time. The driver will mark the error in the
 * netdev structure and arrange for the device to be reset to a sane state
 * in order to transmit a new packet.
 */
static void xgmac_tx_timeout(struct net_device *dev)
{
	struct xgmac_priv *priv = netdev_priv(dev);
	schedule_work(&priv->tx_timeout_work);
}
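
/*
 * Address filtering strategy: the perfect-match address registers are
 * used first (how many exist is probed in xgmac_probe()), and the 16-word
 * hash table takes over once they are exhausted.
 */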
/**
 * xgmac_set_rx_mode - entry point for multicast addressing
 * @dev : pointer to the device structure
 * Description:
 * This function is a driver entry point which gets called by the kernel
 * whenever multicast addresses must be enabled/disabled.
 * Return value:
 * void.
 */
static void xgmac_set_rx_mode(struct net_device *dev)
{
	int i;
	struct xgmac_priv *priv = netdev_priv(dev);
	void __iomem *ioaddr = priv->base;
	unsigned int value = 0;
	u32 hash_filter[XGMAC_NUM_HASH];
	int reg = 1;
	struct netdev_hw_addr *ha;
	bool use_hash = false;

	netdev_dbg(priv->dev, "# mcasts %d, # unicast %d\n",
		 netdev_mc_count(dev), netdev_uc_count(dev));

	if (dev->flags & IFF_PROMISC)
		value |= XGMAC_FRAME_FILTER_PR;

	memset(hash_filter, 0, sizeof(hash_filter));

	if (netdev_uc_count(dev) > priv->max_macs) {
		use_hash = true;
		value |= XGMAC_FRAME_FILTER_HUC | XGMAC_FRAME_FILTER_HPF;
	}
	netdev_for_each_uc_addr(ha, dev) {
		if (use_hash) {
			u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23;

			/* The most significant 4 bits determine the register to
			 * use (H/L) while the other 5 bits determine the bit
			 * within the register. */
			hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		} else {
			xgmac_set_mac_addr(ioaddr, ha->addr, reg);
			reg++;
		}
	}

	if (dev->flags & IFF_ALLMULTI) {
		value |= XGMAC_FRAME_FILTER_PM;
		goto out;
	}

	if ((netdev_mc_count(dev) + reg - 1) > priv->max_macs) {
		use_hash = true;
		value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF;
	} else {
		use_hash = false;
	}
	netdev_for_each_mc_addr(ha, dev) {
		if (use_hash) {
			u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23;

			/* The most significant 4 bits determine the register to
			 * use (H/L) while the other 5 bits determine the bit
			 * within the register. */
			hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		} else {
			xgmac_set_mac_addr(ioaddr, ha->addr, reg);
			reg++;
		}
	}

out:
	for (i = reg; i <= priv->max_macs; i++)
		xgmac_set_mac_addr(ioaddr, NULL, i);
	for (i = 0; i < XGMAC_NUM_HASH; i++)
		writel(hash_filter[i], ioaddr + XGMAC_HASH(i));

	writel(value, ioaddr + XGMAC_FRAME_FILTER);
}

/**
 * xgmac_change_mtu - entry point to change MTU size for the device.
 * @dev : device pointer.
 * @new_mtu : the new MTU size for the device.
 * Description: the Maximum Transfer Unit (MTU) is used by the network layer
 * to drive packet transmission. Ethernet has an MTU of 1500 octets
 * (ETH_DATA_LEN). This value can be changed with ifconfig.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */
static int xgmac_change_mtu(struct net_device *dev, int new_mtu)
{
	struct xgmac_priv *priv = netdev_priv(dev);
	int old_mtu;

	if ((new_mtu < 46) || (new_mtu > MAX_MTU)) {
		netdev_err(priv->dev, "invalid MTU, max MTU is: %d\n", MAX_MTU);
		return -EINVAL;
	}

	old_mtu = dev->mtu;

	/* return early if the buffer sizes will not change */
	if (old_mtu == new_mtu)
		return 0;

	dev->mtu = new_mtu;

	/* if the interface is down, recording the new MTU is all we need */
	if (!netif_running(dev))
		return 0;

	/* Bring interface down and back up to resize the buffers */
	xgmac_stop(dev);
	return xgmac_open(dev);
}

static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id)
{
	u32 intr_status;
	struct net_device *dev = (struct net_device *)dev_id;
	struct xgmac_priv *priv = netdev_priv(dev);
	void __iomem *ioaddr = priv->base;

	intr_status = __raw_readl(ioaddr + XGMAC_INT_STAT);
	if (intr_status & XGMAC_INT_STAT_PMT) {
		netdev_dbg(priv->dev, "received Magic frame\n");
		/* clear the PMT bits 5 and 6 by reading the PMT */
		readl(ioaddr + XGMAC_PMT);
	}
	return IRQ_HANDLED;
}

static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
{
	u32 intr_status;
	struct net_device *dev = (struct net_device *)dev_id;
	struct xgmac_priv *priv = netdev_priv(dev);
	struct xgmac_extra_stats *x = &priv->xstats;

	/* read the status register (CSR5) */
	intr_status = __raw_readl(priv->base + XGMAC_DMA_STATUS);
	intr_status &= __raw_readl(priv->base + XGMAC_DMA_INTR_ENA);
	__raw_writel(intr_status, priv->base + XGMAC_DMA_STATUS);

	/* It displays the DMA process states (CSR5 register) */
	/* ABNORMAL interrupts */
	if (unlikely(intr_status & DMA_STATUS_AIS)) {
		if (intr_status & DMA_STATUS_TJT) {
			netdev_err(priv->dev, "transmit jabber\n");
			x->tx_jabber++;
		}
		if (intr_status & DMA_STATUS_RU)
			x->rx_buf_unav++;
		if (intr_status & DMA_STATUS_RPS) {
			netdev_err(priv->dev, "receive process stopped\n");
			x->rx_process_stopped++;
		}
		if (intr_status & DMA_STATUS_ETI) {
			netdev_err(priv->dev, "transmit early interrupt\n");
			x->tx_early++;
		}
		if (intr_status & DMA_STATUS_TPS) {
			netdev_err(priv->dev, "transmit process stopped\n");
			x->tx_process_stopped++;
			schedule_work(&priv->tx_timeout_work);
		}
		if (intr_status & DMA_STATUS_FBI) {
			netdev_err(priv->dev, "fatal bus error\n");
			x->fatal_bus_error++;
		}
	}
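
	/* TX/RX NORMAL interrupts: switch to the abnormal-only mask and let
	 * NAPI do the work; xgmac_poll() restores DMA_INTR_DEFAULT_MASK
	 * once it completes under budget. */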
	if (intr_status & (DMA_STATUS_RI | DMA_STATUS_TU | DMA_STATUS_TI)) {
		__raw_writel(DMA_INTR_ABNORMAL, priv->base + XGMAC_DMA_INTR_ENA);
		napi_schedule(&priv->napi);
	}

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by NETCONSOLE and other diagnostic tools
 * to allow network I/O with interrupts disabled. */
static void xgmac_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	xgmac_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static struct rtnl_link_stats64 *
xgmac_get_stats64(struct net_device *dev,
		  struct rtnl_link_stats64 *storage)
{
	struct xgmac_priv *priv = netdev_priv(dev);
	void __iomem *base = priv->base;
	u32 count;

	spin_lock_bh(&priv->stats_lock);
	writel(XGMAC_MMC_CTRL_CNT_FRZ, base + XGMAC_MMC_CTRL);

	storage->rx_bytes = readl(base + XGMAC_MMC_RXOCTET_G_LO);
	storage->rx_bytes |= (u64)(readl(base + XGMAC_MMC_RXOCTET_G_HI)) << 32;

	storage->rx_packets = readl(base + XGMAC_MMC_RXFRAME_GB_LO);
	storage->multicast = readl(base + XGMAC_MMC_RXMCFRAME_G);
	storage->rx_crc_errors = readl(base + XGMAC_MMC_RXCRCERR);
	storage->rx_length_errors = readl(base + XGMAC_MMC_RXLENGTHERR);
	storage->rx_missed_errors = readl(base + XGMAC_MMC_RXOVERFLOW);

	storage->tx_bytes = readl(base + XGMAC_MMC_TXOCTET_G_LO);
	storage->tx_bytes |= (u64)(readl(base + XGMAC_MMC_TXOCTET_G_HI)) << 32;

	count = readl(base + XGMAC_MMC_TXFRAME_GB_LO);
	storage->tx_errors = count - readl(base + XGMAC_MMC_TXFRAME_G_LO);
	storage->tx_packets = count;
	storage->tx_fifo_errors = readl(base + XGMAC_MMC_TXUNDERFLOW);

	writel(0, base + XGMAC_MMC_CTRL);
	spin_unlock_bh(&priv->stats_lock);
	return storage;
}

static int xgmac_set_mac_address(struct net_device *dev, void *p)
{
	struct xgmac_priv *priv = netdev_priv(dev);
	void __iomem *ioaddr = priv->base;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);

	return 0;
}

static int xgmac_set_features(struct net_device *dev, netdev_features_t features)
{
	u32 ctrl;
	struct xgmac_priv *priv = netdev_priv(dev);
	void __iomem *ioaddr = priv->base;
	netdev_features_t changed = dev->features ^ features;

	if (!(changed & NETIF_F_RXCSUM))
		return 0;

	ctrl = readl(ioaddr + XGMAC_CONTROL);
	if (features & NETIF_F_RXCSUM)
		ctrl |= XGMAC_CONTROL_IPC;
	else
		ctrl &= ~XGMAC_CONTROL_IPC;
	writel(ctrl, ioaddr + XGMAC_CONTROL);

	return 0;
}

static const struct net_device_ops xgmac_netdev_ops = {
	.ndo_open = xgmac_open,
	.ndo_start_xmit = xgmac_xmit,
	.ndo_stop = xgmac_stop,
	.ndo_change_mtu = xgmac_change_mtu,
	.ndo_set_rx_mode = xgmac_set_rx_mode,
	.ndo_tx_timeout = xgmac_tx_timeout,
	.ndo_get_stats64 = xgmac_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = xgmac_poll_controller,
#endif
	.ndo_set_mac_address = xgmac_set_mac_address,
	.ndo_set_features = xgmac_set_features,
};

static int xgmac_ethtool_getsettings(struct net_device *dev,
				     struct ethtool_cmd *cmd)
{
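	/* There is no PHY or autonegotiation handling in this driver, so
	 * the link is reported as a fixed 10G full-duplex attachment. */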
	cmd->autoneg = 0;
	cmd->duplex = DUPLEX_FULL;
	ethtool_cmd_speed_set(cmd, 10000);
	cmd->supported = 0;
	cmd->advertising = 0;
	cmd->transceiver = XCVR_INTERNAL;
	return 0;
}

static void xgmac_get_pauseparam(struct net_device *netdev,
				 struct ethtool_pauseparam *pause)
{
	struct xgmac_priv *priv = netdev_priv(netdev);

	pause->rx_pause = priv->rx_pause;
	pause->tx_pause = priv->tx_pause;
}

static int xgmac_set_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *pause)
{
	struct xgmac_priv *priv = netdev_priv(netdev);

	if (pause->autoneg)
		return -EINVAL;

	return xgmac_set_flow_ctrl(priv, pause->rx_pause, pause->tx_pause);
}

struct xgmac_stats {
	char stat_string[ETH_GSTRING_LEN];
	int stat_offset;
	bool is_reg;
};

#define XGMAC_STAT(m)	\
	{ #m, offsetof(struct xgmac_priv, xstats.m), false }
#define XGMAC_HW_STAT(m, reg_offset)	\
	{ #m, reg_offset, true }

static const struct xgmac_stats xgmac_gstrings_stats[] = {
	XGMAC_STAT(tx_frame_flushed),
	XGMAC_STAT(tx_payload_error),
	XGMAC_STAT(tx_ip_header_error),
	XGMAC_STAT(tx_local_fault),
	XGMAC_STAT(tx_remote_fault),
	XGMAC_STAT(tx_early),
	XGMAC_STAT(tx_process_stopped),
	XGMAC_STAT(tx_jabber),
	XGMAC_STAT(rx_buf_unav),
	XGMAC_STAT(rx_process_stopped),
	XGMAC_STAT(rx_payload_error),
	XGMAC_STAT(rx_ip_header_error),
	XGMAC_STAT(rx_da_filter_fail),
	XGMAC_STAT(fatal_bus_error),
	XGMAC_HW_STAT(rx_watchdog, XGMAC_MMC_RXWATCHDOG),
	XGMAC_HW_STAT(tx_vlan, XGMAC_MMC_TXVLANFRAME),
	XGMAC_HW_STAT(rx_vlan, XGMAC_MMC_RXVLANFRAME),
	XGMAC_HW_STAT(tx_pause, XGMAC_MMC_TXPAUSEFRAME),
	XGMAC_HW_STAT(rx_pause, XGMAC_MMC_RXPAUSEFRAME),
};
#define XGMAC_STATS_LEN ARRAY_SIZE(xgmac_gstrings_stats)

static void xgmac_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *dummy,
				    u64 *data)
{
	struct xgmac_priv *priv = netdev_priv(dev);
	void *p = priv;
	int i;

	for (i = 0; i < XGMAC_STATS_LEN; i++) {
		if (xgmac_gstrings_stats[i].is_reg)
			*data++ = readl(priv->base +
				xgmac_gstrings_stats[i].stat_offset);
		else
			/* the software counters are unsigned long, not u32 */
			*data++ = *(unsigned long *)(p +
				xgmac_gstrings_stats[i].stat_offset);
	}
}

static int xgmac_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return XGMAC_STATS_LEN;
	default:
		return -EINVAL;
	}
}

static void xgmac_get_strings(struct net_device *dev, u32 stringset,
			      u8 *data)
{
	int i;
	u8 *p = data;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < XGMAC_STATS_LEN; i++) {
			memcpy(p, xgmac_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
	default:
		WARN_ON(1);
		break;
	}
}

static void xgmac_get_wol(struct net_device *dev,
			  struct ethtool_wolinfo *wol)
{
	struct xgmac_priv *priv = netdev_priv(dev);

	if (device_can_wakeup(priv->device)) {
		wol->supported = WAKE_MAGIC | WAKE_UCAST;
		wol->wolopts = priv->wolopts;
	}
}

static int xgmac_set_wol(struct net_device *dev,
			 struct ethtool_wolinfo *wol)
{
	struct xgmac_priv *priv = netdev_priv(dev);
	u32 support = WAKE_MAGIC | WAKE_UCAST;

	if (!device_can_wakeup(priv->device))
		return -ENOTSUPP;

	if (wol->wolopts & ~support)
		return -EINVAL;

	priv->wolopts = wol->wolopts;

	if (wol->wolopts) {
		device_set_wakeup_enable(priv->device, 1);
		enable_irq_wake(dev->irq);
	} else {
		device_set_wakeup_enable(priv->device, 0);
		disable_irq_wake(dev->irq);
	}

	return 0;
}

static const struct ethtool_ops xgmac_ethtool_ops = {
	.get_settings = xgmac_ethtool_getsettings,
	.get_link = ethtool_op_get_link,
	.get_pauseparam = xgmac_get_pauseparam,
	.set_pauseparam = xgmac_set_pauseparam,
	.get_ethtool_stats = xgmac_get_ethtool_stats,
	.get_strings = xgmac_get_strings,
	.get_wol = xgmac_get_wol,
	.set_wol = xgmac_set_wol,
	.get_sset_count = xgmac_get_sset_count,
};

/**
 * xgmac_probe
 * @pdev: platform device pointer
 * Description: the driver is initialized through platform_device.
 */
static int xgmac_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct resource *res;
	struct net_device *ndev = NULL;
	struct xgmac_priv *priv = NULL;
	u32 uid;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	if (!request_mem_region(res->start, resource_size(res), pdev->name))
		return -EBUSY;

	ndev = alloc_etherdev(sizeof(struct xgmac_priv));
	if (!ndev) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	SET_NETDEV_DEV(ndev, &pdev->dev);
	priv = netdev_priv(ndev);
	platform_set_drvdata(pdev, ndev);
	ether_setup(ndev);
	ndev->netdev_ops = &xgmac_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops);
	spin_lock_init(&priv->stats_lock);
	INIT_WORK(&priv->tx_timeout_work, xgmac_tx_timeout_work);

	priv->device = &pdev->dev;
	priv->dev = ndev;
	priv->rx_pause = 1;
	priv->tx_pause = 1;

	priv->base = ioremap(res->start, resource_size(res));
	if (!priv->base) {
		netdev_err(ndev, "ioremap failed\n");
		ret = -ENOMEM;
		goto err_io;
	}

	uid = readl(priv->base + XGMAC_VERSION);
	netdev_info(ndev, "h/w version is 0x%x\n", uid);

	/* Figure out how many valid mac address filter registers we have */
	writel(1, priv->base + XGMAC_ADDR_HIGH(31));
	if (readl(priv->base + XGMAC_ADDR_HIGH(31)) == 1)
		priv->max_macs = 31;
	else
		priv->max_macs = 7;

	writel(0, priv->base + XGMAC_DMA_INTR_ENA);
	ndev->irq = platform_get_irq(pdev, 0);
	if (ndev->irq == -ENXIO) {
		netdev_err(ndev, "No irq resource\n");
		ret = ndev->irq;
		goto err_irq;
	}

	ret = request_irq(ndev->irq, xgmac_interrupt, 0,
			  dev_name(&pdev->dev), ndev);
	if (ret < 0) {
		netdev_err(ndev, "Could not request irq %d - ret %d\n",
			   ndev->irq, ret);
		goto err_irq;
	}

	priv->pmt_irq = platform_get_irq(pdev, 1);
	if (priv->pmt_irq == -ENXIO) {
		netdev_err(ndev, "No pmt irq resource\n");
		ret = priv->pmt_irq;
		goto err_pmt_irq;
	}

	ret = request_irq(priv->pmt_irq, xgmac_pmt_interrupt, 0,
			  dev_name(&pdev->dev), ndev);
	if (ret < 0) {
		netdev_err(ndev, "Could not request irq %d - ret %d\n",
			   priv->pmt_irq, ret);
		goto err_pmt_irq;
	}

	device_set_wakeup_capable(&pdev->dev, 1);
	if (device_can_wakeup(priv->device))
		priv->wolopts = WAKE_MAGIC;	/* Magic Frame as default */
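
	/* Checksum offloads: TX COE is only advertised when the hardware
	 * feature register reports it; RX COE is toggled separately via
	 * XGMAC_CONTROL_IPC in xgmac_set_features(). */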
	ndev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA;
	if (readl(priv->base + XGMAC_DMA_HW_FEATURE) & DMA_HW_FEAT_TXCOESEL)
		ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				     NETIF_F_RXCSUM;
	ndev->features |= ndev->hw_features;
	ndev->priv_flags |= IFF_UNICAST_FLT;

	/* Get the MAC address */
	xgmac_get_mac_addr(priv->base, ndev->dev_addr, 0);
	if (!is_valid_ether_addr(ndev->dev_addr))
		netdev_warn(ndev, "MAC address %pM not valid",
			 ndev->dev_addr);

	netif_napi_add(ndev, &priv->napi, xgmac_poll, 64);
	ret = register_netdev(ndev);
	if (ret)
		goto err_reg;

	return 0;

err_reg:
	netif_napi_del(&priv->napi);
	free_irq(priv->pmt_irq, ndev);
err_pmt_irq:
	free_irq(ndev->irq, ndev);
err_irq:
	iounmap(priv->base);
err_io:
	free_netdev(ndev);
err_alloc:
	release_mem_region(res->start, resource_size(res));
	return ret;
}

/**
 * xgmac_remove
 * @pdev: platform device pointer
 * Description: this function resets the TX/RX processes, disables the MAC
 * RX/TX, releases the DMA descriptor rings and unmaps the allocated memory.
 */
static int xgmac_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct xgmac_priv *priv = netdev_priv(ndev);
	struct resource *res;

	xgmac_mac_disable(priv->base);

	/* Free the IRQ lines */
	free_irq(ndev->irq, ndev);
	free_irq(priv->pmt_irq, ndev);

	unregister_netdev(ndev);
	netif_napi_del(&priv->napi);

	iounmap(priv->base);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));

	free_netdev(ndev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static void xgmac_pmt(void __iomem *ioaddr, unsigned long mode)
{
	unsigned int pmt = 0;

	if (mode & WAKE_MAGIC)
		pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT_EN;
	if (mode & WAKE_UCAST)
		pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST;

	writel(pmt, ioaddr + XGMAC_PMT);
}

static int xgmac_suspend(struct device *dev)
{
	struct net_device *ndev = platform_get_drvdata(to_platform_device(dev));
	struct xgmac_priv *priv = netdev_priv(ndev);
	u32 value;

	if (!ndev || !netif_running(ndev))
		return 0;

	netif_device_detach(ndev);
	napi_disable(&priv->napi);
	writel(0, priv->base + XGMAC_DMA_INTR_ENA);

	if (device_may_wakeup(priv->device)) {
		/* Stop TX/RX DMA Only */
		value = readl(priv->base + XGMAC_DMA_CONTROL);
		value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR);
		writel(value, priv->base + XGMAC_DMA_CONTROL);

		xgmac_pmt(priv->base, priv->wolopts);
	} else
		xgmac_mac_disable(priv->base);

	return 0;
}

static int xgmac_resume(struct device *dev)
{
	struct net_device *ndev = platform_get_drvdata(to_platform_device(dev));
	struct xgmac_priv *priv = netdev_priv(ndev);
	void __iomem *ioaddr = priv->base;

	if (!netif_running(ndev))
		return 0;

	xgmac_pmt(ioaddr, 0);

	/* Enable the MAC and DMA */
	xgmac_mac_enable(ioaddr);
	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);

	netif_device_attach(ndev);
	napi_enable(&priv->napi);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(xgmac_pm_ops, xgmac_suspend, xgmac_resume);

static const struct of_device_id xgmac_of_match[] = {
	{ .compatible = "calxeda,hb-xgmac", },
	{},
};
MODULE_DEVICE_TABLE(of, xgmac_of_match);

static struct platform_driver xgmac_driver = {
	.driver = {
		.name = "calxedaxgmac",
		.of_match_table = xgmac_of_match,
		.pm = &xgmac_pm_ops,
	},
	.probe = xgmac_probe,
	.remove = xgmac_remove,
};

module_platform_driver(xgmac_driver);

MODULE_AUTHOR("Calxeda, Inc.");
MODULE_DESCRIPTION("Calxeda 10G XGMAC driver");
MODULE_LICENSE("GPL v2");