/*
 * Copyright 2010-2011 Calxeda, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/circ_buf.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/crc32.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

/* XGMAC Register definitions */
#define XGMAC_CONTROL           0x00000000      /* MAC Configuration */
#define XGMAC_FRAME_FILTER      0x00000004      /* MAC Frame Filter */
#define XGMAC_FLOW_CTRL         0x00000018      /* MAC Flow Control */
#define XGMAC_VLAN_TAG          0x0000001C      /* VLAN Tags */
#define XGMAC_VERSION           0x00000020      /* Version */
#define XGMAC_VLAN_INCL         0x00000024      /* VLAN tag for tx frames */
#define XGMAC_LPI_CTRL          0x00000028      /* LPI Control and Status */
#define XGMAC_LPI_TIMER         0x0000002C      /* LPI Timers Control */
#define XGMAC_TX_PACE           0x00000030      /* Transmit Pace and Stretch */
#define XGMAC_VLAN_HASH         0x00000034      /* VLAN Hash Table */
#define XGMAC_DEBUG             0x00000038      /* Debug */
#define XGMAC_INT_STAT          0x0000003C      /* Interrupt and Control */
#define XGMAC_ADDR_HIGH(reg)    (0x00000040 + ((reg) * 8))
#define XGMAC_ADDR_LOW(reg)     (0x00000044 + ((reg) * 8))
#define XGMAC_HASH(n)           (0x00000300 + (n) * 4)  /* HASH table regs */
#define XGMAC_NUM_HASH          16
#define XGMAC_OMR               0x00000400
#define XGMAC_REMOTE_WAKE       0x00000700      /* Remote Wake-Up Frm Filter */
#define XGMAC_PMT               0x00000704      /* PMT Control and Status */
#define XGMAC_MMC_CTRL          0x00000800      /* XGMAC MMC Control */
#define XGMAC_MMC_INTR_RX       0x00000804      /* Receive Interrupt */
#define XGMAC_MMC_INTR_TX       0x00000808      /* Transmit Interrupt */
#define XGMAC_MMC_INTR_MASK_RX  0x0000080c      /* Receive Interrupt Mask */
#define XGMAC_MMC_INTR_MASK_TX  0x00000810      /* Transmit Interrupt Mask */

/* Hardware TX Statistics Counters */
#define XGMAC_MMC_TXOCTET_GB_LO 0x00000814
#define XGMAC_MMC_TXOCTET_GB_HI 0x00000818
#define XGMAC_MMC_TXFRAME_GB_LO 0x0000081C
#define XGMAC_MMC_TXFRAME_GB_HI 0x00000820
#define XGMAC_MMC_TXBCFRAME_G   0x00000824
#define XGMAC_MMC_TXMCFRAME_G   0x0000082C
#define XGMAC_MMC_TXUCFRAME_GB  0x00000864
#define XGMAC_MMC_TXMCFRAME_GB  0x0000086C
#define XGMAC_MMC_TXBCFRAME_GB  0x00000874
#define XGMAC_MMC_TXUNDERFLOW   0x0000087C
#define XGMAC_MMC_TXOCTET_G_LO  0x00000884
#define XGMAC_MMC_TXOCTET_G_HI  0x00000888
#define XGMAC_MMC_TXFRAME_G_LO  0x0000088C
#define XGMAC_MMC_TXFRAME_G_HI  0x00000890
#define XGMAC_MMC_TXPAUSEFRAME  0x00000894
#define XGMAC_MMC_TXVLANFRAME   0x0000089C

/* Hardware RX Statistics Counters */
#define XGMAC_MMC_RXFRAME_GB_LO 0x00000900
#define XGMAC_MMC_RXFRAME_GB_HI 0x00000904
#define XGMAC_MMC_RXOCTET_GB_LO 0x00000908
#define XGMAC_MMC_RXOCTET_GB_HI 0x0000090C
#define XGMAC_MMC_RXOCTET_G_LO  0x00000910
#define XGMAC_MMC_RXOCTET_G_HI  0x00000914
#define XGMAC_MMC_RXBCFRAME_G   0x00000918
#define XGMAC_MMC_RXMCFRAME_G   0x00000920
#define XGMAC_MMC_RXCRCERR      0x00000928
#define XGMAC_MMC_RXRUNT        0x00000930
#define XGMAC_MMC_RXJABBER      0x00000934
#define XGMAC_MMC_RXUCFRAME_G   0x00000970
#define XGMAC_MMC_RXLENGTHERR   0x00000978
#define XGMAC_MMC_RXPAUSEFRAME  0x00000988
#define XGMAC_MMC_RXOVERFLOW    0x00000990
#define XGMAC_MMC_RXVLANFRAME   0x00000998
#define XGMAC_MMC_RXWATCHDOG    0x000009a0

/* DMA Control and Status Registers */
#define XGMAC_DMA_BUS_MODE      0x00000f00      /* Bus Mode */
#define XGMAC_DMA_TX_POLL       0x00000f04      /* Transmit Poll Demand */
#define XGMAC_DMA_RX_POLL       0x00000f08      /* Received Poll Demand */
#define XGMAC_DMA_RX_BASE_ADDR  0x00000f0c      /* Receive List Base */
#define XGMAC_DMA_TX_BASE_ADDR  0x00000f10      /* Transmit List Base */
#define XGMAC_DMA_STATUS        0x00000f14      /* Status Register */
#define XGMAC_DMA_CONTROL       0x00000f18      /* Ctrl (Operational Mode) */
#define XGMAC_DMA_INTR_ENA      0x00000f1c      /* Interrupt Enable */
#define XGMAC_DMA_MISS_FRAME_CTR 0x00000f20     /* Missed Frame Counter */
#define XGMAC_DMA_RI_WDOG_TIMER 0x00000f24      /* RX Intr Watchdog Timer */
#define XGMAC_DMA_AXI_BUS       0x00000f28      /* AXI Bus Mode */
#define XGMAC_DMA_AXI_STATUS    0x00000f2C      /* AXI Status */
#define XGMAC_DMA_HW_FEATURE    0x00000f58      /* Enabled Hardware Features */

#define XGMAC_ADDR_AE           0x80000000

/* PMT Control and Status */
#define XGMAC_PMT_POINTER_RESET 0x80000000
#define XGMAC_PMT_GLBL_UNICAST  0x00000200
#define XGMAC_PMT_WAKEUP_RX_FRM 0x00000040
#define XGMAC_PMT_MAGIC_PKT     0x00000020
#define XGMAC_PMT_WAKEUP_FRM_EN 0x00000004
#define XGMAC_PMT_MAGIC_PKT_EN  0x00000002
#define XGMAC_PMT_POWERDOWN     0x00000001

#define XGMAC_CONTROL_SPD       0x40000000      /* Speed control */
#define XGMAC_CONTROL_SPD_MASK  0x60000000
#define XGMAC_CONTROL_SPD_1G    0x60000000
#define XGMAC_CONTROL_SPD_2_5G  0x40000000
#define XGMAC_CONTROL_SPD_10G   0x00000000
#define XGMAC_CONTROL_SARC      0x10000000      /* Source Addr Insert/Replace */
#define XGMAC_CONTROL_SARK_MASK 0x18000000
#define XGMAC_CONTROL_CAR       0x04000000      /* CRC Addition/Replacement */
#define XGMAC_CONTROL_CAR_MASK  0x06000000
#define XGMAC_CONTROL_DP        0x01000000      /* Disable Padding */
#define XGMAC_CONTROL_WD        0x00800000      /* Disable Watchdog on rx */
#define XGMAC_CONTROL_JD        0x00400000      /* Jabber disable */
#define XGMAC_CONTROL_JE        0x00100000      /* Jumbo frame */
#define XGMAC_CONTROL_LM        0x00001000      /* Loop-back mode */
#define XGMAC_CONTROL_IPC       0x00000400      /* Checksum Offload */
#define XGMAC_CONTROL_ACS       0x00000080      /* Automatic Pad/FCS Strip */
#define XGMAC_CONTROL_DDIC      0x00000010      /* Disable Deficit Idle Count */
#define XGMAC_CONTROL_TE        0x00000008      /* Transmitter Enable */
#define XGMAC_CONTROL_RE        0x00000004      /* Receiver Enable */

/* XGMAC Frame Filter defines */
#define XGMAC_FRAME_FILTER_PR   0x00000001      /* Promiscuous Mode */
#define XGMAC_FRAME_FILTER_HUC  0x00000002      /* Hash Unicast */
#define XGMAC_FRAME_FILTER_HMC  0x00000004      /* Hash Multicast */
#define XGMAC_FRAME_FILTER_DAIF 0x00000008      /* DA Inverse Filtering */
#define XGMAC_FRAME_FILTER_PM   0x00000010      /* Pass all multicast */
#define XGMAC_FRAME_FILTER_DBF  0x00000020      /* Disable Broadcast frames */
#define XGMAC_FRAME_FILTER_SAIF 0x00000100      /* Inverse Filtering */
#define XGMAC_FRAME_FILTER_SAF  0x00000200      /* Source Address Filter */
#define XGMAC_FRAME_FILTER_HPF  0x00000400      /* Hash or perfect Filter */
#define XGMAC_FRAME_FILTER_VHF  0x00000800      /* VLAN Hash Filter */
#define XGMAC_FRAME_FILTER_VPF  0x00001000      /* VLAN Perfect Filter */
#define XGMAC_FRAME_FILTER_RA   0x80000000      /* Receive all mode */

/* XGMAC FLOW CTRL defines */
#define XGMAC_FLOW_CTRL_PT_MASK 0xffff0000      /* Pause Time Mask */
#define XGMAC_FLOW_CTRL_PT_SHIFT 16
#define XGMAC_FLOW_CTRL_DZQP    0x00000080      /* Disable Zero-Quanta Phase */
#define XGMAC_FLOW_CTRL_PLT     0x00000020      /* Pause Low Threshold */
#define XGMAC_FLOW_CTRL_PLT_MASK 0x00000030     /* PLT MASK */
#define XGMAC_FLOW_CTRL_UP      0x00000008      /* Unicast Pause Frame Detect */
#define XGMAC_FLOW_CTRL_RFE     0x00000004      /* Rx Flow Control Enable */
#define XGMAC_FLOW_CTRL_TFE     0x00000002      /* Tx Flow Control Enable */
#define XGMAC_FLOW_CTRL_FCB_BPA 0x00000001      /* Flow Control Busy ... */

/* XGMAC_INT_STAT reg */
#define XGMAC_INT_STAT_PMTIM    0x00800000      /* PMT Interrupt Mask */
#define XGMAC_INT_STAT_PMT      0x0080          /* PMT Interrupt Status */
#define XGMAC_INT_STAT_LPI      0x0040          /* LPI Interrupt Status */

/* DMA Bus Mode register defines */
#define DMA_BUS_MODE_SFT_RESET  0x00000001      /* Software Reset */
#define DMA_BUS_MODE_DSL_MASK   0x0000007c      /* Descriptor Skip Length */
#define DMA_BUS_MODE_DSL_SHIFT  2               /* (in DWORDS) */
#define DMA_BUS_MODE_ATDS       0x00000080      /* Alternate Descriptor Size */

/* Programmable burst length */
#define DMA_BUS_MODE_PBL_MASK   0x00003f00      /* Programmable Burst Len */
#define DMA_BUS_MODE_PBL_SHIFT  8
#define DMA_BUS_MODE_FB         0x00010000      /* Fixed burst */
#define DMA_BUS_MODE_RPBL_MASK  0x003e0000      /* Rx-Programmable Burst Len */
#define DMA_BUS_MODE_RPBL_SHIFT 17
#define DMA_BUS_MODE_USP        0x00800000
#define DMA_BUS_MODE_8PBL       0x01000000
#define DMA_BUS_MODE_AAL        0x02000000

/* DMA Bus Mode register defines */
#define DMA_BUS_PR_RATIO_MASK   0x0000c000      /* Rx/Tx priority ratio */
#define DMA_BUS_PR_RATIO_SHIFT  14
#define DMA_BUS_FB              0x00010000      /* Fixed Burst */

/* DMA Control register defines */
#define DMA_CONTROL_ST          0x00002000      /* Start/Stop Transmission */
#define DMA_CONTROL_SR          0x00000002      /* Start/Stop Receive */
#define DMA_CONTROL_DFF         0x01000000      /* Disable flush of rx frames */
#define DMA_CONTROL_OSF         0x00000004      /* Operate on 2nd tx frame */

/* DMA Normal interrupt */
#define DMA_INTR_ENA_NIE        0x00010000      /* Normal Summary */
#define DMA_INTR_ENA_AIE        0x00008000      /* Abnormal Summary */
#define DMA_INTR_ENA_ERE        0x00004000      /* Early Receive */
#define DMA_INTR_ENA_FBE        0x00002000      /* Fatal Bus Error */
#define DMA_INTR_ENA_ETE        0x00000400      /* Early Transmit */
#define DMA_INTR_ENA_RWE        0x00000200      /* Receive Watchdog */
#define DMA_INTR_ENA_RSE        0x00000100      /* Receive Stopped */
#define DMA_INTR_ENA_RUE        0x00000080      /* Receive Buffer Unavailable */
#define DMA_INTR_ENA_RIE        0x00000040      /* Receive Interrupt */
#define DMA_INTR_ENA_UNE        0x00000020      /* Tx Underflow */
#define DMA_INTR_ENA_OVE        0x00000010      /* Receive Overflow */
#define DMA_INTR_ENA_TJE        0x00000008      /* Transmit Jabber */
#define DMA_INTR_ENA_TUE        0x00000004      /* Transmit Buffer Unavail */
#define DMA_INTR_ENA_TSE        0x00000002      /* Transmit Stopped */
#define DMA_INTR_ENA_TIE        0x00000001      /* Transmit Interrupt */

#define DMA_INTR_NORMAL         (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
                                 DMA_INTR_ENA_TUE | DMA_INTR_ENA_TIE)

#define DMA_INTR_ABNORMAL       (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
                                 DMA_INTR_ENA_RWE | DMA_INTR_ENA_RSE | \
                                 DMA_INTR_ENA_RUE | DMA_INTR_ENA_UNE | \
                                 DMA_INTR_ENA_OVE | DMA_INTR_ENA_TJE | \
                                 DMA_INTR_ENA_TSE)

/* DMA default interrupt mask */
#define DMA_INTR_DEFAULT_MASK   (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)

/* DMA Status register defines */
#define DMA_STATUS_GMI          0x08000000      /* MMC interrupt */
#define DMA_STATUS_GLI          0x04000000      /* GMAC Line interface int */
#define DMA_STATUS_EB_MASK      0x00380000      /* Error Bits Mask */
#define DMA_STATUS_EB_TX_ABORT  0x00080000      /* Error Bits - TX Abort */
#define DMA_STATUS_EB_RX_ABORT  0x00100000      /* Error Bits - RX Abort */
#define DMA_STATUS_TS_MASK      0x00700000      /* Transmit Process State */
#define DMA_STATUS_TS_SHIFT     20
#define DMA_STATUS_RS_MASK      0x000e0000      /* Receive Process State */
#define DMA_STATUS_RS_SHIFT     17
#define DMA_STATUS_NIS          0x00010000      /* Normal Interrupt Summary */
#define DMA_STATUS_AIS          0x00008000      /* Abnormal Interrupt Summary */
#define DMA_STATUS_ERI          0x00004000      /* Early Receive Interrupt */
#define DMA_STATUS_FBI          0x00002000      /* Fatal Bus Error Interrupt */
#define DMA_STATUS_ETI          0x00000400      /* Early Transmit Interrupt */
#define DMA_STATUS_RWT          0x00000200      /* Receive Watchdog Timeout */
#define DMA_STATUS_RPS          0x00000100      /* Receive Process Stopped */
#define DMA_STATUS_RU           0x00000080      /* Receive Buffer Unavailable */
#define DMA_STATUS_RI           0x00000040      /* Receive Interrupt */
#define DMA_STATUS_UNF          0x00000020      /* Transmit Underflow */
#define DMA_STATUS_OVF          0x00000010      /* Receive Overflow */
#define DMA_STATUS_TJT          0x00000008      /* Transmit Jabber Timeout */
#define DMA_STATUS_TU           0x00000004      /* Transmit Buffer Unavail */
#define DMA_STATUS_TPS          0x00000002      /* Transmit Process Stopped */
#define DMA_STATUS_TI           0x00000001      /* Transmit Interrupt */

/* Common MAC defines */
#define MAC_ENABLE_TX           0x00000008      /* Transmitter Enable */
#define MAC_ENABLE_RX           0x00000004      /* Receiver Enable */

/* XGMAC Operation Mode Register */
#define XGMAC_OMR_TSF           0x00200000      /* TX FIFO Store and Forward */
#define XGMAC_OMR_FTF           0x00100000      /* Flush Transmit FIFO */
#define XGMAC_OMR_TTC           0x00020000      /* Transmit Threshold Ctrl */
#define XGMAC_OMR_TTC_MASK      0x00030000
#define XGMAC_OMR_RFD           0x00006000      /* FC Deactivation Threshold */
#define XGMAC_OMR_RFD_MASK      0x00007000      /* FC Deact Threshold MASK */
#define XGMAC_OMR_RFA           0x00000600      /* FC Activation Threshold */
#define XGMAC_OMR_RFA_MASK      0x00000E00      /* FC Act Threshold MASK */
#define XGMAC_OMR_EFC           0x00000100      /* Enable Hardware FC */
#define XGMAC_OMR_FEF           0x00000080      /* Forward Error Frames */
#define XGMAC_OMR_DT            0x00000040      /* Drop TCP/IP csum Errors */
#define XGMAC_OMR_RSF           0x00000020      /* RX FIFO Store and Forward */
#define XGMAC_OMR_RTC_256       0x00000018      /* RX Threshold Ctrl */
#define XGMAC_OMR_RTC_MASK      0x00000018      /* RX Threshold Ctrl MASK */

/* XGMAC HW Features Register */
#define DMA_HW_FEAT_TXCOESEL    0x00010000      /* TX Checksum offload */

#define XGMAC_MMC_CTRL_CNT_FRZ  0x00000008

/* XGMAC Descriptor Defines */
#define MAX_DESC_BUF_SZ         (0x2000 - 8)

#define RXDESC_EXT_STATUS       0x00000001
#define RXDESC_CRC_ERR          0x00000002
#define RXDESC_RX_ERR           0x00000008
#define RXDESC_RX_WDOG          0x00000010
#define RXDESC_FRAME_TYPE       0x00000020
#define RXDESC_GIANT_FRAME      0x00000080
#define RXDESC_LAST_SEG         0x00000100
#define RXDESC_FIRST_SEG        0x00000200
#define RXDESC_VLAN_FRAME       0x00000400
#define RXDESC_OVERFLOW_ERR     0x00000800
#define RXDESC_LENGTH_ERR       0x00001000
#define RXDESC_SA_FILTER_FAIL   0x00002000
#define RXDESC_DESCRIPTOR_ERR   0x00004000
#define RXDESC_ERROR_SUMMARY    0x00008000
#define RXDESC_FRAME_LEN_OFFSET 16
#define RXDESC_FRAME_LEN_MASK   0x3fff0000
#define RXDESC_DA_FILTER_FAIL   0x40000000

#define RXDESC1_END_RING        0x00008000

#define RXDESC_IP_PAYLOAD_MASK  0x00000003
#define RXDESC_IP_PAYLOAD_UDP   0x00000001
#define RXDESC_IP_PAYLOAD_TCP   0x00000002
#define RXDESC_IP_PAYLOAD_ICMP  0x00000003
#define RXDESC_IP_HEADER_ERR    0x00000008
#define RXDESC_IP_PAYLOAD_ERR   0x00000010
#define RXDESC_IPV4_PACKET      0x00000040
#define RXDESC_IPV6_PACKET      0x00000080
#define TXDESC_UNDERFLOW_ERR    0x00000001
#define TXDESC_JABBER_TIMEOUT   0x00000002
#define TXDESC_LOCAL_FAULT      0x00000004
#define TXDESC_REMOTE_FAULT     0x00000008
#define TXDESC_VLAN_FRAME       0x00000010
#define TXDESC_FRAME_FLUSHED    0x00000020
#define TXDESC_IP_HEADER_ERR    0x00000040
#define TXDESC_PAYLOAD_CSUM_ERR 0x00000080
#define TXDESC_ERROR_SUMMARY    0x00008000
#define TXDESC_SA_CTRL_INSERT   0x00040000
#define TXDESC_SA_CTRL_REPLACE  0x00080000
#define TXDESC_2ND_ADDR_CHAINED 0x00100000
#define TXDESC_END_RING         0x00200000
#define TXDESC_CSUM_IP          0x00400000
#define TXDESC_CSUM_IP_PAYLD    0x00800000
#define TXDESC_CSUM_ALL         0x00C00000
#define TXDESC_CRC_EN_REPLACE   0x01000000
#define TXDESC_CRC_EN_APPEND    0x02000000
#define TXDESC_DISABLE_PAD      0x04000000
#define TXDESC_FIRST_SEG        0x10000000
#define TXDESC_LAST_SEG         0x20000000
#define TXDESC_INTERRUPT        0x40000000

#define DESC_OWN                0x80000000
#define DESC_BUFFER1_SZ_MASK    0x00001fff
#define DESC_BUFFER2_SZ_MASK    0x1fff0000
#define DESC_BUFFER2_SZ_OFFSET  16

struct xgmac_dma_desc {
        __le32 flags;
        __le32 buf_size;
        __le32 buf1_addr;               /* Buffer 1 Address Pointer */
        __le32 buf2_addr;               /* Buffer 2 Address Pointer */
        __le32 ext_status;
        __le32 res[3];
};

struct xgmac_extra_stats {
        /* Transmit errors */
        unsigned long tx_jabber;
        unsigned long tx_frame_flushed;
        unsigned long tx_payload_error;
        unsigned long tx_ip_header_error;
        unsigned long tx_local_fault;
        unsigned long tx_remote_fault;
        /* Receive errors */
        unsigned long rx_watchdog;
        unsigned long rx_da_filter_fail;
        unsigned long rx_payload_error;
        unsigned long rx_ip_header_error;
        /* Tx/Rx IRQ errors */
        unsigned long tx_process_stopped;
        unsigned long rx_buf_unav;
        unsigned long rx_process_stopped;
        unsigned long tx_early;
        unsigned long fatal_bus_error;
};

struct xgmac_priv {
        struct xgmac_dma_desc *dma_rx;
        struct sk_buff **rx_skbuff;
        unsigned int rx_tail;
        unsigned int rx_head;

        struct xgmac_dma_desc *dma_tx;
        struct sk_buff **tx_skbuff;
        unsigned int tx_head;
        unsigned int tx_tail;
        int tx_irq_cnt;

        void __iomem *base;
        unsigned int dma_buf_sz;
        dma_addr_t dma_rx_phy;
        dma_addr_t dma_tx_phy;

        struct net_device *dev;
        struct device *device;
        struct napi_struct napi;

        int max_macs;
        struct xgmac_extra_stats xstats;

        spinlock_t stats_lock;
        int pmt_irq;
        char rx_pause;
        char tx_pause;
        int wolopts;
        struct work_struct tx_timeout_work;
};
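
/*
 * Ring accounting: {rx,tx}_head is the producer index and {rx,tx}_tail the
 * consumer index.  Both ring sizes must remain powers of two so that
 * dma_ring_incr() below can wrap with a simple mask and the CIRC_SPACE()/
 * CIRC_CNT() helpers from <linux/circ_buf.h> stay valid.
 */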

/* XGMAC Configuration Settings */
#define MAX_MTU                 9000
#define PAUSE_TIME              0x400

#define DMA_RX_RING_SZ          256
#define DMA_TX_RING_SZ          128
/* minimum number of free TX descriptors required to wake up TX process */
#define TX_THRESH               (DMA_TX_RING_SZ/4)

/* DMA descriptor ring helpers */
#define dma_ring_incr(n, s)     (((n) + 1) & ((s) - 1))
#define dma_ring_space(h, t, s) CIRC_SPACE(h, t, s)
#define dma_ring_cnt(h, t, s)   CIRC_CNT(h, t, s)

#define tx_dma_ring_space(p) \
        dma_ring_space((p)->tx_head, (p)->tx_tail, DMA_TX_RING_SZ)

/* XGMAC Descriptor Access Helpers */
static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
{
        if (buf_sz > MAX_DESC_BUF_SZ)
                p->buf_size = cpu_to_le32(MAX_DESC_BUF_SZ |
                        (buf_sz - MAX_DESC_BUF_SZ) << DESC_BUFFER2_SZ_OFFSET);
        else
                p->buf_size = cpu_to_le32(buf_sz);
}

static inline int desc_get_buf_len(struct xgmac_dma_desc *p)
{
        u32 len = le32_to_cpu(p->buf_size);
        return (len & DESC_BUFFER1_SZ_MASK) +
                ((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET);
}

static inline void desc_init_rx_desc(struct xgmac_dma_desc *p, int ring_size,
                                     int buf_sz)
{
        struct xgmac_dma_desc *end = p + ring_size - 1;

        memset(p, 0, sizeof(*p) * ring_size);

        for (; p <= end; p++)
                desc_set_buf_len(p, buf_sz);

        end->buf_size |= cpu_to_le32(RXDESC1_END_RING);
}

static inline void desc_init_tx_desc(struct xgmac_dma_desc *p, u32 ring_size)
{
        memset(p, 0, sizeof(*p) * ring_size);
        p[ring_size - 1].flags = cpu_to_le32(TXDESC_END_RING);
}

static inline int desc_get_owner(struct xgmac_dma_desc *p)
{
        return le32_to_cpu(p->flags) & DESC_OWN;
}

static inline void desc_set_rx_owner(struct xgmac_dma_desc *p)
{
        /* Clear all fields and set the owner */
        p->flags = cpu_to_le32(DESC_OWN);
}

static inline void desc_set_tx_owner(struct xgmac_dma_desc *p, u32 flags)
{
        u32 tmpflags = le32_to_cpu(p->flags);
        tmpflags &= TXDESC_END_RING;
        tmpflags |= flags | DESC_OWN;
        p->flags = cpu_to_le32(tmpflags);
}

static inline void desc_clear_tx_owner(struct xgmac_dma_desc *p)
{
        u32 tmpflags = le32_to_cpu(p->flags);
        tmpflags &= TXDESC_END_RING;
        p->flags = cpu_to_le32(tmpflags);
}

static inline int desc_get_tx_ls(struct xgmac_dma_desc *p)
{
        return le32_to_cpu(p->flags) & TXDESC_LAST_SEG;
}

static inline int desc_get_tx_fs(struct xgmac_dma_desc *p)
{
        return le32_to_cpu(p->flags) & TXDESC_FIRST_SEG;
}

static inline u32 desc_get_buf_addr(struct xgmac_dma_desc *p)
{
        return le32_to_cpu(p->buf1_addr);
}

static inline void desc_set_buf_addr(struct xgmac_dma_desc *p,
                                     u32 paddr, int len)
{
        p->buf1_addr = cpu_to_le32(paddr);
        if (len > MAX_DESC_BUF_SZ)
                p->buf2_addr = cpu_to_le32(paddr + MAX_DESC_BUF_SZ);
}

static inline void desc_set_buf_addr_and_size(struct xgmac_dma_desc *p,
                                              u32 paddr, int len)
{
        desc_set_buf_len(p, len);
        desc_set_buf_addr(p, paddr, len);
}

static inline int desc_get_rx_frame_len(struct xgmac_dma_desc *p)
{
        u32 data = le32_to_cpu(p->flags);
        u32 len = (data & RXDESC_FRAME_LEN_MASK) >> RXDESC_FRAME_LEN_OFFSET;
        if (data & RXDESC_FRAME_TYPE)
                len -= ETH_FCS_LEN;

        return len;
}
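
/*
 * Flush the TX FIFO by setting OMR.FTF and polling until the bit
 * self-clears.  The loop is bounded (roughly 1ms), so a wedged MAC
 * cannot stall the caller indefinitely.
 */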
static void xgmac_dma_flush_tx_fifo(void __iomem *ioaddr)
{
        int timeout = 1000;
        u32 reg = readl(ioaddr + XGMAC_OMR);
        writel(reg | XGMAC_OMR_FTF, ioaddr + XGMAC_OMR);

        while ((timeout-- > 0) && readl(ioaddr + XGMAC_OMR) & XGMAC_OMR_FTF)
                udelay(1);
}

static int desc_get_tx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
{
        struct xgmac_extra_stats *x = &priv->xstats;
        u32 status = le32_to_cpu(p->flags);

        if (!(status & TXDESC_ERROR_SUMMARY))
                return 0;

        netdev_dbg(priv->dev, "tx desc error = 0x%08x\n", status);
        if (status & TXDESC_JABBER_TIMEOUT)
                x->tx_jabber++;
        if (status & TXDESC_FRAME_FLUSHED)
                x->tx_frame_flushed++;
        if (status & TXDESC_UNDERFLOW_ERR)
                xgmac_dma_flush_tx_fifo(priv->base);
        if (status & TXDESC_IP_HEADER_ERR)
                x->tx_ip_header_error++;
        if (status & TXDESC_LOCAL_FAULT)
                x->tx_local_fault++;
        if (status & TXDESC_REMOTE_FAULT)
                x->tx_remote_fault++;
        if (status & TXDESC_PAYLOAD_CSUM_ERR)
                x->tx_payload_error++;

        return -1;
}

static int desc_get_rx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
{
        struct xgmac_extra_stats *x = &priv->xstats;
        int ret = CHECKSUM_UNNECESSARY;
        u32 status = le32_to_cpu(p->flags);
        u32 ext_status = le32_to_cpu(p->ext_status);

        if (status & RXDESC_DA_FILTER_FAIL) {
                netdev_dbg(priv->dev, "XGMAC RX : Dest Address filter fail\n");
                x->rx_da_filter_fail++;
                return -1;
        }

        /* All frames should fit into a single buffer */
        if (!(status & RXDESC_FIRST_SEG) || !(status & RXDESC_LAST_SEG))
                return -1;

        /* Check if packet has checksum already */
        if ((status & RXDESC_FRAME_TYPE) && (status & RXDESC_EXT_STATUS) &&
            !(ext_status & RXDESC_IP_PAYLOAD_MASK))
                ret = CHECKSUM_NONE;

        netdev_dbg(priv->dev, "rx status - frame type=%d, csum = %d, ext stat %08x\n",
                   (status & RXDESC_FRAME_TYPE) ? 1 : 0, ret, ext_status);

        if (!(status & RXDESC_ERROR_SUMMARY))
                return ret;

        /* Handle any errors */
        if (status & (RXDESC_DESCRIPTOR_ERR | RXDESC_OVERFLOW_ERR |
                      RXDESC_GIANT_FRAME | RXDESC_LENGTH_ERR | RXDESC_CRC_ERR))
                return -1;

        if (status & RXDESC_EXT_STATUS) {
                if (ext_status & RXDESC_IP_HEADER_ERR)
                        x->rx_ip_header_error++;
                if (ext_status & RXDESC_IP_PAYLOAD_ERR)
                        x->rx_payload_error++;
                netdev_dbg(priv->dev, "IP checksum error - stat %08x\n",
                           ext_status);
                return CHECKSUM_NONE;
        }

        return ret;
}

static inline void xgmac_mac_enable(void __iomem *ioaddr)
{
        u32 value = readl(ioaddr + XGMAC_CONTROL);
        value |= MAC_ENABLE_RX | MAC_ENABLE_TX;
        writel(value, ioaddr + XGMAC_CONTROL);

        value = readl(ioaddr + XGMAC_DMA_CONTROL);
        value |= DMA_CONTROL_ST | DMA_CONTROL_SR;
        writel(value, ioaddr + XGMAC_DMA_CONTROL);
}

static inline void xgmac_mac_disable(void __iomem *ioaddr)
{
        u32 value = readl(ioaddr + XGMAC_DMA_CONTROL);
        value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR);
        writel(value, ioaddr + XGMAC_DMA_CONTROL);

        value = readl(ioaddr + XGMAC_CONTROL);
        value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX);
        writel(value, ioaddr + XGMAC_CONTROL);
}
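
/*
 * Perfect-filter address registers: the high word holds the last two
 * octets of the address plus the Address Enable flag (XGMAC_ADDR_AE, set
 * for every entry except 0); the low word holds the remaining four
 * octets.  Passing addr == NULL zeroes both words and disables the entry.
 */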
static void xgmac_set_mac_addr(void __iomem *ioaddr, unsigned char *addr,
                               int num)
{
        u32 data;

        if (addr) {
                data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0);
                writel(data, ioaddr + XGMAC_ADDR_HIGH(num));
                data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
                writel(data, ioaddr + XGMAC_ADDR_LOW(num));
        } else {
                writel(0, ioaddr + XGMAC_ADDR_HIGH(num));
                writel(0, ioaddr + XGMAC_ADDR_LOW(num));
        }
}

static void xgmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
                               int num)
{
        u32 hi_addr, lo_addr;

        /* Read the MAC address from the hardware */
        hi_addr = readl(ioaddr + XGMAC_ADDR_HIGH(num));
        lo_addr = readl(ioaddr + XGMAC_ADDR_LOW(num));

        /* Extract the MAC address from the high and low words */
        addr[0] = lo_addr & 0xff;
        addr[1] = (lo_addr >> 8) & 0xff;
        addr[2] = (lo_addr >> 16) & 0xff;
        addr[3] = (lo_addr >> 24) & 0xff;
        addr[4] = hi_addr & 0xff;
        addr[5] = (hi_addr >> 8) & 0xff;
}

static int xgmac_set_flow_ctrl(struct xgmac_priv *priv, int rx, int tx)
{
        u32 reg;
        unsigned int flow = 0;

        priv->rx_pause = rx;
        priv->tx_pause = tx;

        if (rx || tx) {
                if (rx)
                        flow |= XGMAC_FLOW_CTRL_RFE;
                if (tx)
                        flow |= XGMAC_FLOW_CTRL_TFE;

                flow |= XGMAC_FLOW_CTRL_PLT | XGMAC_FLOW_CTRL_UP;
                flow |= (PAUSE_TIME << XGMAC_FLOW_CTRL_PT_SHIFT);

                writel(flow, priv->base + XGMAC_FLOW_CTRL);

                reg = readl(priv->base + XGMAC_OMR);
                reg |= XGMAC_OMR_EFC;
                writel(reg, priv->base + XGMAC_OMR);
        } else {
                writel(0, priv->base + XGMAC_FLOW_CTRL);

                reg = readl(priv->base + XGMAC_OMR);
                reg &= ~XGMAC_OMR_EFC;
                writel(reg, priv->base + XGMAC_OMR);
        }

        return 0;
}
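
/*
 * Refill stops one entry early (ring space > 1 rather than > 0), so the
 * hardware is never handed every free descriptor in the RX ring.
 */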
static void xgmac_rx_refill(struct xgmac_priv *priv)
{
        struct xgmac_dma_desc *p;
        dma_addr_t paddr;
        int bufsz = priv->dev->mtu + ETH_HLEN + ETH_FCS_LEN;

        while (dma_ring_space(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ) > 1) {
                int entry = priv->rx_head;
                struct sk_buff *skb;

                p = priv->dma_rx + entry;

                if (priv->rx_skbuff[entry] == NULL) {
                        skb = netdev_alloc_skb_ip_align(priv->dev, bufsz);
                        if (unlikely(skb == NULL))
                                break;

                        paddr = dma_map_single(priv->device, skb->data,
                                               priv->dma_buf_sz - NET_IP_ALIGN,
                                               DMA_FROM_DEVICE);
                        if (dma_mapping_error(priv->device, paddr)) {
                                dev_kfree_skb_any(skb);
                                break;
                        }
                        priv->rx_skbuff[entry] = skb;
                        desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
                }

                netdev_dbg(priv->dev, "rx ring: head %d, tail %d\n",
                           priv->rx_head, priv->rx_tail);

                priv->rx_head = dma_ring_incr(priv->rx_head, DMA_RX_RING_SZ);
                desc_set_rx_owner(p);
        }
}

/**
 * xgmac_dma_desc_rings_init - init the RX/TX descriptor rings
 * @dev: net device structure
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers.
 */
static int xgmac_dma_desc_rings_init(struct net_device *dev)
{
        struct xgmac_priv *priv = netdev_priv(dev);
        unsigned int bfsize;

        /* Set the Buffer size according to the MTU;
         * The total buffer size including any IP offset must be a multiple
         * of 8 bytes.
         */
        bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN, 8);

        netdev_dbg(priv->dev, "mtu [%d] bfsize [%d]\n", dev->mtu, bfsize);

        priv->rx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_RX_RING_SZ,
                                  GFP_KERNEL);
        if (!priv->rx_skbuff)
                return -ENOMEM;

        priv->dma_rx = dma_alloc_coherent(priv->device,
                                          DMA_RX_RING_SZ *
                                          sizeof(struct xgmac_dma_desc),
                                          &priv->dma_rx_phy,
                                          GFP_KERNEL);
        if (!priv->dma_rx)
                goto err_dma_rx;

        priv->tx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_TX_RING_SZ,
                                  GFP_KERNEL);
        if (!priv->tx_skbuff)
                goto err_tx_skb;

        priv->dma_tx = dma_alloc_coherent(priv->device,
                                          DMA_TX_RING_SZ *
                                          sizeof(struct xgmac_dma_desc),
                                          &priv->dma_tx_phy,
                                          GFP_KERNEL);
        if (!priv->dma_tx)
                goto err_dma_tx;

        netdev_dbg(priv->dev, "DMA desc rings: virt addr (Rx %p, "
                   "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n",
                   priv->dma_rx, priv->dma_tx,
                   (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy);

        priv->rx_tail = 0;
        priv->rx_head = 0;
        priv->dma_buf_sz = bfsize;
        desc_init_rx_desc(priv->dma_rx, DMA_RX_RING_SZ, priv->dma_buf_sz);
        xgmac_rx_refill(priv);

        priv->tx_tail = 0;
        priv->tx_head = 0;
        desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);

        writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR);
        writel(priv->dma_rx_phy, priv->base + XGMAC_DMA_RX_BASE_ADDR);

        return 0;

err_dma_tx:
        kfree(priv->tx_skbuff);
err_tx_skb:
        dma_free_coherent(priv->device,
                          DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc),
                          priv->dma_rx, priv->dma_rx_phy);
err_dma_rx:
        kfree(priv->rx_skbuff);
        return -ENOMEM;
}

static void xgmac_free_rx_skbufs(struct xgmac_priv *priv)
{
        int i;
        struct xgmac_dma_desc *p;

        if (!priv->rx_skbuff)
                return;

        for (i = 0; i < DMA_RX_RING_SZ; i++) {
                struct sk_buff *skb = priv->rx_skbuff[i];
                if (skb == NULL)
                        continue;

                p = priv->dma_rx + i;
                dma_unmap_single(priv->device, desc_get_buf_addr(p),
                                 priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
                priv->rx_skbuff[i] = NULL;
        }
}

static void xgmac_free_tx_skbufs(struct xgmac_priv *priv)
{
        int i;
        struct xgmac_dma_desc *p;

        if (!priv->tx_skbuff)
                return;

        for (i = 0; i < DMA_TX_RING_SZ; i++) {
                if (priv->tx_skbuff[i] == NULL)
                        continue;

                p = priv->dma_tx + i;
                if (desc_get_tx_fs(p))
                        dma_unmap_single(priv->device, desc_get_buf_addr(p),
                                         desc_get_buf_len(p), DMA_TO_DEVICE);
                else
                        dma_unmap_page(priv->device, desc_get_buf_addr(p),
                                       desc_get_buf_len(p), DMA_TO_DEVICE);

                if (desc_get_tx_ls(p))
                        dev_kfree_skb_any(priv->tx_skbuff[i]);
                priv->tx_skbuff[i] = NULL;
        }
}

static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv)
{
        /* Release the DMA TX/RX socket buffers */
        xgmac_free_rx_skbufs(priv);
        xgmac_free_tx_skbufs(priv);

        /* Free the consistent memory allocated for descriptor rings */
        if (priv->dma_tx) {
                dma_free_coherent(priv->device,
                                  DMA_TX_RING_SZ * sizeof(struct xgmac_dma_desc),
                                  priv->dma_tx, priv->dma_tx_phy);
                priv->dma_tx = NULL;
        }
        if (priv->dma_rx) {
                dma_free_coherent(priv->device,
                                  DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc),
                                  priv->dma_rx, priv->dma_rx_phy);
                priv->dma_rx = NULL;
        }
        kfree(priv->rx_skbuff);
        priv->rx_skbuff = NULL;
        kfree(priv->tx_skbuff);
        priv->tx_skbuff = NULL;
}
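
/*
 * TX buffers are unmapped the same way they were mapped in xgmac_xmit():
 * dma_unmap_single() for the first segment (the linear head) and
 * dma_unmap_page() for page fragments.  Only the descriptor carrying the
 * last segment owns the skb reference to free.
 */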

/**
 * xgmac_tx_complete:
 * @priv: private driver structure
 * Description: it reclaims resources after transmission completes.
 */
static void xgmac_tx_complete(struct xgmac_priv *priv)
{
        while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) {
                unsigned int entry = priv->tx_tail;
                struct sk_buff *skb = priv->tx_skbuff[entry];
                struct xgmac_dma_desc *p = priv->dma_tx + entry;

                /* Check if the descriptor is owned by the DMA. */
                if (desc_get_owner(p))
                        break;

                netdev_dbg(priv->dev, "tx ring: curr %d, dirty %d\n",
                           priv->tx_head, priv->tx_tail);

                if (desc_get_tx_fs(p))
                        dma_unmap_single(priv->device, desc_get_buf_addr(p),
                                         desc_get_buf_len(p), DMA_TO_DEVICE);
                else
                        dma_unmap_page(priv->device, desc_get_buf_addr(p),
                                       desc_get_buf_len(p), DMA_TO_DEVICE);

                /* Check tx error on the last segment */
                if (desc_get_tx_ls(p)) {
                        desc_get_tx_status(priv, p);
                        dev_consume_skb_any(skb);
                }

                priv->tx_skbuff[entry] = NULL;
                priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
        }

        /* Ensure tx_tail is visible to xgmac_xmit */
        smp_mb();
        if (unlikely(netif_queue_stopped(priv->dev) &&
            (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)))
                netif_wake_queue(priv->dev);
}

static void xgmac_tx_timeout_work(struct work_struct *work)
{
        u32 reg, value;
        struct xgmac_priv *priv =
                container_of(work, struct xgmac_priv, tx_timeout_work);

        napi_disable(&priv->napi);

        writel(0, priv->base + XGMAC_DMA_INTR_ENA);

        netif_tx_lock(priv->dev);

        reg = readl(priv->base + XGMAC_DMA_CONTROL);
        writel(reg & ~DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
        /* Wait for the TX DMA state machine (DMA_STATUS_TS, 0x700000) to
         * report the stopped or suspended (0x600000) state. */
        do {
                value = readl(priv->base + XGMAC_DMA_STATUS) & 0x700000;
        } while (value && (value != 0x600000));

        xgmac_free_tx_skbufs(priv);
        desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);
        priv->tx_tail = 0;
        priv->tx_head = 0;
        writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR);
        writel(reg | DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);

        writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS,
               priv->base + XGMAC_DMA_STATUS);

        netif_tx_unlock(priv->dev);
        netif_wake_queue(priv->dev);

        napi_enable(&priv->napi);

        /* Enable interrupts */
        writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_STATUS);
        writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
}
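
/*
 * One-shot hardware bring-up: soft-reset the DMA engine, program the bus
 * and AXI modes plus FIFO thresholds, and leave every interrupt masked;
 * xgmac_open() unmasks them once the descriptor rings are in place.
 */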
static int xgmac_hw_init(struct net_device *dev)
{
        u32 value, ctrl;
        int limit;
        struct xgmac_priv *priv = netdev_priv(dev);
        void __iomem *ioaddr = priv->base;

        /* Save the ctrl register value */
        ctrl = readl(ioaddr + XGMAC_CONTROL) & XGMAC_CONTROL_SPD_MASK;

        /* SW reset */
        value = DMA_BUS_MODE_SFT_RESET;
        writel(value, ioaddr + XGMAC_DMA_BUS_MODE);
        limit = 15000;
        while (limit-- &&
               (readl(ioaddr + XGMAC_DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
                cpu_relax();
        if (limit < 0)
                return -EBUSY;

        value = (0x10 << DMA_BUS_MODE_PBL_SHIFT) |
                (0x10 << DMA_BUS_MODE_RPBL_SHIFT) |
                DMA_BUS_MODE_FB | DMA_BUS_MODE_ATDS | DMA_BUS_MODE_AAL;
        writel(value, ioaddr + XGMAC_DMA_BUS_MODE);

        writel(0, ioaddr + XGMAC_DMA_INTR_ENA);

        /* Mask power mgt interrupt */
        writel(XGMAC_INT_STAT_PMTIM, ioaddr + XGMAC_INT_STAT);

        /* XGMAC requires AXI bus init. This is a 'magic number' for now */
        writel(0x0077000E, ioaddr + XGMAC_DMA_AXI_BUS);

        ctrl |= XGMAC_CONTROL_DDIC | XGMAC_CONTROL_JE | XGMAC_CONTROL_ACS |
                XGMAC_CONTROL_CAR;
        if (dev->features & NETIF_F_RXCSUM)
                ctrl |= XGMAC_CONTROL_IPC;
        writel(ctrl, ioaddr + XGMAC_CONTROL);

        writel(DMA_CONTROL_OSF, ioaddr + XGMAC_DMA_CONTROL);

        /* Set the HW DMA mode and the COE */
        writel(XGMAC_OMR_TSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA |
               XGMAC_OMR_RTC_256,
               ioaddr + XGMAC_OMR);

        /* Reset the MMC counters */
        writel(1, ioaddr + XGMAC_MMC_CTRL);
        return 0;
}

/**
 * xgmac_open - open entry point of the driver
 * @dev : pointer to the device structure.
 * Description:
 * This function is the open entry point of the driver.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */
static int xgmac_open(struct net_device *dev)
{
        int ret;
        struct xgmac_priv *priv = netdev_priv(dev);
        void __iomem *ioaddr = priv->base;

        /* Check that the MAC address is valid. If it's not, refuse
         * to bring the device up. The user must specify an
         * address using the following linux command:
         * ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx */
        if (!is_valid_ether_addr(dev->dev_addr)) {
                eth_hw_addr_random(dev);
                netdev_dbg(priv->dev, "generated random MAC address %pM\n",
                           dev->dev_addr);
        }

        memset(&priv->xstats, 0, sizeof(struct xgmac_extra_stats));

        /* Initialize the XGMAC and descriptors */
        xgmac_hw_init(dev);
        xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
        xgmac_set_flow_ctrl(priv, priv->rx_pause, priv->tx_pause);

        ret = xgmac_dma_desc_rings_init(dev);
        if (ret < 0)
                return ret;

        /* Enable the MAC Rx/Tx */
        xgmac_mac_enable(ioaddr);

        napi_enable(&priv->napi);
        netif_start_queue(dev);

        /* Enable interrupts */
        writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
        writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);

        return 0;
}

/**
 * xgmac_stop - close entry point of the driver
 * @dev : device pointer.
 * Description:
 * This is the stop entry point of the driver.
 */
static int xgmac_stop(struct net_device *dev)
{
        struct xgmac_priv *priv = netdev_priv(dev);

        if (readl(priv->base + XGMAC_DMA_INTR_ENA))
                napi_disable(&priv->napi);

        writel(0, priv->base + XGMAC_DMA_INTR_ENA);

        netif_tx_disable(dev);

        /* Disable the MAC core */
        xgmac_mac_disable(priv->base);

        /* Release and free the Rx/Tx resources */
        xgmac_free_dma_desc_rings(priv);

        return 0;
}
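
/*
 * TX completion interrupts are mitigated: tx_irq_cnt in xgmac_xmit()
 * requests a completion interrupt only on every (DMA_TX_RING_SZ / 4)th
 * frame; the rest of the ring is reclaimed by xgmac_tx_complete() from
 * the NAPI poll loop.
 */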

/**
 * xgmac_xmit:
 * @skb : the socket buffer
 * @dev : device pointer
 * Description : Tx entry point of the driver.
 */
static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct xgmac_priv *priv = netdev_priv(dev);
        unsigned int entry;
        int i;
        u32 irq_flag;
        int nfrags = skb_shinfo(skb)->nr_frags;
        struct xgmac_dma_desc *desc, *first;
        unsigned int desc_flags;
        unsigned int len;
        dma_addr_t paddr;

        priv->tx_irq_cnt = (priv->tx_irq_cnt + 1) & (DMA_TX_RING_SZ/4 - 1);
        irq_flag = priv->tx_irq_cnt ? 0 : TXDESC_INTERRUPT;

        desc_flags = (skb->ip_summed == CHECKSUM_PARTIAL) ?
                     TXDESC_CSUM_ALL : 0;
        entry = priv->tx_head;
        desc = priv->dma_tx + entry;
        first = desc;

        len = skb_headlen(skb);
        paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE);
        if (dma_mapping_error(priv->device, paddr)) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }
        priv->tx_skbuff[entry] = skb;
        desc_set_buf_addr_and_size(desc, paddr, len);

        for (i = 0; i < nfrags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                len = frag->size;

                paddr = skb_frag_dma_map(priv->device, frag, 0, len,
                                         DMA_TO_DEVICE);
                if (dma_mapping_error(priv->device, paddr))
                        goto dma_err;

                entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
                desc = priv->dma_tx + entry;
                priv->tx_skbuff[entry] = skb;

                desc_set_buf_addr_and_size(desc, paddr, len);
                if (i < (nfrags - 1))
                        desc_set_tx_owner(desc, desc_flags);
        }

        /* Interrupt on completion only for the latest segment */
        if (desc != first)
                desc_set_tx_owner(desc, desc_flags |
                                  TXDESC_LAST_SEG | irq_flag);
        else
                desc_flags |= TXDESC_LAST_SEG | irq_flag;

        /* Set owner on first desc last to avoid race condition */
        wmb();
        desc_set_tx_owner(first, desc_flags | TXDESC_FIRST_SEG);

        writel(1, priv->base + XGMAC_DMA_TX_POLL);

        priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);

        /* Ensure tx_head update is visible to tx completion */
        smp_mb();
        if (unlikely(tx_dma_ring_space(priv) <= MAX_SKB_FRAGS)) {
                netif_stop_queue(dev);
                /* Ensure netif_stop_queue is visible to tx completion */
                smp_mb();
                if (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)
                        netif_start_queue(dev);
        }
        return NETDEV_TX_OK;

dma_err:
        entry = priv->tx_head;
        for ( ; i > 0; i--) {
                entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
                desc = priv->dma_tx + entry;
                priv->tx_skbuff[entry] = NULL;
                dma_unmap_page(priv->device, desc_get_buf_addr(desc),
                               desc_get_buf_len(desc), DMA_TO_DEVICE);
                desc_clear_tx_owner(desc);
        }
        desc = first;
        dma_unmap_single(priv->device, desc_get_buf_addr(desc),
                         desc_get_buf_len(desc), DMA_TO_DEVICE);
        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
}
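
/*
 * The value returned by desc_get_rx_status() doubles as the skb->ip_summed
 * setting: CHECKSUM_UNNECESSARY when the hardware verified the checksums,
 * CHECKSUM_NONE when it could not, and a negative value to drop the frame.
 */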
static int xgmac_rx(struct xgmac_priv *priv, int limit)
{
        unsigned int entry;
        unsigned int count = 0;
        struct xgmac_dma_desc *p;

        while (count < limit) {
                int ip_checksum;
                struct sk_buff *skb;
                int frame_len;

                if (!dma_ring_cnt(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ))
                        break;

                entry = priv->rx_tail;
                p = priv->dma_rx + entry;
                if (desc_get_owner(p))
                        break;

                count++;
                priv->rx_tail = dma_ring_incr(priv->rx_tail, DMA_RX_RING_SZ);

                /* read the status of the incoming frame */
                ip_checksum = desc_get_rx_status(priv, p);
                if (ip_checksum < 0)
                        continue;

                skb = priv->rx_skbuff[entry];
                if (unlikely(!skb)) {
                        netdev_err(priv->dev, "Inconsistent Rx descriptor chain\n");
                        break;
                }
                priv->rx_skbuff[entry] = NULL;

                frame_len = desc_get_rx_frame_len(p);
                netdev_dbg(priv->dev, "RX frame size %d, COE status: %d\n",
                           frame_len, ip_checksum);

                skb_put(skb, frame_len);
                dma_unmap_single(priv->device, desc_get_buf_addr(p),
                                 priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE);

                skb->protocol = eth_type_trans(skb, priv->dev);
                skb->ip_summed = ip_checksum;
                if (ip_checksum == CHECKSUM_NONE)
                        netif_receive_skb(skb);
                else
                        napi_gro_receive(&priv->napi, skb);
        }

        xgmac_rx_refill(priv);

        return count;
}

/**
 * xgmac_poll - xgmac poll method (NAPI)
 * @napi : pointer to the napi structure.
 * @budget : maximum number of packets that the current CPU can receive from
 *           all interfaces.
 * Description :
 * This function implements the reception process.
 * Also it runs the TX completion thread
 */
static int xgmac_poll(struct napi_struct *napi, int budget)
{
        struct xgmac_priv *priv = container_of(napi,
                                               struct xgmac_priv, napi);
        int work_done = 0;

        xgmac_tx_complete(priv);
        work_done = xgmac_rx(priv, budget);

        if (work_done < budget) {
                napi_complete(napi);
                __raw_writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
        }
        return work_done;
}

/**
 * xgmac_tx_timeout
 * @dev : Pointer to net device structure
 * Description: this function is called when a packet transmission fails to
 * complete within a reasonable time. The driver will mark the error in the
 * netdev structure and arrange for the device to be reset to a sane state
 * in order to transmit a new packet.
 */
static void xgmac_tx_timeout(struct net_device *dev)
{
        struct xgmac_priv *priv = netdev_priv(dev);
        schedule_work(&priv->tx_timeout_work);
}
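
/*
 * Filtering strategy: unicast and multicast addresses go into the perfect
 * match registers (entries 1..max_macs; entry 0 holds the device address)
 * until those run out, after which the corresponding hash filter takes over.
 */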

/**
 * xgmac_set_rx_mode - entry point for multicast addressing
 * @dev : pointer to the device structure
 * Description:
 * This function is a driver entry point which gets called by the kernel
 * whenever multicast addresses must be enabled/disabled.
 * Return value:
 * void.
 */
static void xgmac_set_rx_mode(struct net_device *dev)
{
        int i;
        struct xgmac_priv *priv = netdev_priv(dev);
        void __iomem *ioaddr = priv->base;
        unsigned int value = 0;
        u32 hash_filter[XGMAC_NUM_HASH];
        int reg = 1;
        struct netdev_hw_addr *ha;
        bool use_hash = false;

        netdev_dbg(priv->dev, "# mcasts %d, # unicast %d\n",
                   netdev_mc_count(dev), netdev_uc_count(dev));

        if (dev->flags & IFF_PROMISC)
                value |= XGMAC_FRAME_FILTER_PR;

        memset(hash_filter, 0, sizeof(hash_filter));

        if (netdev_uc_count(dev) > priv->max_macs) {
                use_hash = true;
                value |= XGMAC_FRAME_FILTER_HUC | XGMAC_FRAME_FILTER_HPF;
        }
        netdev_for_each_uc_addr(ha, dev) {
                if (use_hash) {
                        u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23;

                        /* The most significant 4 bits determine the register to
                         * use (H/L) while the other 5 bits determine the bit
                         * within the register. */
                        hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
                } else {
                        xgmac_set_mac_addr(ioaddr, ha->addr, reg);
                        reg++;
                }
        }

        if (dev->flags & IFF_ALLMULTI) {
                value |= XGMAC_FRAME_FILTER_PM;
                goto out;
        }

        if ((netdev_mc_count(dev) + reg - 1) > priv->max_macs) {
                use_hash = true;
                value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF;
        } else {
                use_hash = false;
        }
        netdev_for_each_mc_addr(ha, dev) {
                if (use_hash) {
                        u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23;

                        /* The most significant 4 bits determine the register to
                         * use (H/L) while the other 5 bits determine the bit
                         * within the register. */
                        hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
                } else {
                        xgmac_set_mac_addr(ioaddr, ha->addr, reg);
                        reg++;
                }
        }

out:
        for (i = reg; i <= priv->max_macs; i++)
                xgmac_set_mac_addr(ioaddr, NULL, i);
        for (i = 0; i < XGMAC_NUM_HASH; i++)
                writel(hash_filter[i], ioaddr + XGMAC_HASH(i));

        writel(value, ioaddr + XGMAC_FRAME_FILTER);
}

/**
 * xgmac_change_mtu - entry point to change MTU size for the device.
 * @dev : device pointer.
 * @new_mtu : the new MTU size for the device.
 * Description: the Maximum Transfer Unit (MTU) is used by the network layer
 * to drive packet transmission. Ethernet has an MTU of 1500 octets
 * (ETH_DATA_LEN). This value can be changed with ifconfig.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */
static int xgmac_change_mtu(struct net_device *dev, int new_mtu)
{
        struct xgmac_priv *priv = netdev_priv(dev);
        int old_mtu;

        if ((new_mtu < 46) || (new_mtu > MAX_MTU)) {
                netdev_err(priv->dev, "invalid MTU, max MTU is: %d\n", MAX_MTU);
                return -EINVAL;
        }

        old_mtu = dev->mtu;

        /* return early if the buffer sizes will not change */
        if (old_mtu == new_mtu)
                return 0;

        /* Stop everything, get ready to change the MTU */
        if (!netif_running(dev))
                return 0;

        /* Bring interface down, change mtu and bring interface back up */
        xgmac_stop(dev);
        dev->mtu = new_mtu;
        return xgmac_open(dev);
}

static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id)
{
        u32 intr_status;
        struct net_device *dev = (struct net_device *)dev_id;
        struct xgmac_priv *priv = netdev_priv(dev);
        void __iomem *ioaddr = priv->base;

        intr_status = __raw_readl(ioaddr + XGMAC_INT_STAT);
        if (intr_status & XGMAC_INT_STAT_PMT) {
                netdev_dbg(priv->dev, "received Magic frame\n");
                /* clear the PMT bits 5 and 6 by reading the PMT */
                readl(ioaddr + XGMAC_PMT);
        }
        return IRQ_HANDLED;
}

static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
{
        u32 intr_status;
        struct net_device *dev = (struct net_device *)dev_id;
        struct xgmac_priv *priv = netdev_priv(dev);
        struct xgmac_extra_stats *x = &priv->xstats;

        /* read the status register (CSR5) */
        intr_status = __raw_readl(priv->base + XGMAC_DMA_STATUS);
        intr_status &= __raw_readl(priv->base + XGMAC_DMA_INTR_ENA);
        __raw_writel(intr_status, priv->base + XGMAC_DMA_STATUS);

        /* It displays the DMA process states (CSR5 register) */
        /* ABNORMAL interrupts */
        if (unlikely(intr_status & DMA_STATUS_AIS)) {
                if (intr_status & DMA_STATUS_TJT) {
                        netdev_err(priv->dev, "transmit jabber\n");
                        x->tx_jabber++;
                }
                if (intr_status & DMA_STATUS_RU)
                        x->rx_buf_unav++;
                if (intr_status & DMA_STATUS_RPS) {
                        netdev_err(priv->dev, "receive process stopped\n");
                        x->rx_process_stopped++;
                }
                if (intr_status & DMA_STATUS_ETI) {
                        netdev_err(priv->dev, "transmit early interrupt\n");
                        x->tx_early++;
                }
                if (intr_status & DMA_STATUS_TPS) {
                        netdev_err(priv->dev, "transmit process stopped\n");
                        x->tx_process_stopped++;
                        schedule_work(&priv->tx_timeout_work);
                }
                if (intr_status & DMA_STATUS_FBI) {
                        netdev_err(priv->dev, "fatal bus error\n");
                        x->fatal_bus_error++;
                }
        }
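
        /*
         * Defer ring processing to NAPI: drop everything except the
         * abnormal-summary sources from the interrupt mask here;
         * xgmac_poll() restores DMA_INTR_DEFAULT_MASK when it completes.
         */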
        /* TX/RX NORMAL interrupts */
        if (intr_status & (DMA_STATUS_RI | DMA_STATUS_TU | DMA_STATUS_TI)) {
                __raw_writel(DMA_INTR_ABNORMAL, priv->base + XGMAC_DMA_INTR_ENA);
                napi_schedule(&priv->napi);
        }

        return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by NETCONSOLE and other diagnostic tools
 * to allow network I/O with interrupts disabled. */
static void xgmac_poll_controller(struct net_device *dev)
{
        disable_irq(dev->irq);
        xgmac_interrupt(dev->irq, dev);
        enable_irq(dev->irq);
}
#endif

static struct rtnl_link_stats64 *
xgmac_get_stats64(struct net_device *dev,
                  struct rtnl_link_stats64 *storage)
{
        struct xgmac_priv *priv = netdev_priv(dev);
        void __iomem *base = priv->base;
        u32 count;

        spin_lock_bh(&priv->stats_lock);
        writel(XGMAC_MMC_CTRL_CNT_FRZ, base + XGMAC_MMC_CTRL);

        storage->rx_bytes = readl(base + XGMAC_MMC_RXOCTET_G_LO);
        storage->rx_bytes |= (u64)(readl(base + XGMAC_MMC_RXOCTET_G_HI)) << 32;

        storage->rx_packets = readl(base + XGMAC_MMC_RXFRAME_GB_LO);
        storage->multicast = readl(base + XGMAC_MMC_RXMCFRAME_G);
        storage->rx_crc_errors = readl(base + XGMAC_MMC_RXCRCERR);
        storage->rx_length_errors = readl(base + XGMAC_MMC_RXLENGTHERR);
        storage->rx_missed_errors = readl(base + XGMAC_MMC_RXOVERFLOW);

        storage->tx_bytes = readl(base + XGMAC_MMC_TXOCTET_G_LO);
        storage->tx_bytes |= (u64)(readl(base + XGMAC_MMC_TXOCTET_G_HI)) << 32;

        count = readl(base + XGMAC_MMC_TXFRAME_GB_LO);
        storage->tx_errors = count - readl(base + XGMAC_MMC_TXFRAME_G_LO);
        storage->tx_packets = count;
        storage->tx_fifo_errors = readl(base + XGMAC_MMC_TXUNDERFLOW);

        writel(0, base + XGMAC_MMC_CTRL);
        spin_unlock_bh(&priv->stats_lock);
        return storage;
}

static int xgmac_set_mac_address(struct net_device *dev, void *p)
{
        struct xgmac_priv *priv = netdev_priv(dev);
        void __iomem *ioaddr = priv->base;
        struct sockaddr *addr = p;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

        xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);

        return 0;
}

static int xgmac_set_features(struct net_device *dev, netdev_features_t features)
{
        u32 ctrl;
        struct xgmac_priv *priv = netdev_priv(dev);
        void __iomem *ioaddr = priv->base;
        netdev_features_t changed = dev->features ^ features;

        if (!(changed & NETIF_F_RXCSUM))
                return 0;

        ctrl = readl(ioaddr + XGMAC_CONTROL);
        if (features & NETIF_F_RXCSUM)
                ctrl |= XGMAC_CONTROL_IPC;
        else
                ctrl &= ~XGMAC_CONTROL_IPC;
        writel(ctrl, ioaddr + XGMAC_CONTROL);

        return 0;
}

static const struct net_device_ops xgmac_netdev_ops = {
        .ndo_open = xgmac_open,
        .ndo_start_xmit = xgmac_xmit,
        .ndo_stop = xgmac_stop,
        .ndo_change_mtu = xgmac_change_mtu,
        .ndo_set_rx_mode = xgmac_set_rx_mode,
        .ndo_tx_timeout = xgmac_tx_timeout,
        .ndo_get_stats64 = xgmac_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = xgmac_poll_controller,
#endif
        .ndo_set_mac_address = xgmac_set_mac_address,
        .ndo_set_features = xgmac_set_features,
};
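
/*
 * There is no PHY or autonegotiation to query: the ethtool settings below
 * are hard-coded to the fixed 10 Gbit/s full-duplex link.
 */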
static int xgmac_ethtool_getsettings(struct net_device *dev,
                                     struct ethtool_cmd *cmd)
{
        cmd->autoneg = 0;
        cmd->duplex = DUPLEX_FULL;
        ethtool_cmd_speed_set(cmd, 10000);
        cmd->supported = 0;
        cmd->advertising = 0;
        cmd->transceiver = XCVR_INTERNAL;
        return 0;
}

static void xgmac_get_pauseparam(struct net_device *netdev,
                                 struct ethtool_pauseparam *pause)
{
        struct xgmac_priv *priv = netdev_priv(netdev);

        pause->rx_pause = priv->rx_pause;
        pause->tx_pause = priv->tx_pause;
}

static int xgmac_set_pauseparam(struct net_device *netdev,
                                struct ethtool_pauseparam *pause)
{
        struct xgmac_priv *priv = netdev_priv(netdev);

        if (pause->autoneg)
                return -EINVAL;

        return xgmac_set_flow_ctrl(priv, pause->rx_pause, pause->tx_pause);
}

struct xgmac_stats {
        char stat_string[ETH_GSTRING_LEN];
        int stat_offset;
        bool is_reg;
};

#define XGMAC_STAT(m)   \
        { #m, offsetof(struct xgmac_priv, xstats.m), false }
#define XGMAC_HW_STAT(m, reg_offset)    \
        { #m, reg_offset, true }

static const struct xgmac_stats xgmac_gstrings_stats[] = {
        XGMAC_STAT(tx_frame_flushed),
        XGMAC_STAT(tx_payload_error),
        XGMAC_STAT(tx_ip_header_error),
        XGMAC_STAT(tx_local_fault),
        XGMAC_STAT(tx_remote_fault),
        XGMAC_STAT(tx_early),
        XGMAC_STAT(tx_process_stopped),
        XGMAC_STAT(tx_jabber),
        XGMAC_STAT(rx_buf_unav),
        XGMAC_STAT(rx_process_stopped),
        XGMAC_STAT(rx_payload_error),
        XGMAC_STAT(rx_ip_header_error),
        XGMAC_STAT(rx_da_filter_fail),
        XGMAC_STAT(fatal_bus_error),
        XGMAC_HW_STAT(rx_watchdog, XGMAC_MMC_RXWATCHDOG),
        XGMAC_HW_STAT(tx_vlan, XGMAC_MMC_TXVLANFRAME),
        XGMAC_HW_STAT(rx_vlan, XGMAC_MMC_RXVLANFRAME),
        XGMAC_HW_STAT(tx_pause, XGMAC_MMC_TXPAUSEFRAME),
        XGMAC_HW_STAT(rx_pause, XGMAC_MMC_RXPAUSEFRAME),
};
#define XGMAC_STATS_LEN ARRAY_SIZE(xgmac_gstrings_stats)

static void xgmac_get_ethtool_stats(struct net_device *dev,
                                    struct ethtool_stats *dummy,
                                    u64 *data)
{
        struct xgmac_priv *priv = netdev_priv(dev);
        void *p = priv;
        int i;

        for (i = 0; i < XGMAC_STATS_LEN; i++) {
                if (xgmac_gstrings_stats[i].is_reg)
                        *data++ = readl(priv->base +
                                xgmac_gstrings_stats[i].stat_offset);
                else
                        *data++ = *(u32 *)(p +
                                xgmac_gstrings_stats[i].stat_offset);
        }
}

static int xgmac_get_sset_count(struct net_device *netdev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return XGMAC_STATS_LEN;
        default:
                return -EINVAL;
        }
}

static void xgmac_get_strings(struct net_device *dev, u32 stringset,
                              u8 *data)
{
        int i;
        u8 *p = data;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < XGMAC_STATS_LEN; i++) {
                        memcpy(p, xgmac_gstrings_stats[i].stat_string,
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
                break;
        default:
                WARN_ON(1);
                break;
        }
}

static void xgmac_get_wol(struct net_device *dev,
                          struct ethtool_wolinfo *wol)
{
        struct xgmac_priv *priv = netdev_priv(dev);

        if (device_can_wakeup(priv->device)) {
                wol->supported = WAKE_MAGIC | WAKE_UCAST;
                wol->wolopts = priv->wolopts;
        }
}
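
/*
 * Only WAKE_MAGIC and WAKE_UCAST are accepted: they map onto the PMT
 * magic-packet and global-unicast wake-up bits in xgmac_pmt() below.
 */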
static int xgmac_set_wol(struct net_device *dev,
                         struct ethtool_wolinfo *wol)
{
        struct xgmac_priv *priv = netdev_priv(dev);
        u32 support = WAKE_MAGIC | WAKE_UCAST;

        if (!device_can_wakeup(priv->device))
                return -ENOTSUPP;

        if (wol->wolopts & ~support)
                return -EINVAL;

        priv->wolopts = wol->wolopts;

        if (wol->wolopts) {
                device_set_wakeup_enable(priv->device, 1);
                enable_irq_wake(dev->irq);
        } else {
                device_set_wakeup_enable(priv->device, 0);
                disable_irq_wake(dev->irq);
        }

        return 0;
}

static const struct ethtool_ops xgmac_ethtool_ops = {
        .get_settings = xgmac_ethtool_getsettings,
        .get_link = ethtool_op_get_link,
        .get_pauseparam = xgmac_get_pauseparam,
        .set_pauseparam = xgmac_set_pauseparam,
        .get_ethtool_stats = xgmac_get_ethtool_stats,
        .get_strings = xgmac_get_strings,
        .get_wol = xgmac_get_wol,
        .set_wol = xgmac_set_wol,
        .get_sset_count = xgmac_get_sset_count,
};

/**
 * xgmac_probe
 * @pdev: platform device pointer
 * Description: the driver is initialized through platform_device.
 */
static int xgmac_probe(struct platform_device *pdev)
{
        int ret = 0;
        struct resource *res;
        struct net_device *ndev = NULL;
        struct xgmac_priv *priv = NULL;
        u32 uid;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -ENODEV;

        if (!request_mem_region(res->start, resource_size(res), pdev->name))
                return -EBUSY;

        ndev = alloc_etherdev(sizeof(struct xgmac_priv));
        if (!ndev) {
                ret = -ENOMEM;
                goto err_alloc;
        }

        SET_NETDEV_DEV(ndev, &pdev->dev);
        priv = netdev_priv(ndev);
        platform_set_drvdata(pdev, ndev);
        ndev->netdev_ops = &xgmac_netdev_ops;
        ndev->ethtool_ops = &xgmac_ethtool_ops;
        spin_lock_init(&priv->stats_lock);
        INIT_WORK(&priv->tx_timeout_work, xgmac_tx_timeout_work);

        priv->device = &pdev->dev;
        priv->dev = ndev;
        priv->rx_pause = 1;
        priv->tx_pause = 1;

        priv->base = ioremap(res->start, resource_size(res));
        if (!priv->base) {
                netdev_err(ndev, "ioremap failed\n");
                ret = -ENOMEM;
                goto err_io;
        }

        uid = readl(priv->base + XGMAC_VERSION);
        netdev_info(ndev, "h/w version is 0x%x\n", uid);

        /* Figure out how many valid mac address filter registers we have */
        writel(1, priv->base + XGMAC_ADDR_HIGH(31));
        if (readl(priv->base + XGMAC_ADDR_HIGH(31)) == 1)
                priv->max_macs = 31;
        else
                priv->max_macs = 7;

        writel(0, priv->base + XGMAC_DMA_INTR_ENA);
        ndev->irq = platform_get_irq(pdev, 0);
        if (ndev->irq == -ENXIO) {
                netdev_err(ndev, "No irq resource\n");
                ret = ndev->irq;
                goto err_irq;
        }

        ret = request_irq(ndev->irq, xgmac_interrupt, 0,
                          dev_name(&pdev->dev), ndev);
        if (ret < 0) {
                netdev_err(ndev, "Could not request irq %d - ret %d\n",
                           ndev->irq, ret);
                goto err_irq;
        }

        priv->pmt_irq = platform_get_irq(pdev, 1);
        if (priv->pmt_irq == -ENXIO) {
                netdev_err(ndev, "No pmt irq resource\n");
                ret = priv->pmt_irq;
                goto err_pmt_irq;
        }

        ret = request_irq(priv->pmt_irq, xgmac_pmt_interrupt, 0,
                          dev_name(&pdev->dev), ndev);
        if (ret < 0) {
                netdev_err(ndev, "Could not request irq %d - ret %d\n",
                           priv->pmt_irq, ret);
                goto err_pmt_irq;
        }

        device_set_wakeup_capable(&pdev->dev, 1);
        if (device_can_wakeup(priv->device))
                priv->wolopts = WAKE_MAGIC;     /* Magic Frame as default */

        ndev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA;
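        /* Checksum offload is only advertised when the DMA hardware-feature
         * register reports that this core was built with TX COE support. */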
        if (readl(priv->base + XGMAC_DMA_HW_FEATURE) & DMA_HW_FEAT_TXCOESEL)
                ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                                     NETIF_F_RXCSUM;
        ndev->features |= ndev->hw_features;
        ndev->priv_flags |= IFF_UNICAST_FLT;

        /* Get the MAC address */
        xgmac_get_mac_addr(priv->base, ndev->dev_addr, 0);
        if (!is_valid_ether_addr(ndev->dev_addr))
                netdev_warn(ndev, "MAC address %pM not valid",
                            ndev->dev_addr);

        netif_napi_add(ndev, &priv->napi, xgmac_poll, 64);
        ret = register_netdev(ndev);
        if (ret)
                goto err_reg;

        return 0;

err_reg:
        netif_napi_del(&priv->napi);
        free_irq(priv->pmt_irq, ndev);
err_pmt_irq:
        free_irq(ndev->irq, ndev);
err_irq:
        iounmap(priv->base);
err_io:
        free_netdev(ndev);
err_alloc:
        release_mem_region(res->start, resource_size(res));
        return ret;
}

/**
 * xgmac_remove
 * @pdev: platform device pointer
 * Description: this function resets the TX/RX processes, disables the MAC RX/TX
 * changes the link status, releases the DMA descriptor rings,
 * unregisters the MDIO bus and unmaps the allocated memory.
 */
static int xgmac_remove(struct platform_device *pdev)
{
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct xgmac_priv *priv = netdev_priv(ndev);
        struct resource *res;

        xgmac_mac_disable(priv->base);

        /* Free the IRQ lines */
        free_irq(ndev->irq, ndev);
        free_irq(priv->pmt_irq, ndev);

        unregister_netdev(ndev);
        netif_napi_del(&priv->napi);

        iounmap(priv->base);
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        release_mem_region(res->start, resource_size(res));

        free_netdev(ndev);

        return 0;
}

#ifdef CONFIG_PM_SLEEP
static void xgmac_pmt(void __iomem *ioaddr, unsigned long mode)
{
        unsigned int pmt = 0;

        if (mode & WAKE_MAGIC)
                pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT_EN;
        if (mode & WAKE_UCAST)
                pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST;

        writel(pmt, ioaddr + XGMAC_PMT);
}

static int xgmac_suspend(struct device *dev)
{
        struct net_device *ndev = platform_get_drvdata(to_platform_device(dev));
        struct xgmac_priv *priv = netdev_priv(ndev);
        u32 value;

        if (!ndev || !netif_running(ndev))
                return 0;

        netif_device_detach(ndev);
        napi_disable(&priv->napi);
        writel(0, priv->base + XGMAC_DMA_INTR_ENA);

        if (device_may_wakeup(priv->device)) {
                /* Stop TX/RX DMA Only */
                value = readl(priv->base + XGMAC_DMA_CONTROL);
                value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR);
                writel(value, priv->base + XGMAC_DMA_CONTROL);

                xgmac_pmt(priv->base, priv->wolopts);
        } else
                xgmac_mac_disable(priv->base);

        return 0;
}

static int xgmac_resume(struct device *dev)
{
        struct net_device *ndev = platform_get_drvdata(to_platform_device(dev));
        struct xgmac_priv *priv = netdev_priv(ndev);
        void __iomem *ioaddr = priv->base;

        if (!netif_running(ndev))
                return 0;

        xgmac_pmt(ioaddr, 0);

        /* Enable the MAC and DMA */
        xgmac_mac_enable(ioaddr);
        writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
        writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);

        netif_device_attach(ndev);
        napi_enable(&priv->napi);

        return 0;
}
#endif /* CONFIG_PM_SLEEP */
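
/*
 * SIMPLE_DEV_PM_OPS only wires up the suspend/resume callbacks when
 * CONFIG_PM_SLEEP is set, matching the #ifdef guard around them above.
 */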
static SIMPLE_DEV_PM_OPS(xgmac_pm_ops, xgmac_suspend, xgmac_resume);

static const struct of_device_id xgmac_of_match[] = {
        { .compatible = "calxeda,hb-xgmac", },
        {},
};
MODULE_DEVICE_TABLE(of, xgmac_of_match);

static struct platform_driver xgmac_driver = {
        .driver = {
                .name = "calxedaxgmac",
                .of_match_table = xgmac_of_match,
                .pm = &xgmac_pm_ops,
        },
        .probe = xgmac_probe,
        .remove = xgmac_remove,
};

module_platform_driver(xgmac_driver);

MODULE_AUTHOR("Calxeda, Inc.");
MODULE_DESCRIPTION("Calxeda 10G XGMAC driver");
MODULE_LICENSE("GPL v2");