1 /*
2  * Copyright 2010-2011 Calxeda, Inc.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program.  If not, see <http://www.gnu.org/licenses/>.
15  */
16 #include <linux/module.h>
17 #include <linux/init.h>
18 #include <linux/kernel.h>
19 #include <linux/circ_buf.h>
20 #include <linux/interrupt.h>
21 #include <linux/etherdevice.h>
22 #include <linux/platform_device.h>
23 #include <linux/skbuff.h>
24 #include <linux/ethtool.h>
25 #include <linux/if.h>
26 #include <linux/crc32.h>
27 #include <linux/dma-mapping.h>
28 #include <linux/slab.h>
29 
30 /* XGMAC Register definitions */
31 #define XGMAC_CONTROL		0x00000000	/* MAC Configuration */
32 #define XGMAC_FRAME_FILTER	0x00000004	/* MAC Frame Filter */
33 #define XGMAC_FLOW_CTRL		0x00000018	/* MAC Flow Control */
34 #define XGMAC_VLAN_TAG		0x0000001C	/* VLAN Tags */
35 #define XGMAC_VERSION		0x00000020	/* Version */
36 #define XGMAC_VLAN_INCL		0x00000024	/* VLAN tag for tx frames */
37 #define XGMAC_LPI_CTRL		0x00000028	/* LPI Control and Status */
38 #define XGMAC_LPI_TIMER		0x0000002C	/* LPI Timers Control */
39 #define XGMAC_TX_PACE		0x00000030	/* Transmit Pace and Stretch */
40 #define XGMAC_VLAN_HASH		0x00000034	/* VLAN Hash Table */
41 #define XGMAC_DEBUG		0x00000038	/* Debug */
42 #define XGMAC_INT_STAT		0x0000003C	/* Interrupt and Control */
43 #define XGMAC_ADDR_HIGH(reg)	(0x00000040 + ((reg) * 8))
44 #define XGMAC_ADDR_LOW(reg)	(0x00000044 + ((reg) * 8))
45 #define XGMAC_HASH(n)		(0x00000300 + (n) * 4) /* HASH table regs */
46 #define XGMAC_NUM_HASH		16
47 #define XGMAC_OMR		0x00000400
48 #define XGMAC_REMOTE_WAKE	0x00000700	/* Remote Wake-Up Frm Filter */
49 #define XGMAC_PMT		0x00000704	/* PMT Control and Status */
50 #define XGMAC_MMC_CTRL		0x00000800	/* XGMAC MMC Control */
#define XGMAC_MMC_INTR_RX	0x00000804	/* Receive Interrupt */
52 #define XGMAC_MMC_INTR_TX	0x00000808	/* Transmit Interrupt */
#define XGMAC_MMC_INTR_MASK_RX	0x0000080c	/* Receive Interrupt Mask */
54 #define XGMAC_MMC_INTR_MASK_TX	0x00000810	/* Transmit Interrupt Mask */
55 
56 /* Hardware TX Statistics Counters */
57 #define XGMAC_MMC_TXOCTET_GB_LO	0x00000814
58 #define XGMAC_MMC_TXOCTET_GB_HI	0x00000818
59 #define XGMAC_MMC_TXFRAME_GB_LO	0x0000081C
60 #define XGMAC_MMC_TXFRAME_GB_HI	0x00000820
61 #define XGMAC_MMC_TXBCFRAME_G	0x00000824
62 #define XGMAC_MMC_TXMCFRAME_G	0x0000082C
63 #define XGMAC_MMC_TXUCFRAME_GB	0x00000864
64 #define XGMAC_MMC_TXMCFRAME_GB	0x0000086C
65 #define XGMAC_MMC_TXBCFRAME_GB	0x00000874
66 #define XGMAC_MMC_TXUNDERFLOW	0x0000087C
67 #define XGMAC_MMC_TXOCTET_G_LO	0x00000884
68 #define XGMAC_MMC_TXOCTET_G_HI	0x00000888
69 #define XGMAC_MMC_TXFRAME_G_LO	0x0000088C
70 #define XGMAC_MMC_TXFRAME_G_HI	0x00000890
71 #define XGMAC_MMC_TXPAUSEFRAME	0x00000894
72 #define XGMAC_MMC_TXVLANFRAME	0x0000089C
73 
74 /* Hardware RX Statistics Counters */
75 #define XGMAC_MMC_RXFRAME_GB_LO	0x00000900
76 #define XGMAC_MMC_RXFRAME_GB_HI	0x00000904
77 #define XGMAC_MMC_RXOCTET_GB_LO	0x00000908
78 #define XGMAC_MMC_RXOCTET_GB_HI	0x0000090C
79 #define XGMAC_MMC_RXOCTET_G_LO	0x00000910
80 #define XGMAC_MMC_RXOCTET_G_HI	0x00000914
81 #define XGMAC_MMC_RXBCFRAME_G	0x00000918
82 #define XGMAC_MMC_RXMCFRAME_G	0x00000920
83 #define XGMAC_MMC_RXCRCERR	0x00000928
84 #define XGMAC_MMC_RXRUNT	0x00000930
85 #define XGMAC_MMC_RXJABBER	0x00000934
86 #define XGMAC_MMC_RXUCFRAME_G	0x00000970
87 #define XGMAC_MMC_RXLENGTHERR	0x00000978
88 #define XGMAC_MMC_RXPAUSEFRAME	0x00000988
89 #define XGMAC_MMC_RXOVERFLOW	0x00000990
90 #define XGMAC_MMC_RXVLANFRAME	0x00000998
91 #define XGMAC_MMC_RXWATCHDOG	0x000009a0
92 
93 /* DMA Control and Status Registers */
94 #define XGMAC_DMA_BUS_MODE	0x00000f00	/* Bus Mode */
95 #define XGMAC_DMA_TX_POLL	0x00000f04	/* Transmit Poll Demand */
96 #define XGMAC_DMA_RX_POLL	0x00000f08	/* Received Poll Demand */
97 #define XGMAC_DMA_RX_BASE_ADDR	0x00000f0c	/* Receive List Base */
98 #define XGMAC_DMA_TX_BASE_ADDR	0x00000f10	/* Transmit List Base */
99 #define XGMAC_DMA_STATUS	0x00000f14	/* Status Register */
100 #define XGMAC_DMA_CONTROL	0x00000f18	/* Ctrl (Operational Mode) */
101 #define XGMAC_DMA_INTR_ENA	0x00000f1c	/* Interrupt Enable */
102 #define XGMAC_DMA_MISS_FRAME_CTR 0x00000f20	/* Missed Frame Counter */
103 #define XGMAC_DMA_RI_WDOG_TIMER	0x00000f24	/* RX Intr Watchdog Timer */
104 #define XGMAC_DMA_AXI_BUS	0x00000f28	/* AXI Bus Mode */
105 #define XGMAC_DMA_AXI_STATUS	0x00000f2C	/* AXI Status */
106 #define XGMAC_DMA_HW_FEATURE	0x00000f58	/* Enabled Hardware Features */
107 
108 #define XGMAC_ADDR_AE		0x80000000
109 #define XGMAC_MAX_FILTER_ADDR	31
110 
111 /* PMT Control and Status */
112 #define XGMAC_PMT_POINTER_RESET	0x80000000
113 #define XGMAC_PMT_GLBL_UNICAST	0x00000200
114 #define XGMAC_PMT_WAKEUP_RX_FRM	0x00000040
115 #define XGMAC_PMT_MAGIC_PKT	0x00000020
116 #define XGMAC_PMT_WAKEUP_FRM_EN	0x00000004
117 #define XGMAC_PMT_MAGIC_PKT_EN	0x00000002
118 #define XGMAC_PMT_POWERDOWN	0x00000001
119 
120 #define XGMAC_CONTROL_SPD	0x40000000	/* Speed control */
121 #define XGMAC_CONTROL_SPD_MASK	0x60000000
122 #define XGMAC_CONTROL_SPD_1G	0x60000000
123 #define XGMAC_CONTROL_SPD_2_5G	0x40000000
124 #define XGMAC_CONTROL_SPD_10G	0x00000000
125 #define XGMAC_CONTROL_SARC	0x10000000	/* Source Addr Insert/Replace */
126 #define XGMAC_CONTROL_SARK_MASK	0x18000000
127 #define XGMAC_CONTROL_CAR	0x04000000	/* CRC Addition/Replacement */
128 #define XGMAC_CONTROL_CAR_MASK	0x06000000
129 #define XGMAC_CONTROL_DP	0x01000000	/* Disable Padding */
130 #define XGMAC_CONTROL_WD	0x00800000	/* Disable Watchdog on rx */
131 #define XGMAC_CONTROL_JD	0x00400000	/* Jabber disable */
132 #define XGMAC_CONTROL_JE	0x00100000	/* Jumbo frame */
133 #define XGMAC_CONTROL_LM	0x00001000	/* Loop-back mode */
134 #define XGMAC_CONTROL_IPC	0x00000400	/* Checksum Offload */
135 #define XGMAC_CONTROL_ACS	0x00000080	/* Automatic Pad/FCS Strip */
136 #define XGMAC_CONTROL_DDIC	0x00000010	/* Disable Deficit Idle Count */
137 #define XGMAC_CONTROL_TE	0x00000008	/* Transmitter Enable */
138 #define XGMAC_CONTROL_RE	0x00000004	/* Receiver Enable */
139 
140 /* XGMAC Frame Filter defines */
141 #define XGMAC_FRAME_FILTER_PR	0x00000001	/* Promiscuous Mode */
142 #define XGMAC_FRAME_FILTER_HUC	0x00000002	/* Hash Unicast */
143 #define XGMAC_FRAME_FILTER_HMC	0x00000004	/* Hash Multicast */
144 #define XGMAC_FRAME_FILTER_DAIF	0x00000008	/* DA Inverse Filtering */
145 #define XGMAC_FRAME_FILTER_PM	0x00000010	/* Pass all multicast */
146 #define XGMAC_FRAME_FILTER_DBF	0x00000020	/* Disable Broadcast frames */
147 #define XGMAC_FRAME_FILTER_SAIF	0x00000100	/* Inverse Filtering */
148 #define XGMAC_FRAME_FILTER_SAF	0x00000200	/* Source Address Filter */
149 #define XGMAC_FRAME_FILTER_HPF	0x00000400	/* Hash or perfect Filter */
150 #define XGMAC_FRAME_FILTER_VHF	0x00000800	/* VLAN Hash Filter */
151 #define XGMAC_FRAME_FILTER_VPF	0x00001000	/* VLAN Perfect Filter */
152 #define XGMAC_FRAME_FILTER_RA	0x80000000	/* Receive all mode */
153 
154 /* XGMAC FLOW CTRL defines */
155 #define XGMAC_FLOW_CTRL_PT_MASK	0xffff0000	/* Pause Time Mask */
156 #define XGMAC_FLOW_CTRL_PT_SHIFT	16
157 #define XGMAC_FLOW_CTRL_DZQP	0x00000080	/* Disable Zero-Quanta Phase */
#define XGMAC_FLOW_CTRL_PLT	0x00000020	/* Pause Low Threshold */
159 #define XGMAC_FLOW_CTRL_PLT_MASK 0x00000030	/* PLT MASK */
160 #define XGMAC_FLOW_CTRL_UP	0x00000008	/* Unicast Pause Frame Detect */
161 #define XGMAC_FLOW_CTRL_RFE	0x00000004	/* Rx Flow Control Enable */
162 #define XGMAC_FLOW_CTRL_TFE	0x00000002	/* Tx Flow Control Enable */
163 #define XGMAC_FLOW_CTRL_FCB_BPA	0x00000001	/* Flow Control Busy ... */
164 
165 /* XGMAC_INT_STAT reg */
166 #define XGMAC_INT_STAT_PMT	0x0080		/* PMT Interrupt Status */
167 #define XGMAC_INT_STAT_LPI	0x0040		/* LPI Interrupt Status */
168 
169 /* DMA Bus Mode register defines */
170 #define DMA_BUS_MODE_SFT_RESET	0x00000001	/* Software Reset */
171 #define DMA_BUS_MODE_DSL_MASK	0x0000007c	/* Descriptor Skip Length */
172 #define DMA_BUS_MODE_DSL_SHIFT	2		/* (in DWORDS) */
173 #define DMA_BUS_MODE_ATDS	0x00000080	/* Alternate Descriptor Size */
174 
175 /* Programmable burst length */
176 #define DMA_BUS_MODE_PBL_MASK	0x00003f00	/* Programmable Burst Len */
177 #define DMA_BUS_MODE_PBL_SHIFT	8
178 #define DMA_BUS_MODE_FB		0x00010000	/* Fixed burst */
179 #define DMA_BUS_MODE_RPBL_MASK	0x003e0000	/* Rx-Programmable Burst Len */
180 #define DMA_BUS_MODE_RPBL_SHIFT	17
181 #define DMA_BUS_MODE_USP	0x00800000
182 #define DMA_BUS_MODE_8PBL	0x01000000
183 #define DMA_BUS_MODE_AAL	0x02000000
184 
185 /* DMA Bus Mode register defines */
186 #define DMA_BUS_PR_RATIO_MASK	0x0000c000	/* Rx/Tx priority ratio */
187 #define DMA_BUS_PR_RATIO_SHIFT	14
188 #define DMA_BUS_FB		0x00010000	/* Fixed Burst */
189 
190 /* DMA Control register defines */
191 #define DMA_CONTROL_ST		0x00002000	/* Start/Stop Transmission */
192 #define DMA_CONTROL_SR		0x00000002	/* Start/Stop Receive */
193 #define DMA_CONTROL_DFF		0x01000000	/* Disable flush of rx frames */
194 #define DMA_CONTROL_OSF		0x00000004	/* Operate on 2nd tx frame */
195 
196 /* DMA Normal interrupt */
197 #define DMA_INTR_ENA_NIE	0x00010000	/* Normal Summary */
198 #define DMA_INTR_ENA_AIE	0x00008000	/* Abnormal Summary */
199 #define DMA_INTR_ENA_ERE	0x00004000	/* Early Receive */
200 #define DMA_INTR_ENA_FBE	0x00002000	/* Fatal Bus Error */
201 #define DMA_INTR_ENA_ETE	0x00000400	/* Early Transmit */
202 #define DMA_INTR_ENA_RWE	0x00000200	/* Receive Watchdog */
203 #define DMA_INTR_ENA_RSE	0x00000100	/* Receive Stopped */
204 #define DMA_INTR_ENA_RUE	0x00000080	/* Receive Buffer Unavailable */
205 #define DMA_INTR_ENA_RIE	0x00000040	/* Receive Interrupt */
206 #define DMA_INTR_ENA_UNE	0x00000020	/* Tx Underflow */
207 #define DMA_INTR_ENA_OVE	0x00000010	/* Receive Overflow */
208 #define DMA_INTR_ENA_TJE	0x00000008	/* Transmit Jabber */
209 #define DMA_INTR_ENA_TUE	0x00000004	/* Transmit Buffer Unavail */
210 #define DMA_INTR_ENA_TSE	0x00000002	/* Transmit Stopped */
211 #define DMA_INTR_ENA_TIE	0x00000001	/* Transmit Interrupt */
212 
213 #define DMA_INTR_NORMAL		(DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
214 				 DMA_INTR_ENA_TUE | DMA_INTR_ENA_TIE)
215 
216 #define DMA_INTR_ABNORMAL	(DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
217 				 DMA_INTR_ENA_RWE | DMA_INTR_ENA_RSE | \
218 				 DMA_INTR_ENA_RUE | DMA_INTR_ENA_UNE | \
219 				 DMA_INTR_ENA_OVE | DMA_INTR_ENA_TJE | \
220 				 DMA_INTR_ENA_TSE)
221 
222 /* DMA default interrupt mask */
223 #define DMA_INTR_DEFAULT_MASK	(DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
224 
225 /* DMA Status register defines */
226 #define DMA_STATUS_GMI		0x08000000	/* MMC interrupt */
227 #define DMA_STATUS_GLI		0x04000000	/* GMAC Line interface int */
228 #define DMA_STATUS_EB_MASK	0x00380000	/* Error Bits Mask */
229 #define DMA_STATUS_EB_TX_ABORT	0x00080000	/* Error Bits - TX Abort */
230 #define DMA_STATUS_EB_RX_ABORT	0x00100000	/* Error Bits - RX Abort */
231 #define DMA_STATUS_TS_MASK	0x00700000	/* Transmit Process State */
232 #define DMA_STATUS_TS_SHIFT	20
233 #define DMA_STATUS_RS_MASK	0x000e0000	/* Receive Process State */
234 #define DMA_STATUS_RS_SHIFT	17
235 #define DMA_STATUS_NIS		0x00010000	/* Normal Interrupt Summary */
236 #define DMA_STATUS_AIS		0x00008000	/* Abnormal Interrupt Summary */
237 #define DMA_STATUS_ERI		0x00004000	/* Early Receive Interrupt */
238 #define DMA_STATUS_FBI		0x00002000	/* Fatal Bus Error Interrupt */
239 #define DMA_STATUS_ETI		0x00000400	/* Early Transmit Interrupt */
240 #define DMA_STATUS_RWT		0x00000200	/* Receive Watchdog Timeout */
241 #define DMA_STATUS_RPS		0x00000100	/* Receive Process Stopped */
242 #define DMA_STATUS_RU		0x00000080	/* Receive Buffer Unavailable */
243 #define DMA_STATUS_RI		0x00000040	/* Receive Interrupt */
244 #define DMA_STATUS_UNF		0x00000020	/* Transmit Underflow */
245 #define DMA_STATUS_OVF		0x00000010	/* Receive Overflow */
246 #define DMA_STATUS_TJT		0x00000008	/* Transmit Jabber Timeout */
247 #define DMA_STATUS_TU		0x00000004	/* Transmit Buffer Unavail */
248 #define DMA_STATUS_TPS		0x00000002	/* Transmit Process Stopped */
249 #define DMA_STATUS_TI		0x00000001	/* Transmit Interrupt */
250 
251 /* Common MAC defines */
252 #define MAC_ENABLE_TX		0x00000008	/* Transmitter Enable */
253 #define MAC_ENABLE_RX		0x00000004	/* Receiver Enable */
254 
255 /* XGMAC Operation Mode Register */
256 #define XGMAC_OMR_TSF		0x00200000	/* TX FIFO Store and Forward */
257 #define XGMAC_OMR_FTF		0x00100000	/* Flush Transmit FIFO */
#define XGMAC_OMR_TTC		0x00020000	/* Transmit Threshold Ctrl */
259 #define XGMAC_OMR_TTC_MASK	0x00030000
#define XGMAC_OMR_RFD		0x00006000	/* FC Deactivation Threshold */
#define XGMAC_OMR_RFD_MASK	0x00007000	/* FC Deact Threshold MASK */
#define XGMAC_OMR_RFA		0x00000600	/* FC Activation Threshold */
#define XGMAC_OMR_RFA_MASK	0x00000E00	/* FC Act Threshold MASK */
264 #define XGMAC_OMR_EFC		0x00000100	/* Enable Hardware FC */
265 #define XGMAC_OMR_FEF		0x00000080	/* Forward Error Frames */
266 #define XGMAC_OMR_DT		0x00000040	/* Drop TCP/IP csum Errors */
267 #define XGMAC_OMR_RSF		0x00000020	/* RX FIFO Store and Forward */
#define XGMAC_OMR_RTC_256	0x00000018	/* RX Threshold Ctrl */
#define XGMAC_OMR_RTC_MASK	0x00000018	/* RX Threshold Ctrl MASK */
270 
271 /* XGMAC HW Features Register */
272 #define DMA_HW_FEAT_TXCOESEL	0x00010000	/* TX Checksum offload */
273 
274 #define XGMAC_MMC_CTRL_CNT_FRZ	0x00000008
275 
276 /* XGMAC Descriptor Defines */
277 #define MAX_DESC_BUF_SZ		(0x2000 - 8)
278 
279 #define RXDESC_EXT_STATUS	0x00000001
280 #define RXDESC_CRC_ERR		0x00000002
281 #define RXDESC_RX_ERR		0x00000008
282 #define RXDESC_RX_WDOG		0x00000010
283 #define RXDESC_FRAME_TYPE	0x00000020
284 #define RXDESC_GIANT_FRAME	0x00000080
285 #define RXDESC_LAST_SEG		0x00000100
286 #define RXDESC_FIRST_SEG	0x00000200
287 #define RXDESC_VLAN_FRAME	0x00000400
288 #define RXDESC_OVERFLOW_ERR	0x00000800
289 #define RXDESC_LENGTH_ERR	0x00001000
290 #define RXDESC_SA_FILTER_FAIL	0x00002000
291 #define RXDESC_DESCRIPTOR_ERR	0x00004000
292 #define RXDESC_ERROR_SUMMARY	0x00008000
293 #define RXDESC_FRAME_LEN_OFFSET	16
294 #define RXDESC_FRAME_LEN_MASK	0x3fff0000
295 #define RXDESC_DA_FILTER_FAIL	0x40000000
296 
297 #define RXDESC1_END_RING	0x00008000
298 
299 #define RXDESC_IP_PAYLOAD_MASK	0x00000003
300 #define RXDESC_IP_PAYLOAD_UDP	0x00000001
301 #define RXDESC_IP_PAYLOAD_TCP	0x00000002
302 #define RXDESC_IP_PAYLOAD_ICMP	0x00000003
303 #define RXDESC_IP_HEADER_ERR	0x00000008
304 #define RXDESC_IP_PAYLOAD_ERR	0x00000010
305 #define RXDESC_IPV4_PACKET	0x00000040
306 #define RXDESC_IPV6_PACKET	0x00000080
307 #define TXDESC_UNDERFLOW_ERR	0x00000001
308 #define TXDESC_JABBER_TIMEOUT	0x00000002
309 #define TXDESC_LOCAL_FAULT	0x00000004
310 #define TXDESC_REMOTE_FAULT	0x00000008
311 #define TXDESC_VLAN_FRAME	0x00000010
312 #define TXDESC_FRAME_FLUSHED	0x00000020
313 #define TXDESC_IP_HEADER_ERR	0x00000040
314 #define TXDESC_PAYLOAD_CSUM_ERR	0x00000080
315 #define TXDESC_ERROR_SUMMARY	0x00008000
316 #define TXDESC_SA_CTRL_INSERT	0x00040000
317 #define TXDESC_SA_CTRL_REPLACE	0x00080000
318 #define TXDESC_2ND_ADDR_CHAINED	0x00100000
319 #define TXDESC_END_RING		0x00200000
320 #define TXDESC_CSUM_IP		0x00400000
321 #define TXDESC_CSUM_IP_PAYLD	0x00800000
322 #define TXDESC_CSUM_ALL		0x00C00000
323 #define TXDESC_CRC_EN_REPLACE	0x01000000
324 #define TXDESC_CRC_EN_APPEND	0x02000000
325 #define TXDESC_DISABLE_PAD	0x04000000
326 #define TXDESC_FIRST_SEG	0x10000000
327 #define TXDESC_LAST_SEG		0x20000000
328 #define TXDESC_INTERRUPT	0x40000000
329 
330 #define DESC_OWN		0x80000000
331 #define DESC_BUFFER1_SZ_MASK	0x00001fff
332 #define DESC_BUFFER2_SZ_MASK	0x1fff0000
333 #define DESC_BUFFER2_SZ_OFFSET	16
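
/* Alternate (8 x 32-bit word) descriptor layout; xgmac_hw_init() sets
 * DMA_BUS_MODE_ATDS so the hardware expects this larger descriptor size.
 */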
334 
335 struct xgmac_dma_desc {
336 	__le32 flags;
337 	__le32 buf_size;
338 	__le32 buf1_addr;		/* Buffer 1 Address Pointer */
339 	__le32 buf2_addr;		/* Buffer 2 Address Pointer */
340 	__le32 ext_status;
341 	__le32 res[3];
342 };
343 
344 struct xgmac_extra_stats {
345 	/* Transmit errors */
346 	unsigned long tx_jabber;
347 	unsigned long tx_frame_flushed;
348 	unsigned long tx_payload_error;
349 	unsigned long tx_ip_header_error;
350 	unsigned long tx_local_fault;
351 	unsigned long tx_remote_fault;
352 	/* Receive errors */
353 	unsigned long rx_watchdog;
354 	unsigned long rx_da_filter_fail;
355 	unsigned long rx_sa_filter_fail;
356 	unsigned long rx_payload_error;
357 	unsigned long rx_ip_header_error;
358 	/* Tx/Rx IRQ errors */
359 	unsigned long tx_undeflow;
360 	unsigned long tx_process_stopped;
361 	unsigned long rx_buf_unav;
362 	unsigned long rx_process_stopped;
363 	unsigned long tx_early;
364 	unsigned long fatal_bus_error;
365 };
366 
367 struct xgmac_priv {
368 	struct xgmac_dma_desc *dma_rx;
369 	struct sk_buff **rx_skbuff;
370 	unsigned int rx_tail;
371 	unsigned int rx_head;
372 
373 	struct xgmac_dma_desc *dma_tx;
374 	struct sk_buff **tx_skbuff;
375 	unsigned int tx_head;
376 	unsigned int tx_tail;
377 	int tx_irq_cnt;
378 
379 	void __iomem *base;
380 	unsigned int dma_buf_sz;
381 	dma_addr_t dma_rx_phy;
382 	dma_addr_t dma_tx_phy;
383 
384 	struct net_device *dev;
385 	struct device *device;
386 	struct napi_struct napi;
387 
388 	struct xgmac_extra_stats xstats;
389 
390 	spinlock_t stats_lock;
391 	int pmt_irq;
392 	char rx_pause;
393 	char tx_pause;
394 	int wolopts;
395 };
396 
397 /* XGMAC Configuration Settings */
398 #define MAX_MTU			9000
399 #define PAUSE_TIME		0x400
400 
401 #define DMA_RX_RING_SZ		256
402 #define DMA_TX_RING_SZ		128
403 /* minimum number of free TX descriptors required to wake up TX process */
404 #define TX_THRESH		(DMA_TX_RING_SZ/4)
405 
406 /* DMA descriptor ring helpers */
407 #define dma_ring_incr(n, s)	(((n) + 1) & ((s) - 1))
408 #define dma_ring_space(h, t, s)	CIRC_SPACE(h, t, s)
409 #define dma_ring_cnt(h, t, s)	CIRC_CNT(h, t, s)
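
/*
 * Note: these helpers assume the ring sizes are powers of two; the
 * CIRC_SPACE()/CIRC_CNT() macros come from <linux/circ_buf.h>, and the
 * "& (size - 1)" wrap in dma_ring_incr() only works for such sizes.
 */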
410 
411 /* XGMAC Descriptor Access Helpers */
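
/* Buffers larger than MAX_DESC_BUF_SZ are split across the descriptor's two
 * buffer pointers: buffer 1 holds MAX_DESC_BUF_SZ bytes and buffer 2 the
 * remainder (see desc_set_buf_addr()).
 */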
412 static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
413 {
414 	if (buf_sz > MAX_DESC_BUF_SZ)
415 		p->buf_size = cpu_to_le32(MAX_DESC_BUF_SZ |
416 			(buf_sz - MAX_DESC_BUF_SZ) << DESC_BUFFER2_SZ_OFFSET);
417 	else
418 		p->buf_size = cpu_to_le32(buf_sz);
419 }
420 
421 static inline int desc_get_buf_len(struct xgmac_dma_desc *p)
422 {
	u32 len = le32_to_cpu(p->buf_size);
424 	return (len & DESC_BUFFER1_SZ_MASK) +
425 		((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET);
426 }
427 
428 static inline void desc_init_rx_desc(struct xgmac_dma_desc *p, int ring_size,
429 				     int buf_sz)
430 {
431 	struct xgmac_dma_desc *end = p + ring_size - 1;
432 
433 	memset(p, 0, sizeof(*p) * ring_size);
434 
435 	for (; p <= end; p++)
436 		desc_set_buf_len(p, buf_sz);
437 
438 	end->buf_size |= cpu_to_le32(RXDESC1_END_RING);
439 }
440 
441 static inline void desc_init_tx_desc(struct xgmac_dma_desc *p, u32 ring_size)
442 {
443 	memset(p, 0, sizeof(*p) * ring_size);
444 	p[ring_size - 1].flags = cpu_to_le32(TXDESC_END_RING);
445 }
446 
447 static inline int desc_get_owner(struct xgmac_dma_desc *p)
448 {
449 	return le32_to_cpu(p->flags) & DESC_OWN;
450 }
451 
452 static inline void desc_set_rx_owner(struct xgmac_dma_desc *p)
453 {
454 	/* Clear all fields and set the owner */
455 	p->flags = cpu_to_le32(DESC_OWN);
456 }
457 
458 static inline void desc_set_tx_owner(struct xgmac_dma_desc *p, u32 flags)
459 {
460 	u32 tmpflags = le32_to_cpu(p->flags);
461 	tmpflags &= TXDESC_END_RING;
462 	tmpflags |= flags | DESC_OWN;
463 	p->flags = cpu_to_le32(tmpflags);
464 }
465 
466 static inline int desc_get_tx_ls(struct xgmac_dma_desc *p)
467 {
468 	return le32_to_cpu(p->flags) & TXDESC_LAST_SEG;
469 }
470 
471 static inline u32 desc_get_buf_addr(struct xgmac_dma_desc *p)
472 {
473 	return le32_to_cpu(p->buf1_addr);
474 }
475 
476 static inline void desc_set_buf_addr(struct xgmac_dma_desc *p,
477 				     u32 paddr, int len)
478 {
479 	p->buf1_addr = cpu_to_le32(paddr);
480 	if (len > MAX_DESC_BUF_SZ)
481 		p->buf2_addr = cpu_to_le32(paddr + MAX_DESC_BUF_SZ);
482 }
483 
484 static inline void desc_set_buf_addr_and_size(struct xgmac_dma_desc *p,
485 					      u32 paddr, int len)
486 {
487 	desc_set_buf_len(p, len);
488 	desc_set_buf_addr(p, paddr, len);
489 }
490 
491 static inline int desc_get_rx_frame_len(struct xgmac_dma_desc *p)
492 {
493 	u32 data = le32_to_cpu(p->flags);
494 	u32 len = (data & RXDESC_FRAME_LEN_MASK) >> RXDESC_FRAME_LEN_OFFSET;
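	/* With XGMAC_CONTROL_ACS set, the FCS is presumably stripped only for
	 * 802.3 length-field frames; Ethertype frames still include it, so
	 * drop it from the reported length. */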
495 	if (data & RXDESC_FRAME_TYPE)
496 		len -= ETH_FCS_LEN;
497 
498 	return len;
499 }
500 
501 static void xgmac_dma_flush_tx_fifo(void __iomem *ioaddr)
502 {
503 	int timeout = 1000;
504 	u32 reg = readl(ioaddr + XGMAC_OMR);
505 	writel(reg | XGMAC_OMR_FTF, ioaddr + XGMAC_OMR);
506 
507 	while ((timeout-- > 0) && readl(ioaddr + XGMAC_OMR) & XGMAC_OMR_FTF)
508 		udelay(1);
509 }
510 
511 static int desc_get_tx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
512 {
513 	struct xgmac_extra_stats *x = &priv->xstats;
514 	u32 status = le32_to_cpu(p->flags);
515 
516 	if (!(status & TXDESC_ERROR_SUMMARY))
517 		return 0;
518 
519 	netdev_dbg(priv->dev, "tx desc error = 0x%08x\n", status);
520 	if (status & TXDESC_JABBER_TIMEOUT)
521 		x->tx_jabber++;
522 	if (status & TXDESC_FRAME_FLUSHED)
523 		x->tx_frame_flushed++;
524 	if (status & TXDESC_UNDERFLOW_ERR)
525 		xgmac_dma_flush_tx_fifo(priv->base);
526 	if (status & TXDESC_IP_HEADER_ERR)
527 		x->tx_ip_header_error++;
528 	if (status & TXDESC_LOCAL_FAULT)
529 		x->tx_local_fault++;
530 	if (status & TXDESC_REMOTE_FAULT)
531 		x->tx_remote_fault++;
532 	if (status & TXDESC_PAYLOAD_CSUM_ERR)
533 		x->tx_payload_error++;
534 
535 	return -1;
536 }
537 
538 static int desc_get_rx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
539 {
540 	struct xgmac_extra_stats *x = &priv->xstats;
541 	int ret = CHECKSUM_UNNECESSARY;
542 	u32 status = le32_to_cpu(p->flags);
543 	u32 ext_status = le32_to_cpu(p->ext_status);
544 
545 	if (status & RXDESC_DA_FILTER_FAIL) {
546 		netdev_dbg(priv->dev, "XGMAC RX : Dest Address filter fail\n");
547 		x->rx_da_filter_fail++;
548 		return -1;
549 	}
550 
551 	/* All frames should fit into a single buffer */
552 	if (!(status & RXDESC_FIRST_SEG) || !(status & RXDESC_LAST_SEG))
553 		return -1;
554 
555 	/* Check if packet has checksum already */
556 	if ((status & RXDESC_FRAME_TYPE) && (status & RXDESC_EXT_STATUS) &&
557 		!(ext_status & RXDESC_IP_PAYLOAD_MASK))
558 		ret = CHECKSUM_NONE;
559 
560 	netdev_dbg(priv->dev, "rx status - frame type=%d, csum = %d, ext stat %08x\n",
561 		   (status & RXDESC_FRAME_TYPE) ? 1 : 0, ret, ext_status);
562 
563 	if (!(status & RXDESC_ERROR_SUMMARY))
564 		return ret;
565 
566 	/* Handle any errors */
567 	if (status & (RXDESC_DESCRIPTOR_ERR | RXDESC_OVERFLOW_ERR |
568 		RXDESC_GIANT_FRAME | RXDESC_LENGTH_ERR | RXDESC_CRC_ERR))
569 		return -1;
570 
571 	if (status & RXDESC_EXT_STATUS) {
572 		if (ext_status & RXDESC_IP_HEADER_ERR)
573 			x->rx_ip_header_error++;
574 		if (ext_status & RXDESC_IP_PAYLOAD_ERR)
575 			x->rx_payload_error++;
576 		netdev_dbg(priv->dev, "IP checksum error - stat %08x\n",
577 			   ext_status);
578 		return CHECKSUM_NONE;
579 	}
580 
581 	return ret;
582 }
583 
584 static inline void xgmac_mac_enable(void __iomem *ioaddr)
585 {
586 	u32 value = readl(ioaddr + XGMAC_CONTROL);
587 	value |= MAC_ENABLE_RX | MAC_ENABLE_TX;
588 	writel(value, ioaddr + XGMAC_CONTROL);
589 
590 	value = readl(ioaddr + XGMAC_DMA_CONTROL);
591 	value |= DMA_CONTROL_ST | DMA_CONTROL_SR;
592 	writel(value, ioaddr + XGMAC_DMA_CONTROL);
593 }
594 
595 static inline void xgmac_mac_disable(void __iomem *ioaddr)
596 {
597 	u32 value = readl(ioaddr + XGMAC_DMA_CONTROL);
598 	value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR);
599 	writel(value, ioaddr + XGMAC_DMA_CONTROL);
600 
601 	value = readl(ioaddr + XGMAC_CONTROL);
602 	value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX);
603 	writel(value, ioaddr + XGMAC_CONTROL);
604 }
605 
606 static void xgmac_set_mac_addr(void __iomem *ioaddr, unsigned char *addr,
607 			       int num)
608 {
609 	u32 data;
610 
611 	data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0);
612 	writel(data, ioaddr + XGMAC_ADDR_HIGH(num));
613 	data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
614 	writel(data, ioaddr + XGMAC_ADDR_LOW(num));
615 }
616 
617 static void xgmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
618 			       int num)
619 {
620 	u32 hi_addr, lo_addr;
621 
622 	/* Read the MAC address from the hardware */
623 	hi_addr = readl(ioaddr + XGMAC_ADDR_HIGH(num));
624 	lo_addr = readl(ioaddr + XGMAC_ADDR_LOW(num));
625 
626 	/* Extract the MAC address from the high and low words */
627 	addr[0] = lo_addr & 0xff;
628 	addr[1] = (lo_addr >> 8) & 0xff;
629 	addr[2] = (lo_addr >> 16) & 0xff;
630 	addr[3] = (lo_addr >> 24) & 0xff;
631 	addr[4] = hi_addr & 0xff;
632 	addr[5] = (hi_addr >> 8) & 0xff;
633 }
634 
635 static int xgmac_set_flow_ctrl(struct xgmac_priv *priv, int rx, int tx)
636 {
637 	u32 reg;
638 	unsigned int flow = 0;
639 
640 	priv->rx_pause = rx;
641 	priv->tx_pause = tx;
642 
643 	if (rx || tx) {
644 		if (rx)
645 			flow |= XGMAC_FLOW_CTRL_RFE;
646 		if (tx)
647 			flow |= XGMAC_FLOW_CTRL_TFE;
648 
649 		flow |= XGMAC_FLOW_CTRL_PLT | XGMAC_FLOW_CTRL_UP;
650 		flow |= (PAUSE_TIME << XGMAC_FLOW_CTRL_PT_SHIFT);
651 
652 		writel(flow, priv->base + XGMAC_FLOW_CTRL);
653 
654 		reg = readl(priv->base + XGMAC_OMR);
655 		reg |= XGMAC_OMR_EFC;
656 		writel(reg, priv->base + XGMAC_OMR);
657 	} else {
658 		writel(0, priv->base + XGMAC_FLOW_CTRL);
659 
660 		reg = readl(priv->base + XGMAC_OMR);
661 		reg &= ~XGMAC_OMR_EFC;
662 		writel(reg, priv->base + XGMAC_OMR);
663 	}
664 
665 	return 0;
666 }
667 
668 static void xgmac_rx_refill(struct xgmac_priv *priv)
669 {
670 	struct xgmac_dma_desc *p;
671 	dma_addr_t paddr;
672 	int bufsz = priv->dev->mtu + ETH_HLEN + ETH_FCS_LEN;
673 
674 	while (dma_ring_space(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ) > 1) {
675 		int entry = priv->rx_head;
676 		struct sk_buff *skb;
677 
678 		p = priv->dma_rx + entry;
679 
680 		if (priv->rx_skbuff[entry] == NULL) {
681 			skb = netdev_alloc_skb_ip_align(priv->dev, bufsz);
682 			if (unlikely(skb == NULL))
683 				break;
684 
685 			priv->rx_skbuff[entry] = skb;
			paddr = dma_map_single(priv->device, skb->data,
					       bufsz, DMA_FROM_DEVICE);
			/* Leave the slot empty if the mapping fails; it is
			 * retried on the next refill pass. */
			if (dma_mapping_error(priv->device, paddr)) {
				dev_kfree_skb_any(skb);
				priv->rx_skbuff[entry] = NULL;
				break;
			}
			desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
689 		}
690 
691 		netdev_dbg(priv->dev, "rx ring: head %d, tail %d\n",
692 			priv->rx_head, priv->rx_tail);
693 
694 		priv->rx_head = dma_ring_incr(priv->rx_head, DMA_RX_RING_SZ);
695 		desc_set_rx_owner(p);
696 	}
697 }
698 
699 /**
 * xgmac_dma_desc_rings_init - init the RX/TX descriptor rings
701  * @dev: net device structure
702  * Description:  this function initializes the DMA RX/TX descriptors
703  * and allocates the socket buffers.
704  */
705 static int xgmac_dma_desc_rings_init(struct net_device *dev)
706 {
707 	struct xgmac_priv *priv = netdev_priv(dev);
708 	unsigned int bfsize;
709 
710 	/* Set the Buffer size according to the MTU;
711 	 * The total buffer size including any IP offset must be a multiple
712 	 * of 8 bytes.
713 	 */
714 	bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN, 8);
715 
716 	netdev_dbg(priv->dev, "mtu [%d] bfsize [%d]\n", dev->mtu, bfsize);
717 
718 	priv->rx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_RX_RING_SZ,
719 				  GFP_KERNEL);
720 	if (!priv->rx_skbuff)
721 		return -ENOMEM;
722 
723 	priv->dma_rx = dma_alloc_coherent(priv->device,
724 					  DMA_RX_RING_SZ *
725 					  sizeof(struct xgmac_dma_desc),
726 					  &priv->dma_rx_phy,
727 					  GFP_KERNEL);
728 	if (!priv->dma_rx)
729 		goto err_dma_rx;
730 
731 	priv->tx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_TX_RING_SZ,
732 				  GFP_KERNEL);
733 	if (!priv->tx_skbuff)
734 		goto err_tx_skb;
735 
736 	priv->dma_tx = dma_alloc_coherent(priv->device,
737 					  DMA_TX_RING_SZ *
738 					  sizeof(struct xgmac_dma_desc),
739 					  &priv->dma_tx_phy,
740 					  GFP_KERNEL);
741 	if (!priv->dma_tx)
742 		goto err_dma_tx;
743 
744 	netdev_dbg(priv->dev, "DMA desc rings: virt addr (Rx %p, "
745 	    "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n",
746 	    priv->dma_rx, priv->dma_tx,
747 	    (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy);
748 
749 	priv->rx_tail = 0;
750 	priv->rx_head = 0;
751 	priv->dma_buf_sz = bfsize;
752 	desc_init_rx_desc(priv->dma_rx, DMA_RX_RING_SZ, priv->dma_buf_sz);
753 	xgmac_rx_refill(priv);
754 
755 	priv->tx_tail = 0;
756 	priv->tx_head = 0;
757 	desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);
758 
759 	writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR);
760 	writel(priv->dma_rx_phy, priv->base + XGMAC_DMA_RX_BASE_ADDR);
761 
762 	return 0;
763 
764 err_dma_tx:
765 	kfree(priv->tx_skbuff);
766 err_tx_skb:
767 	dma_free_coherent(priv->device,
768 			  DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc),
769 			  priv->dma_rx, priv->dma_rx_phy);
770 err_dma_rx:
771 	kfree(priv->rx_skbuff);
772 	return -ENOMEM;
773 }
774 
775 static void xgmac_free_rx_skbufs(struct xgmac_priv *priv)
776 {
777 	int i;
778 	struct xgmac_dma_desc *p;
779 
780 	if (!priv->rx_skbuff)
781 		return;
782 
783 	for (i = 0; i < DMA_RX_RING_SZ; i++) {
784 		if (priv->rx_skbuff[i] == NULL)
785 			continue;
786 
787 		p = priv->dma_rx + i;
788 		dma_unmap_single(priv->device, desc_get_buf_addr(p),
789 				 priv->dma_buf_sz, DMA_FROM_DEVICE);
790 		dev_kfree_skb_any(priv->rx_skbuff[i]);
791 		priv->rx_skbuff[i] = NULL;
792 	}
793 }
794 
795 static void xgmac_free_tx_skbufs(struct xgmac_priv *priv)
796 {
797 	int i, f;
798 	struct xgmac_dma_desc *p;
799 
800 	if (!priv->tx_skbuff)
801 		return;
802 
803 	for (i = 0; i < DMA_TX_RING_SZ; i++) {
804 		if (priv->tx_skbuff[i] == NULL)
805 			continue;
806 
807 		p = priv->dma_tx + i;
808 		dma_unmap_single(priv->device, desc_get_buf_addr(p),
809 				 desc_get_buf_len(p), DMA_TO_DEVICE);
810 
811 		for (f = 0; f < skb_shinfo(priv->tx_skbuff[i])->nr_frags; f++) {
812 			p = priv->dma_tx + i++;
813 			dma_unmap_page(priv->device, desc_get_buf_addr(p),
814 				       desc_get_buf_len(p), DMA_TO_DEVICE);
815 		}
816 
817 		dev_kfree_skb_any(priv->tx_skbuff[i]);
818 		priv->tx_skbuff[i] = NULL;
819 	}
820 }
821 
822 static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv)
823 {
824 	/* Release the DMA TX/RX socket buffers */
825 	xgmac_free_rx_skbufs(priv);
826 	xgmac_free_tx_skbufs(priv);
827 
828 	/* Free the consistent memory allocated for descriptor rings */
829 	if (priv->dma_tx) {
830 		dma_free_coherent(priv->device,
831 				  DMA_TX_RING_SZ * sizeof(struct xgmac_dma_desc),
832 				  priv->dma_tx, priv->dma_tx_phy);
833 		priv->dma_tx = NULL;
834 	}
835 	if (priv->dma_rx) {
836 		dma_free_coherent(priv->device,
837 				  DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc),
838 				  priv->dma_rx, priv->dma_rx_phy);
839 		priv->dma_rx = NULL;
840 	}
841 	kfree(priv->rx_skbuff);
842 	priv->rx_skbuff = NULL;
843 	kfree(priv->tx_skbuff);
844 	priv->tx_skbuff = NULL;
845 }
846 
847 /**
 * xgmac_tx_complete:
849  * @priv: private driver structure
850  * Description: it reclaims resources after transmission completes.
851  */
852 static void xgmac_tx_complete(struct xgmac_priv *priv)
853 {
854 	int i;
855 
856 	while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) {
857 		unsigned int entry = priv->tx_tail;
858 		struct sk_buff *skb = priv->tx_skbuff[entry];
859 		struct xgmac_dma_desc *p = priv->dma_tx + entry;
860 
861 		/* Check if the descriptor is owned by the DMA. */
862 		if (desc_get_owner(p))
863 			break;
864 
865 		/* Verify tx error by looking at the last segment */
866 		if (desc_get_tx_ls(p))
867 			desc_get_tx_status(priv, p);
868 
869 		netdev_dbg(priv->dev, "tx ring: curr %d, dirty %d\n",
870 			priv->tx_head, priv->tx_tail);
871 
872 		dma_unmap_single(priv->device, desc_get_buf_addr(p),
873 				 desc_get_buf_len(p), DMA_TO_DEVICE);
874 
875 		priv->tx_skbuff[entry] = NULL;
876 		priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
877 
		if (!skb)
			continue;
881 
882 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
883 			entry = priv->tx_tail = dma_ring_incr(priv->tx_tail,
884 							      DMA_TX_RING_SZ);
885 			p = priv->dma_tx + priv->tx_tail;
886 
887 			dma_unmap_page(priv->device, desc_get_buf_addr(p),
888 				       desc_get_buf_len(p), DMA_TO_DEVICE);
889 		}
890 
891 		dev_kfree_skb(skb);
892 	}
893 
894 	if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) >
895 	    MAX_SKB_FRAGS)
896 		netif_wake_queue(priv->dev);
897 }
898 
899 /**
900  * xgmac_tx_err:
901  * @priv: pointer to the private device structure
902  * Description: it cleans the descriptors and restarts the transmission
903  * in case of errors.
904  */
905 static void xgmac_tx_err(struct xgmac_priv *priv)
906 {
907 	u32 reg, value, inten;
908 
909 	netif_stop_queue(priv->dev);
910 
911 	inten = readl(priv->base + XGMAC_DMA_INTR_ENA);
912 	writel(0, priv->base + XGMAC_DMA_INTR_ENA);
913 
914 	reg = readl(priv->base + XGMAC_DMA_CONTROL);
915 	writel(reg & ~DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
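	/* Wait for the transmit process state field (DMA_STATUS_TS) to
	 * settle: 0 means stopped and 0x600000 is presumably the suspended
	 * state. */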
916 	do {
917 		value = readl(priv->base + XGMAC_DMA_STATUS) & 0x700000;
918 	} while (value && (value != 0x600000));
919 
920 	xgmac_free_tx_skbufs(priv);
921 	desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);
922 	priv->tx_tail = 0;
923 	priv->tx_head = 0;
924 	writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR);
925 	writel(reg | DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
926 
927 	writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS,
928 		priv->base + XGMAC_DMA_STATUS);
929 	writel(inten, priv->base + XGMAC_DMA_INTR_ENA);
930 
931 	netif_wake_queue(priv->dev);
932 }
933 
934 static int xgmac_hw_init(struct net_device *dev)
935 {
936 	u32 value, ctrl;
937 	int limit;
938 	struct xgmac_priv *priv = netdev_priv(dev);
939 	void __iomem *ioaddr = priv->base;
940 
941 	/* Save the ctrl register value */
942 	ctrl = readl(ioaddr + XGMAC_CONTROL) & XGMAC_CONTROL_SPD_MASK;
943 
944 	/* SW reset */
945 	value = DMA_BUS_MODE_SFT_RESET;
946 	writel(value, ioaddr + XGMAC_DMA_BUS_MODE);
947 	limit = 15000;
948 	while (limit-- &&
949 		(readl(ioaddr + XGMAC_DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
950 		cpu_relax();
951 	if (limit < 0)
952 		return -EBUSY;
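
	/* Bus mode: fixed bursts, address-aligned beats, alternate (32-byte)
	 * descriptors, and a programmable burst length of 16 for both the tx
	 * and rx DMA engines. */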
953 
954 	value = (0x10 << DMA_BUS_MODE_PBL_SHIFT) |
955 		(0x10 << DMA_BUS_MODE_RPBL_SHIFT) |
956 		DMA_BUS_MODE_FB | DMA_BUS_MODE_ATDS | DMA_BUS_MODE_AAL;
957 	writel(value, ioaddr + XGMAC_DMA_BUS_MODE);
958 
959 	/* Enable interrupts */
960 	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
961 	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
962 
963 	/* XGMAC requires AXI bus init. This is a 'magic number' for now */
964 	writel(0x0077000E, ioaddr + XGMAC_DMA_AXI_BUS);
965 
966 	ctrl |= XGMAC_CONTROL_DDIC | XGMAC_CONTROL_JE | XGMAC_CONTROL_ACS |
967 		XGMAC_CONTROL_CAR;
968 	if (dev->features & NETIF_F_RXCSUM)
969 		ctrl |= XGMAC_CONTROL_IPC;
970 	writel(ctrl, ioaddr + XGMAC_CONTROL);
971 
972 	writel(DMA_CONTROL_OSF, ioaddr + XGMAC_DMA_CONTROL);
973 
974 	/* Set the HW DMA mode and the COE */
975 	writel(XGMAC_OMR_TSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA |
976 		XGMAC_OMR_RTC_256,
977 		ioaddr + XGMAC_OMR);
978 
979 	/* Reset the MMC counters */
980 	writel(1, ioaddr + XGMAC_MMC_CTRL);
981 	return 0;
982 }
983 
984 /**
985  *  xgmac_open - open entry point of the driver
986  *  @dev : pointer to the device structure.
987  *  Description:
988  *  This function is the open entry point of the driver.
989  *  Return value:
990  *  0 on success and an appropriate (-)ve integer as defined in errno.h
991  *  file on failure.
992  */
993 static int xgmac_open(struct net_device *dev)
994 {
995 	int ret;
996 	struct xgmac_priv *priv = netdev_priv(dev);
997 	void __iomem *ioaddr = priv->base;
998 
	/* Check that the MAC address is valid. If it isn't, generate a
	 * random one so the interface can still come up; the user may
	 * override it later with a command such as:
	 *      ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx  */
1003 	if (!is_valid_ether_addr(dev->dev_addr)) {
1004 		eth_hw_addr_random(dev);
1005 		netdev_dbg(priv->dev, "generated random MAC address %pM\n",
1006 			dev->dev_addr);
1007 	}
1008 
1009 	memset(&priv->xstats, 0, sizeof(struct xgmac_extra_stats));
1010 
1011 	/* Initialize the XGMAC and descriptors */
1012 	xgmac_hw_init(dev);
1013 	xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
1014 	xgmac_set_flow_ctrl(priv, priv->rx_pause, priv->tx_pause);
1015 
1016 	ret = xgmac_dma_desc_rings_init(dev);
1017 	if (ret < 0)
1018 		return ret;
1019 
1020 	/* Enable the MAC Rx/Tx */
1021 	xgmac_mac_enable(ioaddr);
1022 
1023 	napi_enable(&priv->napi);
1024 	netif_start_queue(dev);
1025 
1026 	return 0;
1027 }
1028 
1029 /**
 *  xgmac_stop - close entry point of the driver
1031  *  @dev : device pointer.
1032  *  Description:
1033  *  This is the stop entry point of the driver.
1034  */
1035 static int xgmac_stop(struct net_device *dev)
1036 {
1037 	struct xgmac_priv *priv = netdev_priv(dev);
1038 
1039 	netif_stop_queue(dev);
1040 
1041 	if (readl(priv->base + XGMAC_DMA_INTR_ENA))
1042 		napi_disable(&priv->napi);
1043 
1044 	writel(0, priv->base + XGMAC_DMA_INTR_ENA);
1045 
1046 	/* Disable the MAC core */
1047 	xgmac_mac_disable(priv->base);
1048 
1049 	/* Release and free the Rx/Tx resources */
1050 	xgmac_free_dma_desc_rings(priv);
1051 
1052 	return 0;
1053 }
1054 
1055 /**
1056  *  xgmac_xmit:
1057  *  @skb : the socket buffer
1058  *  @dev : device pointer
1059  *  Description : Tx entry point of the driver.
1060  */
1061 static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
1062 {
1063 	struct xgmac_priv *priv = netdev_priv(dev);
1064 	unsigned int entry;
1065 	int i;
1066 	u32 irq_flag;
1067 	int nfrags = skb_shinfo(skb)->nr_frags;
1068 	struct xgmac_dma_desc *desc, *first;
1069 	unsigned int desc_flags;
1070 	unsigned int len;
1071 	dma_addr_t paddr;
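
	/* Only request a completion interrupt for one in every
	 * DMA_TX_RING_SZ/4 packets; the remaining descriptors are reclaimed
	 * lazily from xgmac_tx_complete(). */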
1072 
1073 	priv->tx_irq_cnt = (priv->tx_irq_cnt + 1) & (DMA_TX_RING_SZ/4 - 1);
1074 	irq_flag = priv->tx_irq_cnt ? 0 : TXDESC_INTERRUPT;
1075 
1076 	desc_flags = (skb->ip_summed == CHECKSUM_PARTIAL) ?
1077 		TXDESC_CSUM_ALL : 0;
1078 	entry = priv->tx_head;
1079 	desc = priv->dma_tx + entry;
1080 	first = desc;
1081 
1082 	len = skb_headlen(skb);
1083 	paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE);
1084 	if (dma_mapping_error(priv->device, paddr)) {
1085 		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
1087 	}
1088 	priv->tx_skbuff[entry] = skb;
1089 	desc_set_buf_addr_and_size(desc, paddr, len);
1090 
1091 	for (i = 0; i < nfrags; i++) {
1092 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1093 
1094 		len = frag->size;
1095 
1096 		paddr = skb_frag_dma_map(priv->device, frag, 0, len,
1097 					 DMA_TO_DEVICE);
1098 		if (dma_mapping_error(priv->device, paddr)) {
1099 			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
1101 		}
1102 
1103 		entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
1104 		desc = priv->dma_tx + entry;
1105 		priv->tx_skbuff[entry] = NULL;
1106 
1107 		desc_set_buf_addr_and_size(desc, paddr, len);
1108 		if (i < (nfrags - 1))
1109 			desc_set_tx_owner(desc, desc_flags);
1110 	}
1111 
	/* Interrupt on completion only for the last segment */
1113 	if (desc != first)
1114 		desc_set_tx_owner(desc, desc_flags |
1115 			TXDESC_LAST_SEG | irq_flag);
1116 	else
1117 		desc_flags |= TXDESC_LAST_SEG | irq_flag;
1118 
1119 	/* Set owner on first desc last to avoid race condition */
1120 	wmb();
1121 	desc_set_tx_owner(first, desc_flags | TXDESC_FIRST_SEG);
1122 
1123 	priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);
1124 
1125 	writel(1, priv->base + XGMAC_DMA_TX_POLL);
1126 	if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) <
1127 	    MAX_SKB_FRAGS)
1128 		netif_stop_queue(dev);
1129 
1130 	return NETDEV_TX_OK;
1131 }
1132 
1133 static int xgmac_rx(struct xgmac_priv *priv, int limit)
1134 {
1135 	unsigned int entry;
1136 	unsigned int count = 0;
1137 	struct xgmac_dma_desc *p;
1138 
1139 	while (count < limit) {
1140 		int ip_checksum;
1141 		struct sk_buff *skb;
1142 		int frame_len;
1143 
1144 		entry = priv->rx_tail;
1145 		p = priv->dma_rx + entry;
1146 		if (desc_get_owner(p))
1147 			break;
1148 
1149 		count++;
1150 		priv->rx_tail = dma_ring_incr(priv->rx_tail, DMA_RX_RING_SZ);
1151 
1152 		/* read the status of the incoming frame */
1153 		ip_checksum = desc_get_rx_status(priv, p);
1154 		if (ip_checksum < 0)
1155 			continue;
1156 
1157 		skb = priv->rx_skbuff[entry];
1158 		if (unlikely(!skb)) {
1159 			netdev_err(priv->dev, "Inconsistent Rx descriptor chain\n");
1160 			break;
1161 		}
1162 		priv->rx_skbuff[entry] = NULL;
1163 
1164 		frame_len = desc_get_rx_frame_len(p);
1165 		netdev_dbg(priv->dev, "RX frame size %d, COE status: %d\n",
1166 			frame_len, ip_checksum);
1167 
1168 		skb_put(skb, frame_len);
1169 		dma_unmap_single(priv->device, desc_get_buf_addr(p),
1170 				 frame_len, DMA_FROM_DEVICE);
1171 
1172 		skb->protocol = eth_type_trans(skb, priv->dev);
1173 		skb->ip_summed = ip_checksum;
1174 		if (ip_checksum == CHECKSUM_NONE)
1175 			netif_receive_skb(skb);
1176 		else
1177 			napi_gro_receive(&priv->napi, skb);
1178 	}
1179 
1180 	xgmac_rx_refill(priv);
1181 
1182 	return count;
1183 }
1184 
1185 /**
1186  *  xgmac_poll - xgmac poll method (NAPI)
1187  *  @napi : pointer to the napi structure.
1188  *  @budget : maximum number of packets that the current CPU can receive from
1189  *	      all interfaces.
1190  *  Description :
 *   This function implements the reception process.
 *   It also reclaims completed TX descriptors.
1193  */
1194 static int xgmac_poll(struct napi_struct *napi, int budget)
1195 {
1196 	struct xgmac_priv *priv = container_of(napi,
1197 				       struct xgmac_priv, napi);
1198 	int work_done = 0;
1199 
1200 	xgmac_tx_complete(priv);
1201 	work_done = xgmac_rx(priv, budget);
1202 
1203 	if (work_done < budget) {
1204 		napi_complete(napi);
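		/* Re-enable the rx/tx interrupts that xgmac_interrupt() masked off */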
1205 		__raw_writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
1206 	}
1207 	return work_done;
1208 }
1209 
1210 /**
1211  *  xgmac_tx_timeout
1212  *  @dev : Pointer to net device structure
1213  *  Description: this function is called when a packet transmission fails to
 *   complete within a reasonable time. The driver will mark the error in the
1215  *   netdev structure and arrange for the device to be reset to a sane state
1216  *   in order to transmit a new packet.
1217  */
1218 static void xgmac_tx_timeout(struct net_device *dev)
1219 {
1220 	struct xgmac_priv *priv = netdev_priv(dev);
1221 
1222 	/* Clear Tx resources and restart transmitting again */
1223 	xgmac_tx_err(priv);
1224 }
1225 
1226 /**
1227  *  xgmac_set_rx_mode - entry point for multicast addressing
1228  *  @dev : pointer to the device structure
1229  *  Description:
1230  *  This function is a driver entry point which gets called by the kernel
1231  *  whenever multicast addresses must be enabled/disabled.
1232  *  Return value:
1233  *  void.
1234  */
1235 static void xgmac_set_rx_mode(struct net_device *dev)
1236 {
1237 	int i;
1238 	struct xgmac_priv *priv = netdev_priv(dev);
1239 	void __iomem *ioaddr = priv->base;
1240 	unsigned int value = 0;
1241 	u32 hash_filter[XGMAC_NUM_HASH];
1242 	int reg = 1;
1243 	struct netdev_hw_addr *ha;
1244 	bool use_hash = false;
1245 
1246 	netdev_dbg(priv->dev, "# mcasts %d, # unicast %d\n",
1247 		 netdev_mc_count(dev), netdev_uc_count(dev));
1248 
1249 	if (dev->flags & IFF_PROMISC) {
1250 		writel(XGMAC_FRAME_FILTER_PR, ioaddr + XGMAC_FRAME_FILTER);
1251 		return;
1252 	}
1253 
1254 	memset(hash_filter, 0, sizeof(hash_filter));
1255 
1256 	if (netdev_uc_count(dev) > XGMAC_MAX_FILTER_ADDR) {
1257 		use_hash = true;
1258 		value |= XGMAC_FRAME_FILTER_HUC | XGMAC_FRAME_FILTER_HPF;
1259 	}
1260 	netdev_for_each_uc_addr(ha, dev) {
1261 		if (use_hash) {
1262 			u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23;
1263 
1264 			/* The most significant 4 bits determine the register to
1265 			 * use (H/L) while the other 5 bits determine the bit
1266 			 * within the register. */
1267 			hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
1268 		} else {
1269 			xgmac_set_mac_addr(ioaddr, ha->addr, reg);
1270 			reg++;
1271 		}
1272 	}
1273 
1274 	if (dev->flags & IFF_ALLMULTI) {
1275 		value |= XGMAC_FRAME_FILTER_PM;
1276 		goto out;
1277 	}
1278 
1279 	if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) {
1280 		use_hash = true;
1281 		value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF;
1282 	}
1283 	netdev_for_each_mc_addr(ha, dev) {
1284 		if (use_hash) {
1285 			u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23;
1286 
1287 			/* The most significant 4 bits determine the register to
1288 			 * use (H/L) while the other 5 bits determine the bit
1289 			 * within the register. */
1290 			hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
1291 		} else {
1292 			xgmac_set_mac_addr(ioaddr, ha->addr, reg);
1293 			reg++;
1294 		}
1295 	}
1296 
1297 out:
1298 	for (i = 0; i < XGMAC_NUM_HASH; i++)
1299 		writel(hash_filter[i], ioaddr + XGMAC_HASH(i));
1300 
1301 	writel(value, ioaddr + XGMAC_FRAME_FILTER);
1302 }
1303 
1304 /**
1305  *  xgmac_change_mtu - entry point to change MTU size for the device.
1306  *  @dev : device pointer.
1307  *  @new_mtu : the new MTU size for the device.
1308  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
1309  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
1310  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
1311  *  Return value:
1312  *  0 on success and an appropriate (-)ve integer as defined in errno.h
1313  *  file on failure.
1314  */
1315 static int xgmac_change_mtu(struct net_device *dev, int new_mtu)
1316 {
1317 	struct xgmac_priv *priv = netdev_priv(dev);
1318 	int old_mtu;
1319 
1320 	if ((new_mtu < 46) || (new_mtu > MAX_MTU)) {
1321 		netdev_err(priv->dev, "invalid MTU, max MTU is: %d\n", MAX_MTU);
1322 		return -EINVAL;
1323 	}
1324 
1325 	old_mtu = dev->mtu;
1326 	dev->mtu = new_mtu;
1327 
1328 	/* return early if the buffer sizes will not change */
1329 	if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
1330 		return 0;
1331 	if (old_mtu == new_mtu)
1332 		return 0;
1333 
1334 	/* Stop everything, get ready to change the MTU */
1335 	if (!netif_running(dev))
1336 		return 0;
1337 
1338 	/* Bring the interface down and then back up */
1339 	xgmac_stop(dev);
1340 	return xgmac_open(dev);
1341 }
1342 
1343 static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id)
1344 {
1345 	u32 intr_status;
1346 	struct net_device *dev = (struct net_device *)dev_id;
1347 	struct xgmac_priv *priv = netdev_priv(dev);
1348 	void __iomem *ioaddr = priv->base;
1349 
1350 	intr_status = __raw_readl(ioaddr + XGMAC_INT_STAT);
1351 	if (intr_status & XGMAC_INT_STAT_PMT) {
1352 		netdev_dbg(priv->dev, "received Magic frame\n");
1353 		/* clear the PMT bits 5 and 6 by reading the PMT */
1354 		readl(ioaddr + XGMAC_PMT);
1355 	}
1356 	return IRQ_HANDLED;
1357 }
1358 
1359 static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
1360 {
1361 	u32 intr_status;
1362 	bool tx_err = false;
1363 	struct net_device *dev = (struct net_device *)dev_id;
1364 	struct xgmac_priv *priv = netdev_priv(dev);
1365 	struct xgmac_extra_stats *x = &priv->xstats;
1366 
1367 	/* read the status register (CSR5) */
1368 	intr_status = __raw_readl(priv->base + XGMAC_DMA_STATUS);
1369 	intr_status &= __raw_readl(priv->base + XGMAC_DMA_INTR_ENA);
1370 	__raw_writel(intr_status, priv->base + XGMAC_DMA_STATUS);
1371 
1372 	/* It displays the DMA process states (CSR5 register) */
1373 	/* ABNORMAL interrupts */
1374 	if (unlikely(intr_status & DMA_STATUS_AIS)) {
1375 		if (intr_status & DMA_STATUS_TJT) {
1376 			netdev_err(priv->dev, "transmit jabber\n");
1377 			x->tx_jabber++;
1378 		}
1379 		if (intr_status & DMA_STATUS_RU)
1380 			x->rx_buf_unav++;
1381 		if (intr_status & DMA_STATUS_RPS) {
1382 			netdev_err(priv->dev, "receive process stopped\n");
1383 			x->rx_process_stopped++;
1384 		}
1385 		if (intr_status & DMA_STATUS_ETI) {
1386 			netdev_err(priv->dev, "transmit early interrupt\n");
1387 			x->tx_early++;
1388 		}
1389 		if (intr_status & DMA_STATUS_TPS) {
1390 			netdev_err(priv->dev, "transmit process stopped\n");
1391 			x->tx_process_stopped++;
1392 			tx_err = true;
1393 		}
1394 		if (intr_status & DMA_STATUS_FBI) {
1395 			netdev_err(priv->dev, "fatal bus error\n");
1396 			x->fatal_bus_error++;
1397 			tx_err = true;
1398 		}
1399 
1400 		if (tx_err)
1401 			xgmac_tx_err(priv);
1402 	}
1403 
1404 	/* TX/RX NORMAL interrupts */
1405 	if (intr_status & (DMA_STATUS_RI | DMA_STATUS_TU | DMA_STATUS_TI)) {
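		/* Mask the normal (rx/tx) interrupts until the NAPI poll
		 * re-enables them. */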
1406 		__raw_writel(DMA_INTR_ABNORMAL, priv->base + XGMAC_DMA_INTR_ENA);
1407 		napi_schedule(&priv->napi);
1408 	}
1409 
1410 	return IRQ_HANDLED;
1411 }
1412 
1413 #ifdef CONFIG_NET_POLL_CONTROLLER
1414 /* Polling receive - used by NETCONSOLE and other diagnostic tools
1415  * to allow network I/O with interrupts disabled. */
1416 static void xgmac_poll_controller(struct net_device *dev)
1417 {
1418 	disable_irq(dev->irq);
1419 	xgmac_interrupt(dev->irq, dev);
1420 	enable_irq(dev->irq);
1421 }
1422 #endif
1423 
1424 static struct rtnl_link_stats64 *
1425 xgmac_get_stats64(struct net_device *dev,
1426 		       struct rtnl_link_stats64 *storage)
1427 {
1428 	struct xgmac_priv *priv = netdev_priv(dev);
1429 	void __iomem *base = priv->base;
1430 	u32 count;
1431 
1432 	spin_lock_bh(&priv->stats_lock);
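	/* Freeze the MMC counters while reading so the split 64-bit byte
	 * counts stay consistent. */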
1433 	writel(XGMAC_MMC_CTRL_CNT_FRZ, base + XGMAC_MMC_CTRL);
1434 
1435 	storage->rx_bytes = readl(base + XGMAC_MMC_RXOCTET_G_LO);
1436 	storage->rx_bytes |= (u64)(readl(base + XGMAC_MMC_RXOCTET_G_HI)) << 32;
1437 
1438 	storage->rx_packets = readl(base + XGMAC_MMC_RXFRAME_GB_LO);
1439 	storage->multicast = readl(base + XGMAC_MMC_RXMCFRAME_G);
1440 	storage->rx_crc_errors = readl(base + XGMAC_MMC_RXCRCERR);
1441 	storage->rx_length_errors = readl(base + XGMAC_MMC_RXLENGTHERR);
1442 	storage->rx_missed_errors = readl(base + XGMAC_MMC_RXOVERFLOW);
1443 
1444 	storage->tx_bytes = readl(base + XGMAC_MMC_TXOCTET_G_LO);
1445 	storage->tx_bytes |= (u64)(readl(base + XGMAC_MMC_TXOCTET_G_HI)) << 32;
1446 
1447 	count = readl(base + XGMAC_MMC_TXFRAME_GB_LO);
1448 	storage->tx_errors = count - readl(base + XGMAC_MMC_TXFRAME_G_LO);
1449 	storage->tx_packets = count;
1450 	storage->tx_fifo_errors = readl(base + XGMAC_MMC_TXUNDERFLOW);
1451 
1452 	writel(0, base + XGMAC_MMC_CTRL);
1453 	spin_unlock_bh(&priv->stats_lock);
1454 	return storage;
1455 }
1456 
1457 static int xgmac_set_mac_address(struct net_device *dev, void *p)
1458 {
1459 	struct xgmac_priv *priv = netdev_priv(dev);
1460 	void __iomem *ioaddr = priv->base;
1461 	struct sockaddr *addr = p;
1462 
1463 	if (!is_valid_ether_addr(addr->sa_data))
1464 		return -EADDRNOTAVAIL;
1465 
1466 	dev->addr_assign_type &= ~NET_ADDR_RANDOM;
1467 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1468 
1469 	xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
1470 
1471 	return 0;
1472 }
1473 
1474 static int xgmac_set_features(struct net_device *dev, netdev_features_t features)
1475 {
1476 	u32 ctrl;
1477 	struct xgmac_priv *priv = netdev_priv(dev);
1478 	void __iomem *ioaddr = priv->base;
1479 	u32 changed = dev->features ^ features;
1480 
1481 	if (!(changed & NETIF_F_RXCSUM))
1482 		return 0;
1483 
1484 	ctrl = readl(ioaddr + XGMAC_CONTROL);
1485 	if (features & NETIF_F_RXCSUM)
1486 		ctrl |= XGMAC_CONTROL_IPC;
1487 	else
1488 		ctrl &= ~XGMAC_CONTROL_IPC;
1489 	writel(ctrl, ioaddr + XGMAC_CONTROL);
1490 
1491 	return 0;
1492 }
1493 
1494 static const struct net_device_ops xgmac_netdev_ops = {
1495 	.ndo_open = xgmac_open,
1496 	.ndo_start_xmit = xgmac_xmit,
1497 	.ndo_stop = xgmac_stop,
1498 	.ndo_change_mtu = xgmac_change_mtu,
1499 	.ndo_set_rx_mode = xgmac_set_rx_mode,
1500 	.ndo_tx_timeout = xgmac_tx_timeout,
1501 	.ndo_get_stats64 = xgmac_get_stats64,
1502 #ifdef CONFIG_NET_POLL_CONTROLLER
1503 	.ndo_poll_controller = xgmac_poll_controller,
1504 #endif
1505 	.ndo_set_mac_address = xgmac_set_mac_address,
1506 	.ndo_set_features = xgmac_set_features,
1507 };
1508 
1509 static int xgmac_ethtool_getsettings(struct net_device *dev,
1510 					  struct ethtool_cmd *cmd)
1511 {
1512 	cmd->autoneg = 0;
1513 	cmd->duplex = DUPLEX_FULL;
1514 	ethtool_cmd_speed_set(cmd, 10000);
1515 	cmd->supported = 0;
1516 	cmd->advertising = 0;
1517 	cmd->transceiver = XCVR_INTERNAL;
1518 	return 0;
1519 }
1520 
1521 static void xgmac_get_pauseparam(struct net_device *netdev,
1522 				      struct ethtool_pauseparam *pause)
1523 {
1524 	struct xgmac_priv *priv = netdev_priv(netdev);
1525 
1526 	pause->rx_pause = priv->rx_pause;
1527 	pause->tx_pause = priv->tx_pause;
1528 }
1529 
1530 static int xgmac_set_pauseparam(struct net_device *netdev,
1531 				     struct ethtool_pauseparam *pause)
1532 {
1533 	struct xgmac_priv *priv = netdev_priv(netdev);
1534 
1535 	if (pause->autoneg)
1536 		return -EINVAL;
1537 
1538 	return xgmac_set_flow_ctrl(priv, pause->rx_pause, pause->tx_pause);
1539 }
1540 
1541 struct xgmac_stats {
1542 	char stat_string[ETH_GSTRING_LEN];
1543 	int stat_offset;
1544 	bool is_reg;
1545 };
1546 
1547 #define XGMAC_STAT(m)	\
1548 	{ #m, offsetof(struct xgmac_priv, xstats.m), false }
1549 #define XGMAC_HW_STAT(m, reg_offset)	\
1550 	{ #m, reg_offset, true }
1551 
1552 static const struct xgmac_stats xgmac_gstrings_stats[] = {
1553 	XGMAC_STAT(tx_frame_flushed),
1554 	XGMAC_STAT(tx_payload_error),
1555 	XGMAC_STAT(tx_ip_header_error),
1556 	XGMAC_STAT(tx_local_fault),
1557 	XGMAC_STAT(tx_remote_fault),
1558 	XGMAC_STAT(tx_early),
1559 	XGMAC_STAT(tx_process_stopped),
1560 	XGMAC_STAT(tx_jabber),
1561 	XGMAC_STAT(rx_buf_unav),
1562 	XGMAC_STAT(rx_process_stopped),
1563 	XGMAC_STAT(rx_payload_error),
1564 	XGMAC_STAT(rx_ip_header_error),
1565 	XGMAC_STAT(rx_da_filter_fail),
1566 	XGMAC_STAT(rx_sa_filter_fail),
1567 	XGMAC_STAT(fatal_bus_error),
1568 	XGMAC_HW_STAT(rx_watchdog, XGMAC_MMC_RXWATCHDOG),
1569 	XGMAC_HW_STAT(tx_vlan, XGMAC_MMC_TXVLANFRAME),
1570 	XGMAC_HW_STAT(rx_vlan, XGMAC_MMC_RXVLANFRAME),
1571 	XGMAC_HW_STAT(tx_pause, XGMAC_MMC_TXPAUSEFRAME),
1572 	XGMAC_HW_STAT(rx_pause, XGMAC_MMC_RXPAUSEFRAME),
1573 };
1574 #define XGMAC_STATS_LEN ARRAY_SIZE(xgmac_gstrings_stats)
1575 
1576 static void xgmac_get_ethtool_stats(struct net_device *dev,
1577 					 struct ethtool_stats *dummy,
1578 					 u64 *data)
1579 {
1580 	struct xgmac_priv *priv = netdev_priv(dev);
1581 	void *p = priv;
1582 	int i;
1583 
1584 	for (i = 0; i < XGMAC_STATS_LEN; i++) {
1585 		if (xgmac_gstrings_stats[i].is_reg)
1586 			*data++ = readl(priv->base +
1587 				xgmac_gstrings_stats[i].stat_offset);
1588 		else
1589 			*data++ = *(u32 *)(p +
1590 				xgmac_gstrings_stats[i].stat_offset);
1591 	}
1592 }
1593 
1594 static int xgmac_get_sset_count(struct net_device *netdev, int sset)
1595 {
1596 	switch (sset) {
1597 	case ETH_SS_STATS:
1598 		return XGMAC_STATS_LEN;
1599 	default:
1600 		return -EINVAL;
1601 	}
1602 }
1603 
1604 static void xgmac_get_strings(struct net_device *dev, u32 stringset,
1605 				   u8 *data)
1606 {
1607 	int i;
1608 	u8 *p = data;
1609 
1610 	switch (stringset) {
1611 	case ETH_SS_STATS:
1612 		for (i = 0; i < XGMAC_STATS_LEN; i++) {
1613 			memcpy(p, xgmac_gstrings_stats[i].stat_string,
1614 			       ETH_GSTRING_LEN);
1615 			p += ETH_GSTRING_LEN;
1616 		}
1617 		break;
1618 	default:
1619 		WARN_ON(1);
1620 		break;
1621 	}
1622 }
1623 
1624 static void xgmac_get_wol(struct net_device *dev,
1625 			       struct ethtool_wolinfo *wol)
1626 {
1627 	struct xgmac_priv *priv = netdev_priv(dev);
1628 
1629 	if (device_can_wakeup(priv->device)) {
1630 		wol->supported = WAKE_MAGIC | WAKE_UCAST;
1631 		wol->wolopts = priv->wolopts;
1632 	}
1633 }
1634 
1635 static int xgmac_set_wol(struct net_device *dev,
1636 			      struct ethtool_wolinfo *wol)
1637 {
1638 	struct xgmac_priv *priv = netdev_priv(dev);
1639 	u32 support = WAKE_MAGIC | WAKE_UCAST;
1640 
1641 	if (!device_can_wakeup(priv->device))
1642 		return -ENOTSUPP;
1643 
1644 	if (wol->wolopts & ~support)
1645 		return -EINVAL;
1646 
1647 	priv->wolopts = wol->wolopts;
1648 
1649 	if (wol->wolopts) {
1650 		device_set_wakeup_enable(priv->device, 1);
1651 		enable_irq_wake(dev->irq);
1652 	} else {
1653 		device_set_wakeup_enable(priv->device, 0);
1654 		disable_irq_wake(dev->irq);
1655 	}
1656 
1657 	return 0;
1658 }
1659 
1660 static const struct ethtool_ops xgmac_ethtool_ops = {
1661 	.get_settings = xgmac_ethtool_getsettings,
1662 	.get_link = ethtool_op_get_link,
1663 	.get_pauseparam = xgmac_get_pauseparam,
1664 	.set_pauseparam = xgmac_set_pauseparam,
1665 	.get_ethtool_stats = xgmac_get_ethtool_stats,
1666 	.get_strings = xgmac_get_strings,
1667 	.get_wol = xgmac_get_wol,
1668 	.set_wol = xgmac_set_wol,
1669 	.get_sset_count = xgmac_get_sset_count,
1670 };
1671 
1672 /**
1673  * xgmac_probe
1674  * @pdev: platform device pointer
 * Description: platform_device probe; maps the device registers, requests
 * the interrupts and registers the network device.
1676  */
1677 static int xgmac_probe(struct platform_device *pdev)
1678 {
1679 	int ret = 0;
1680 	struct resource *res;
1681 	struct net_device *ndev = NULL;
1682 	struct xgmac_priv *priv = NULL;
1683 	u32 uid;
1684 
1685 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1686 	if (!res)
1687 		return -ENODEV;
1688 
1689 	if (!request_mem_region(res->start, resource_size(res), pdev->name))
1690 		return -EBUSY;
1691 
1692 	ndev = alloc_etherdev(sizeof(struct xgmac_priv));
1693 	if (!ndev) {
1694 		ret = -ENOMEM;
1695 		goto err_alloc;
1696 	}
1697 
1698 	SET_NETDEV_DEV(ndev, &pdev->dev);
1699 	priv = netdev_priv(ndev);
1700 	platform_set_drvdata(pdev, ndev);
1702 	ndev->netdev_ops = &xgmac_netdev_ops;
1703 	SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops);
1704 	spin_lock_init(&priv->stats_lock);
1705 
1706 	priv->device = &pdev->dev;
1707 	priv->dev = ndev;
1708 	priv->rx_pause = 1;
1709 	priv->tx_pause = 1;
1710 
1711 	priv->base = ioremap(res->start, resource_size(res));
1712 	if (!priv->base) {
1713 		netdev_err(ndev, "ioremap failed\n");
1714 		ret = -ENOMEM;
1715 		goto err_io;
1716 	}
1717 
1718 	uid = readl(priv->base + XGMAC_VERSION);
1719 	netdev_info(ndev, "h/w version is 0x%x\n", uid);
1720 
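	/* Mask all DMA interrupts until the handler has been requested */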
1721 	writel(0, priv->base + XGMAC_DMA_INTR_ENA);
1722 	ndev->irq = platform_get_irq(pdev, 0);
	if (ndev->irq < 0) {
1724 		netdev_err(ndev, "No irq resource\n");
1725 		ret = ndev->irq;
1726 		goto err_irq;
1727 	}
1728 
1729 	ret = request_irq(ndev->irq, xgmac_interrupt, 0,
1730 			  dev_name(&pdev->dev), ndev);
1731 	if (ret < 0) {
		netdev_err(ndev, "Could not request irq %d - ret %d\n",
1733 			ndev->irq, ret);
1734 		goto err_irq;
1735 	}
1736 
1737 	priv->pmt_irq = platform_get_irq(pdev, 1);
	if (priv->pmt_irq < 0) {
1739 		netdev_err(ndev, "No pmt irq resource\n");
1740 		ret = priv->pmt_irq;
1741 		goto err_pmt_irq;
1742 	}
1743 
1744 	ret = request_irq(priv->pmt_irq, xgmac_pmt_interrupt, 0,
1745 			  dev_name(&pdev->dev), ndev);
1746 	if (ret < 0) {
		netdev_err(ndev, "Could not request irq %d - ret %d\n",
1748 			priv->pmt_irq, ret);
1749 		goto err_pmt_irq;
1750 	}
1751 
1752 	device_set_wakeup_capable(&pdev->dev, 1);
1753 	if (device_can_wakeup(priv->device))
1754 		priv->wolopts = WAKE_MAGIC;	/* Magic Frame as default */
1755 
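	/* Advertise checksum offload only when the DMA hardware feature
	 * register reports TX checksum engine (COE) support.
	 */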
1756 	ndev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;
1757 	if (readl(priv->base + XGMAC_DMA_HW_FEATURE) & DMA_HW_FEAT_TXCOESEL)
1758 		ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1759 				     NETIF_F_RXCSUM;
1760 	ndev->features |= ndev->hw_features;
1761 	ndev->priv_flags |= IFF_UNICAST_FLT;
1762 
1763 	/* Get the MAC address */
1764 	xgmac_get_mac_addr(priv->base, ndev->dev_addr, 0);
1765 	if (!is_valid_ether_addr(ndev->dev_addr))
		netdev_warn(ndev, "MAC address %pM not valid\n",
1767 			 ndev->dev_addr);
1768 
1769 	netif_napi_add(ndev, &priv->napi, xgmac_poll, 64);
1770 	ret = register_netdev(ndev);
1771 	if (ret)
1772 		goto err_reg;
1773 
1774 	return 0;
1775 
1776 err_reg:
1777 	netif_napi_del(&priv->napi);
1778 	free_irq(priv->pmt_irq, ndev);
1779 err_pmt_irq:
1780 	free_irq(ndev->irq, ndev);
1781 err_irq:
1782 	iounmap(priv->base);
1783 err_io:
1784 	free_netdev(ndev);
1785 err_alloc:
1786 	release_mem_region(res->start, resource_size(res));
1787 	platform_set_drvdata(pdev, NULL);
1788 	return ret;
1789 }
1790 
1791 /**
 * xgmac_remove
1793  * @pdev: platform device pointer
 * Description: this function disables the MAC, frees the IRQ lines,
 * unregisters the network device and unmaps the register region.
1797  */
1798 static int xgmac_remove(struct platform_device *pdev)
1799 {
1800 	struct net_device *ndev = platform_get_drvdata(pdev);
1801 	struct xgmac_priv *priv = netdev_priv(ndev);
1802 	struct resource *res;
1803 
1804 	xgmac_mac_disable(priv->base);
1805 
1806 	/* Free the IRQ lines */
1807 	free_irq(ndev->irq, ndev);
1808 	free_irq(priv->pmt_irq, ndev);
1809 
1810 	platform_set_drvdata(pdev, NULL);
1811 	unregister_netdev(ndev);
1812 	netif_napi_del(&priv->napi);
1813 
1814 	iounmap(priv->base);
1815 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1816 	release_mem_region(res->start, resource_size(res));
1817 
1818 	free_netdev(ndev);
1819 
1820 	return 0;
1821 }
1822 
1823 #ifdef CONFIG_PM_SLEEP
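/* Program the PMT register with the requested wake-up sources; a mode of
 * zero clears power-down mode.
 */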
1824 static void xgmac_pmt(void __iomem *ioaddr, unsigned long mode)
1825 {
1826 	unsigned int pmt = 0;
1827 
1828 	if (mode & WAKE_MAGIC)
1829 		pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT;
1830 	if (mode & WAKE_UCAST)
1831 		pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST;
1832 
1833 	writel(pmt, ioaddr + XGMAC_PMT);
1834 }
1835 
1836 static int xgmac_suspend(struct device *dev)
1837 {
1838 	struct net_device *ndev = platform_get_drvdata(to_platform_device(dev));
1839 	struct xgmac_priv *priv = netdev_priv(ndev);
1840 	u32 value;
1841 
1842 	if (!ndev || !netif_running(ndev))
1843 		return 0;
1844 
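	/* Quiesce the interface: detach it, stop NAPI and mask DMA interrupts */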
1845 	netif_device_detach(ndev);
1846 	napi_disable(&priv->napi);
1847 	writel(0, priv->base + XGMAC_DMA_INTR_ENA);
1848 
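	/* With WoL armed, stop only the DMA and program the PMT wake-up
	 * sources; otherwise disable the MAC entirely.
	 */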
1849 	if (device_may_wakeup(priv->device)) {
1850 		/* Stop TX/RX DMA Only */
1851 		value = readl(priv->base + XGMAC_DMA_CONTROL);
1852 		value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR);
1853 		writel(value, priv->base + XGMAC_DMA_CONTROL);
1854 
1855 		xgmac_pmt(priv->base, priv->wolopts);
	} else {
		xgmac_mac_disable(priv->base);
	}
1858 
1859 	return 0;
1860 }
1861 
1862 static int xgmac_resume(struct device *dev)
1863 {
1864 	struct net_device *ndev = platform_get_drvdata(to_platform_device(dev));
1865 	struct xgmac_priv *priv = netdev_priv(ndev);
1866 	void __iomem *ioaddr = priv->base;
1867 
1868 	if (!netif_running(ndev))
1869 		return 0;
1870 
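	/* Clear any wake-up mode armed during suspend before restarting */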
1871 	xgmac_pmt(ioaddr, 0);
1872 
1873 	/* Enable the MAC and DMA */
1874 	xgmac_mac_enable(ioaddr);
1875 	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
1876 	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
1877 
1878 	netif_device_attach(ndev);
1879 	napi_enable(&priv->napi);
1880 
1881 	return 0;
1882 }
1883 
1884 static SIMPLE_DEV_PM_OPS(xgmac_pm_ops, xgmac_suspend, xgmac_resume);
1885 #define XGMAC_PM_OPS (&xgmac_pm_ops)
1886 #else
1887 #define XGMAC_PM_OPS NULL
1888 #endif /* CONFIG_PM_SLEEP */
1889 
1890 static const struct of_device_id xgmac_of_match[] = {
1891 	{ .compatible = "calxeda,hb-xgmac", },
1892 	{},
1893 };
1894 MODULE_DEVICE_TABLE(of, xgmac_of_match);
1895 
static struct platform_driver xgmac_driver = {
	.driver = {
		.name = "calxedaxgmac",
		.of_match_table = xgmac_of_match,
		.pm = XGMAC_PM_OPS,
	},
	.probe = xgmac_probe,
	.remove = xgmac_remove,
};
1905 
1906 module_platform_driver(xgmac_driver);
1907 
1908 MODULE_AUTHOR("Calxeda, Inc.");
1909 MODULE_DESCRIPTION("Calxeda 10G XGMAC driver");
1910 MODULE_LICENSE("GPL v2");
1911