xref: /openbmc/linux/drivers/net/ethernet/nxp/lpc_eth.c (revision d0b73b48)
1 /*
2  * drivers/net/ethernet/nxp/lpc_eth.c
3  *
4  * Author: Kevin Wells <kevin.wells@nxp.com>
5  *
6  * Copyright (C) 2010 NXP Semiconductors
7  * Copyright (C) 2012 Roland Stigge <stigge@antcom.de>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  */
19 
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21 
22 #include <linux/init.h>
23 #include <linux/module.h>
24 #include <linux/kernel.h>
25 #include <linux/sched.h>
26 #include <linux/slab.h>
27 #include <linux/delay.h>
28 #include <linux/interrupt.h>
29 #include <linux/errno.h>
30 #include <linux/ioport.h>
31 #include <linux/crc32.h>
32 #include <linux/platform_device.h>
33 #include <linux/spinlock.h>
34 #include <linux/ethtool.h>
35 #include <linux/mii.h>
36 #include <linux/clk.h>
37 #include <linux/workqueue.h>
38 #include <linux/netdevice.h>
39 #include <linux/etherdevice.h>
40 #include <linux/skbuff.h>
41 #include <linux/phy.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/of.h>
44 #include <linux/of_net.h>
45 #include <linux/types.h>
46 
47 #include <linux/io.h>
48 #include <mach/board.h>
49 #include <mach/platform.h>
50 #include <mach/hardware.h>
51 
52 #define MODNAME "lpc-eth"
53 #define DRV_VERSION "1.00"
54 
55 #define ENET_MAXF_SIZE 1536
56 #define ENET_RX_DESC 48
57 #define ENET_TX_DESC 16
58 
59 #define NAPI_WEIGHT 16
60 
61 /*
62  * Ethernet MAC controller Register offsets
63  */
64 #define LPC_ENET_MAC1(x)			(x + 0x000)
65 #define LPC_ENET_MAC2(x)			(x + 0x004)
66 #define LPC_ENET_IPGT(x)			(x + 0x008)
67 #define LPC_ENET_IPGR(x)			(x + 0x00C)
68 #define LPC_ENET_CLRT(x)			(x + 0x010)
69 #define LPC_ENET_MAXF(x)			(x + 0x014)
70 #define LPC_ENET_SUPP(x)			(x + 0x018)
71 #define LPC_ENET_TEST(x)			(x + 0x01C)
72 #define LPC_ENET_MCFG(x)			(x + 0x020)
73 #define LPC_ENET_MCMD(x)			(x + 0x024)
74 #define LPC_ENET_MADR(x)			(x + 0x028)
75 #define LPC_ENET_MWTD(x)			(x + 0x02C)
76 #define LPC_ENET_MRDD(x)			(x + 0x030)
77 #define LPC_ENET_MIND(x)			(x + 0x034)
78 #define LPC_ENET_SA0(x)				(x + 0x040)
79 #define LPC_ENET_SA1(x)				(x + 0x044)
80 #define LPC_ENET_SA2(x)				(x + 0x048)
81 #define LPC_ENET_COMMAND(x)			(x + 0x100)
82 #define LPC_ENET_STATUS(x)			(x + 0x104)
83 #define LPC_ENET_RXDESCRIPTOR(x)		(x + 0x108)
84 #define LPC_ENET_RXSTATUS(x)			(x + 0x10C)
85 #define LPC_ENET_RXDESCRIPTORNUMBER(x)		(x + 0x110)
86 #define LPC_ENET_RXPRODUCEINDEX(x)		(x + 0x114)
87 #define LPC_ENET_RXCONSUMEINDEX(x)		(x + 0x118)
88 #define LPC_ENET_TXDESCRIPTOR(x)		(x + 0x11C)
89 #define LPC_ENET_TXSTATUS(x)			(x + 0x120)
90 #define LPC_ENET_TXDESCRIPTORNUMBER(x)		(x + 0x124)
91 #define LPC_ENET_TXPRODUCEINDEX(x)		(x + 0x128)
92 #define LPC_ENET_TXCONSUMEINDEX(x)		(x + 0x12C)
93 #define LPC_ENET_TSV0(x)			(x + 0x158)
94 #define LPC_ENET_TSV1(x)			(x + 0x15C)
95 #define LPC_ENET_RSV(x)				(x + 0x160)
96 #define LPC_ENET_FLOWCONTROLCOUNTER(x)		(x + 0x170)
97 #define LPC_ENET_FLOWCONTROLSTATUS(x)		(x + 0x174)
98 #define LPC_ENET_RXFILTER_CTRL(x)		(x + 0x200)
99 #define LPC_ENET_RXFILTERWOLSTATUS(x)		(x + 0x204)
100 #define LPC_ENET_RXFILTERWOLCLEAR(x)		(x + 0x208)
101 #define LPC_ENET_HASHFILTERL(x)			(x + 0x210)
102 #define LPC_ENET_HASHFILTERH(x)			(x + 0x214)
103 #define LPC_ENET_INTSTATUS(x)			(x + 0xFE0)
104 #define LPC_ENET_INTENABLE(x)			(x + 0xFE4)
105 #define LPC_ENET_INTCLEAR(x)			(x + 0xFE8)
106 #define LPC_ENET_INTSET(x)			(x + 0xFEC)
107 #define LPC_ENET_POWERDOWN(x)			(x + 0xFF4)
108 
109 /*
110  * mac1 register definitions
111  */
112 #define LPC_MAC1_RECV_ENABLE			(1 << 0)
113 #define LPC_MAC1_PASS_ALL_RX_FRAMES		(1 << 1)
114 #define LPC_MAC1_RX_FLOW_CONTROL		(1 << 2)
115 #define LPC_MAC1_TX_FLOW_CONTROL		(1 << 3)
116 #define LPC_MAC1_LOOPBACK			(1 << 4)
117 #define LPC_MAC1_RESET_TX			(1 << 8)
118 #define LPC_MAC1_RESET_MCS_TX			(1 << 9)
119 #define LPC_MAC1_RESET_RX			(1 << 10)
120 #define LPC_MAC1_RESET_MCS_RX			(1 << 11)
121 #define LPC_MAC1_SIMULATION_RESET		(1 << 14)
122 #define LPC_MAC1_SOFT_RESET			(1 << 15)
123 
124 /*
125  * mac2 register definitions
126  */
127 #define LPC_MAC2_FULL_DUPLEX			(1 << 0)
128 #define LPC_MAC2_FRAME_LENGTH_CHECKING		(1 << 1)
129 #define LPC_MAC2_HUGH_LENGTH_CHECKING		(1 << 2)
130 #define LPC_MAC2_DELAYED_CRC			(1 << 3)
131 #define LPC_MAC2_CRC_ENABLE			(1 << 4)
132 #define LPC_MAC2_PAD_CRC_ENABLE			(1 << 5)
133 #define LPC_MAC2_VLAN_PAD_ENABLE		(1 << 6)
134 #define LPC_MAC2_AUTO_DETECT_PAD_ENABLE		(1 << 7)
135 #define LPC_MAC2_PURE_PREAMBLE_ENFORCEMENT	(1 << 8)
136 #define LPC_MAC2_LONG_PREAMBLE_ENFORCEMENT	(1 << 9)
137 #define LPC_MAC2_NO_BACKOFF			(1 << 12)
138 #define LPC_MAC2_BACK_PRESSURE			(1 << 13)
139 #define LPC_MAC2_EXCESS_DEFER			(1 << 14)
140 
141 /*
142  * ipgt register definitions
143  */
144 #define LPC_IPGT_LOAD(n)			((n) & 0x7F)
145 
146 /*
147  * ipgr register definitions
148  */
149 #define LPC_IPGR_LOAD_PART2(n)			((n) & 0x7F)
150 #define LPC_IPGR_LOAD_PART1(n)			(((n) & 0x7F) << 8)
151 
152 /*
153  * clrt register definitions
154  */
155 #define LPC_CLRT_LOAD_RETRY_MAX(n)		((n) & 0xF)
156 #define LPC_CLRT_LOAD_COLLISION_WINDOW(n)	(((n) & 0x3F) << 8)
157 
158 /*
159  * maxf register definitions
160  */
161 #define LPC_MAXF_LOAD_MAX_FRAME_LEN(n)		((n) & 0xFFFF)
162 
163 /*
164  * supp register definitions
165  */
166 #define LPC_SUPP_SPEED				(1 << 8)
167 #define LPC_SUPP_RESET_RMII			(1 << 11)
168 
169 /*
170  * test register definitions
171  */
172 #define LPC_TEST_SHORTCUT_PAUSE_QUANTA		(1 << 0)
173 #define LPC_TEST_PAUSE				(1 << 1)
174 #define LPC_TEST_BACKPRESSURE			(1 << 2)
175 
176 /*
177  * mcfg register definitions
178  */
179 #define LPC_MCFG_SCAN_INCREMENT			(1 << 0)
180 #define LPC_MCFG_SUPPRESS_PREAMBLE		(1 << 1)
181 #define LPC_MCFG_CLOCK_SELECT(n)		(((n) & 0x7) << 2)
182 #define LPC_MCFG_CLOCK_HOST_DIV_4		0
183 #define LPC_MCFG_CLOCK_HOST_DIV_6		2
184 #define LPC_MCFG_CLOCK_HOST_DIV_8		3
185 #define LPC_MCFG_CLOCK_HOST_DIV_10		4
186 #define LPC_MCFG_CLOCK_HOST_DIV_14		5
187 #define LPC_MCFG_CLOCK_HOST_DIV_20		6
188 #define LPC_MCFG_CLOCK_HOST_DIV_28		7
189 #define LPC_MCFG_RESET_MII_MGMT			(1 << 15)
190 
191 /*
192  * mcmd register definitions
193  */
194 #define LPC_MCMD_READ				(1 << 0)
195 #define LPC_MCMD_SCAN				(1 << 1)
196 
197 /*
198  * madr register definitions
199  */
200 #define LPC_MADR_REGISTER_ADDRESS(n)		((n) & 0x1F)
201 #define LPC_MADR_PHY_0ADDRESS(n)		(((n) & 0x1F) << 8)
202 
203 /*
204  * mwtd register definitions
205  */
206 #define LPC_MWDT_WRITE(n)			((n) & 0xFFFF)
207 
208 /*
209  * mrdd register definitions
210  */
211 #define LPC_MRDD_READ_MASK			0xFFFF
212 
213 /*
214  * mind register definitions
215  */
216 #define LPC_MIND_BUSY				(1 << 0)
217 #define LPC_MIND_SCANNING			(1 << 1)
218 #define LPC_MIND_NOT_VALID			(1 << 2)
219 #define LPC_MIND_MII_LINK_FAIL			(1 << 3)
220 
221 /*
222  * command register definitions
223  */
224 #define LPC_COMMAND_RXENABLE			(1 << 0)
225 #define LPC_COMMAND_TXENABLE			(1 << 1)
226 #define LPC_COMMAND_REG_RESET			(1 << 3)
227 #define LPC_COMMAND_TXRESET			(1 << 4)
228 #define LPC_COMMAND_RXRESET			(1 << 5)
229 #define LPC_COMMAND_PASSRUNTFRAME		(1 << 6)
230 #define LPC_COMMAND_PASSRXFILTER		(1 << 7)
231 #define LPC_COMMAND_TXFLOWCONTROL		(1 << 8)
232 #define LPC_COMMAND_RMII			(1 << 9)
233 #define LPC_COMMAND_FULLDUPLEX			(1 << 10)
234 
235 /*
236  * status register definitions
237  */
238 #define LPC_STATUS_RXACTIVE			(1 << 0)
239 #define LPC_STATUS_TXACTIVE			(1 << 1)
240 
241 /*
242  * tsv0 register definitions
243  */
244 #define LPC_TSV0_CRC_ERROR			(1 << 0)
245 #define LPC_TSV0_LENGTH_CHECK_ERROR		(1 << 1)
246 #define LPC_TSV0_LENGTH_OUT_OF_RANGE		(1 << 2)
247 #define LPC_TSV0_DONE				(1 << 3)
248 #define LPC_TSV0_MULTICAST			(1 << 4)
249 #define LPC_TSV0_BROADCAST			(1 << 5)
250 #define LPC_TSV0_PACKET_DEFER			(1 << 6)
251 #define LPC_TSV0_ESCESSIVE_DEFER		(1 << 7)
252 #define LPC_TSV0_ESCESSIVE_COLLISION		(1 << 8)
253 #define LPC_TSV0_LATE_COLLISION			(1 << 9)
254 #define LPC_TSV0_GIANT				(1 << 10)
255 #define LPC_TSV0_UNDERRUN			(1 << 11)
256 #define LPC_TSV0_TOTAL_BYTES(n)			(((n) >> 12) & 0xFFFF)
257 #define LPC_TSV0_CONTROL_FRAME			(1 << 28)
258 #define LPC_TSV0_PAUSE				(1 << 29)
259 #define LPC_TSV0_BACKPRESSURE			(1 << 30)
260 #define LPC_TSV0_VLAN				(1 << 31)
261 
262 /*
263  * tsv1 register definitions
264  */
265 #define LPC_TSV1_TRANSMIT_BYTE_COUNT(n)		((n) & 0xFFFF)
266 #define LPC_TSV1_COLLISION_COUNT(n)		(((n) >> 16) & 0xF)
267 
268 /*
269  * rsv register definitions
270  */
271 #define LPC_RSV_RECEIVED_BYTE_COUNT(n)		((n) & 0xFFFF)
272 #define LPC_RSV_RXDV_EVENT_IGNORED		(1 << 16)
273 #define LPC_RSV_RXDV_EVENT_PREVIOUSLY_SEEN	(1 << 17)
274 #define LPC_RSV_CARRIER_EVNT_PREVIOUS_SEEN	(1 << 18)
275 #define LPC_RSV_RECEIVE_CODE_VIOLATION		(1 << 19)
276 #define LPC_RSV_CRC_ERROR			(1 << 20)
277 #define LPC_RSV_LENGTH_CHECK_ERROR		(1 << 21)
278 #define LPC_RSV_LENGTH_OUT_OF_RANGE		(1 << 22)
279 #define LPC_RSV_RECEIVE_OK			(1 << 23)
280 #define LPC_RSV_MULTICAST			(1 << 24)
281 #define LPC_RSV_BROADCAST			(1 << 25)
282 #define LPC_RSV_DRIBBLE_NIBBLE			(1 << 26)
283 #define LPC_RSV_CONTROL_FRAME			(1 << 27)
284 #define LPC_RSV_PAUSE				(1 << 28)
285 #define LPC_RSV_UNSUPPORTED_OPCODE		(1 << 29)
286 #define LPC_RSV_VLAN				(1 << 30)
287 
288 /*
289  * flowcontrolcounter register definitions
290  */
291 #define LPC_FCCR_MIRRORCOUNTER(n)		((n) & 0xFFFF)
292 #define LPC_FCCR_PAUSETIMER(n)			(((n) >> 16) & 0xFFFF)
293 
294 /*
295  * flowcontrolstatus register definitions
296  */
297 #define LPC_FCCR_MIRRORCOUNTERCURRENT(n)	((n) & 0xFFFF)
298 
299 /*
300  * rxfilterctrl, rxfilterwolstatus, and rxfilterwolclear shared
301  * register definitions
302  */
303 #define LPC_RXFLTRW_ACCEPTUNICAST		(1 << 0)
304 #define LPC_RXFLTRW_ACCEPTUBROADCAST		(1 << 1)
305 #define LPC_RXFLTRW_ACCEPTUMULTICAST		(1 << 2)
306 #define LPC_RXFLTRW_ACCEPTUNICASTHASH		(1 << 3)
307 #define LPC_RXFLTRW_ACCEPTUMULTICASTHASH	(1 << 4)
308 #define LPC_RXFLTRW_ACCEPTPERFECT		(1 << 5)
309 
310 /*
311  * rxfilterctrl register definitions
312  */
313 #define LPC_RXFLTRWSTS_MAGICPACKETENWOL		(1 << 12)
314 #define LPC_RXFLTRWSTS_RXFILTERENWOL		(1 << 13)
315 
316 /*
317  * rxfilterwolstatus/rxfilterwolclear register definitions
318  */
319 #define LPC_RXFLTRWSTS_RXFILTERWOL		(1 << 7)
320 #define LPC_RXFLTRWSTS_MAGICPACKETWOL		(1 << 8)
321 
322 /*
323  * intstatus, intenable, intclear, and intset shared register
324  * definitions
325  */
326 #define LPC_MACINT_RXOVERRUNINTEN		(1 << 0)
327 #define LPC_MACINT_RXERRORONINT			(1 << 1)
328 #define LPC_MACINT_RXFINISHEDINTEN		(1 << 2)
329 #define LPC_MACINT_RXDONEINTEN			(1 << 3)
330 #define LPC_MACINT_TXUNDERRUNINTEN		(1 << 4)
331 #define LPC_MACINT_TXERRORINTEN			(1 << 5)
332 #define LPC_MACINT_TXFINISHEDINTEN		(1 << 6)
333 #define LPC_MACINT_TXDONEINTEN			(1 << 7)
334 #define LPC_MACINT_SOFTINTEN			(1 << 12)
335 #define LPC_MACINT_WAKEUPINTEN			(1 << 13)
336 
337 /*
338  * powerdown register definitions
339  */
340 #define LPC_POWERDOWN_MACAHB			(1 << 31)
341 
342 static phy_interface_t lpc_phy_interface_mode(struct device *dev)
343 {
344 	if (dev && dev->of_node) {
345 		const char *mode = of_get_property(dev->of_node,
346 						   "phy-mode", NULL);
347 		if (mode && !strcmp(mode, "mii"))
348 			return PHY_INTERFACE_MODE_MII;
349 	}
350 	return PHY_INTERFACE_MODE_RMII;
351 }
352 
353 static bool use_iram_for_net(struct device *dev)
354 {
355 	if (dev && dev->of_node)
356 		return of_property_read_bool(dev->of_node, "use-iram");
357 	return false;
358 }
359 
360 /* Receive Status information word */
361 #define RXSTATUS_SIZE			0x000007FF
362 #define RXSTATUS_CONTROL		(1 << 18)
363 #define RXSTATUS_VLAN			(1 << 19)
364 #define RXSTATUS_FILTER			(1 << 20)
365 #define RXSTATUS_MULTICAST		(1 << 21)
366 #define RXSTATUS_BROADCAST		(1 << 22)
367 #define RXSTATUS_CRC			(1 << 23)
368 #define RXSTATUS_SYMBOL			(1 << 24)
369 #define RXSTATUS_LENGTH			(1 << 25)
370 #define RXSTATUS_RANGE			(1 << 26)
371 #define RXSTATUS_ALIGN			(1 << 27)
372 #define RXSTATUS_OVERRUN		(1 << 28)
373 #define RXSTATUS_NODESC			(1 << 29)
374 #define RXSTATUS_LAST			(1 << 30)
375 #define RXSTATUS_ERROR			(1 << 31)
376 
377 #define RXSTATUS_STATUS_ERROR \
378 	(RXSTATUS_NODESC | RXSTATUS_OVERRUN | RXSTATUS_ALIGN | \
379 	 RXSTATUS_RANGE | RXSTATUS_LENGTH | RXSTATUS_SYMBOL | RXSTATUS_CRC)
380 
381 /* Receive Descriptor control word */
382 #define RXDESC_CONTROL_SIZE		0x000007FF
383 #define RXDESC_CONTROL_INT		(1 << 31)
384 
385 /* Transmit Status information word */
386 #define TXSTATUS_COLLISIONS_GET(x)	(((x) >> 21) & 0xF)
387 #define TXSTATUS_DEFER			(1 << 25)
388 #define TXSTATUS_EXCESSDEFER		(1 << 26)
389 #define TXSTATUS_EXCESSCOLL		(1 << 27)
390 #define TXSTATUS_LATECOLL		(1 << 28)
391 #define TXSTATUS_UNDERRUN		(1 << 29)
392 #define TXSTATUS_NODESC			(1 << 30)
393 #define TXSTATUS_ERROR			(1 << 31)
394 
395 /* Transmit Descriptor control word */
396 #define TXDESC_CONTROL_SIZE		0x000007FF
397 #define TXDESC_CONTROL_OVERRIDE		(1 << 26)
398 #define TXDESC_CONTROL_HUGE		(1 << 27)
399 #define TXDESC_CONTROL_PAD		(1 << 28)
400 #define TXDESC_CONTROL_CRC		(1 << 29)
401 #define TXDESC_CONTROL_LAST		(1 << 30)
402 #define TXDESC_CONTROL_INT		(1 << 31)
403 
/*
 * Layout of a TX/RX DMA descriptor and of an RX status entry
 */
struct txrx_desc_t {
	__le32 packet;		/* Physical address of the packet buffer */
	__le32 control;		/* Size + control flags (TXDESC_*/RXDESC_*) */
};
struct rx_status_t {
	__le32 statusinfo;	/* RXSTATUS_* flags and received frame size */
	__le32 statushashcrc;	/* Hash CRC word; only cleared by this driver */
};
415 
/*
 * Device driver data structure (per-interface private state)
 */
struct netdata_local {
	struct platform_device	*pdev;		/* Owning platform device */
	struct net_device	*ndev;		/* Associated net device */
	spinlock_t		lock;		/* Guards register/link state */
	void __iomem		*net_base;	/* Mapped MAC register base */
	u32			msg_enable;	/* netif message-level mask */
	unsigned int		skblen[ENET_TX_DESC]; /* Bytes queued per TX slot */
	unsigned int		last_tx_idx;	/* Next TX slot to reclaim */
	unsigned int		num_used_tx_buffs; /* TX slots in flight */
	struct mii_bus		*mii_bus;	/* MDIO bus for the PHY */
	struct phy_device	*phy_dev;	/* Attached PHY */
	struct clk		*clk;		/* MAC peripheral clock */
	dma_addr_t		dma_buff_base_p; /* DMA region physical base */
	void			*dma_buff_base_v; /* DMA region virtual base */
	size_t			dma_buff_size;	/* DMA region size in bytes */
	struct txrx_desc_t	*tx_desc_v;	/* TX descriptor array */
	u32			*tx_stat_v;	/* TX status word array */
	void			*tx_buff_v;	/* TX packet buffer area */
	struct txrx_desc_t	*rx_desc_v;	/* RX descriptor array */
	struct rx_status_t	*rx_stat_v;	/* RX status entry array */
	void			*rx_buff_v;	/* RX packet buffer area */
	int			link;		/* Last observed PHY link state */
	int			speed;		/* Current speed (SPEED_*) */
	int			duplex;		/* Current duplex (DUPLEX_*) */
	struct napi_struct	napi;		/* NAPI polling context */
};
445 
446 /*
447  * MAC support functions
448  */
449 static void __lpc_set_mac(struct netdata_local *pldat, u8 *mac)
450 {
451 	u32 tmp;
452 
453 	/* Set station address */
454 	tmp = mac[0] | ((u32)mac[1] << 8);
455 	writel(tmp, LPC_ENET_SA2(pldat->net_base));
456 	tmp = mac[2] | ((u32)mac[3] << 8);
457 	writel(tmp, LPC_ENET_SA1(pldat->net_base));
458 	tmp = mac[4] | ((u32)mac[5] << 8);
459 	writel(tmp, LPC_ENET_SA0(pldat->net_base));
460 
461 	netdev_dbg(pldat->ndev, "Ethernet MAC address %pM\n", mac);
462 }
463 
464 static void __lpc_get_mac(struct netdata_local *pldat, u8 *mac)
465 {
466 	u32 tmp;
467 
468 	/* Get station address */
469 	tmp = readl(LPC_ENET_SA2(pldat->net_base));
470 	mac[0] = tmp & 0xFF;
471 	mac[1] = tmp >> 8;
472 	tmp = readl(LPC_ENET_SA1(pldat->net_base));
473 	mac[2] = tmp & 0xFF;
474 	mac[3] = tmp >> 8;
475 	tmp = readl(LPC_ENET_SA0(pldat->net_base));
476 	mac[4] = tmp & 0xFF;
477 	mac[5] = tmp >> 8;
478 }
479 
480 static void __lpc_eth_clock_enable(struct netdata_local *pldat,
481 				   bool enable)
482 {
483 	if (enable)
484 		clk_enable(pldat->clk);
485 	else
486 		clk_disable(pldat->clk);
487 }
488 
489 static void __lpc_params_setup(struct netdata_local *pldat)
490 {
491 	u32 tmp;
492 
493 	if (pldat->duplex == DUPLEX_FULL) {
494 		tmp = readl(LPC_ENET_MAC2(pldat->net_base));
495 		tmp |= LPC_MAC2_FULL_DUPLEX;
496 		writel(tmp, LPC_ENET_MAC2(pldat->net_base));
497 		tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
498 		tmp |= LPC_COMMAND_FULLDUPLEX;
499 		writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
500 		writel(LPC_IPGT_LOAD(0x15), LPC_ENET_IPGT(pldat->net_base));
501 	} else {
502 		tmp = readl(LPC_ENET_MAC2(pldat->net_base));
503 		tmp &= ~LPC_MAC2_FULL_DUPLEX;
504 		writel(tmp, LPC_ENET_MAC2(pldat->net_base));
505 		tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
506 		tmp &= ~LPC_COMMAND_FULLDUPLEX;
507 		writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
508 		writel(LPC_IPGT_LOAD(0x12), LPC_ENET_IPGT(pldat->net_base));
509 	}
510 
511 	if (pldat->speed == SPEED_100)
512 		writel(LPC_SUPP_SPEED, LPC_ENET_SUPP(pldat->net_base));
513 	else
514 		writel(0, LPC_ENET_SUPP(pldat->net_base));
515 }
516 
/* Hard-reset the MAC core and the TX/RX datapaths */
static void __lpc_eth_reset(struct netdata_local *pldat)
{
	/* Reset all MAC logic */
	writel((LPC_MAC1_RESET_TX | LPC_MAC1_RESET_MCS_TX | LPC_MAC1_RESET_RX |
		LPC_MAC1_RESET_MCS_RX | LPC_MAC1_SIMULATION_RESET |
		LPC_MAC1_SOFT_RESET), LPC_ENET_MAC1(pldat->net_base));
	/* Reset the datapaths and host registers */
	writel((LPC_COMMAND_REG_RESET | LPC_COMMAND_TXRESET |
		LPC_COMMAND_RXRESET), LPC_ENET_COMMAND(pldat->net_base));
}
526 
/*
 * Reset the MII management block and select the slowest MDC clock.
 * Always returns 0; the int return matches the mii_bus->reset hook.
 */
static int __lpc_mii_mngt_reset(struct netdata_local *pldat)
{
	/* Reset MII management hardware */
	writel(LPC_MCFG_RESET_MII_MGMT, LPC_ENET_MCFG(pldat->net_base));

	/* Setup MII clock to slowest rate with a /28 divider */
	writel(LPC_MCFG_CLOCK_SELECT(LPC_MCFG_CLOCK_HOST_DIV_28),
	       LPC_ENET_MCFG(pldat->net_base));

	return 0;
}
538 
539 static inline phys_addr_t __va_to_pa(void *addr, struct netdata_local *pldat)
540 {
541 	phys_addr_t phaddr;
542 
543 	phaddr = addr - pldat->dma_buff_base_v;
544 	phaddr += pldat->dma_buff_base_p;
545 
546 	return phaddr;
547 }
548 
/* Enable the RX-done and TX-done MAC interrupts */
static void lpc_eth_enable_int(void __iomem *regbase)
{
	writel((LPC_MACINT_RXDONEINTEN | LPC_MACINT_TXDONEINTEN),
	       LPC_ENET_INTENABLE(regbase));
}
554 
/* Mask all MAC interrupts */
static void lpc_eth_disable_int(void __iomem *regbase)
{
	writel(0, LPC_ENET_INTENABLE(regbase));
}
559 
/*
 * Setup TX/RX descriptors.
 *
 * Carves the single pre-allocated DMA region into consecutive,
 * 16-byte-aligned sub-areas (TX descriptors, TX status words, TX
 * buffers, RX descriptors, RX status entries, RX buffers), initializes
 * every descriptor to point at its fixed buffer, and programs the
 * hardware base/count registers accordingly.
 */
static void __lpc_txrx_desc_setup(struct netdata_local *pldat)
{
	u32 *ptxstat;
	void *tbuff;
	int i;
	struct txrx_desc_t *ptxrxdesc;
	struct rx_status_t *prxstat;

	tbuff = PTR_ALIGN(pldat->dma_buff_base_v, 16);

	/* Setup TX descriptors, status, and buffers */
	pldat->tx_desc_v = tbuff;
	tbuff += sizeof(struct txrx_desc_t) * ENET_TX_DESC;

	pldat->tx_stat_v = tbuff;
	tbuff += sizeof(u32) * ENET_TX_DESC;

	tbuff = PTR_ALIGN(tbuff, 16);
	pldat->tx_buff_v = tbuff;
	tbuff += ENET_MAXF_SIZE * ENET_TX_DESC;

	/* Setup RX descriptors, status, and buffers */
	pldat->rx_desc_v = tbuff;
	tbuff += sizeof(struct txrx_desc_t) * ENET_RX_DESC;

	tbuff = PTR_ALIGN(tbuff, 16);
	pldat->rx_stat_v = tbuff;
	tbuff += sizeof(struct rx_status_t) * ENET_RX_DESC;

	tbuff = PTR_ALIGN(tbuff, 16);
	pldat->rx_buff_v = tbuff;
	tbuff += ENET_MAXF_SIZE * ENET_RX_DESC;

	/* Map the TX descriptors to the TX buffers in hardware */
	for (i = 0; i < ENET_TX_DESC; i++) {
		ptxstat = &pldat->tx_stat_v[i];
		ptxrxdesc = &pldat->tx_desc_v[i];

		/* Each TX slot i owns the i-th ENET_MAXF_SIZE buffer */
		ptxrxdesc->packet = __va_to_pa(
				pldat->tx_buff_v + i * ENET_MAXF_SIZE, pldat);
		ptxrxdesc->control = 0;
		*ptxstat = 0;
	}

	/* Map the RX descriptors to the RX buffers in hardware */
	for (i = 0; i < ENET_RX_DESC; i++) {
		prxstat = &pldat->rx_stat_v[i];
		ptxrxdesc = &pldat->rx_desc_v[i];

		/* Interrupt-on-done plus the buffer size (size - 1 encoding) */
		ptxrxdesc->packet = __va_to_pa(
				pldat->rx_buff_v + i * ENET_MAXF_SIZE, pldat);
		ptxrxdesc->control = RXDESC_CONTROL_INT | (ENET_MAXF_SIZE - 1);
		prxstat->statusinfo = 0;
		prxstat->statushashcrc = 0;
	}

	/* Setup base addresses in hardware to point to buffers and
	 * descriptors
	 */
	writel((ENET_TX_DESC - 1),
	       LPC_ENET_TXDESCRIPTORNUMBER(pldat->net_base));
	writel(__va_to_pa(pldat->tx_desc_v, pldat),
	       LPC_ENET_TXDESCRIPTOR(pldat->net_base));
	writel(__va_to_pa(pldat->tx_stat_v, pldat),
	       LPC_ENET_TXSTATUS(pldat->net_base));
	writel((ENET_RX_DESC - 1),
	       LPC_ENET_RXDESCRIPTORNUMBER(pldat->net_base));
	writel(__va_to_pa(pldat->rx_desc_v, pldat),
	       LPC_ENET_RXDESCRIPTOR(pldat->net_base));
	writel(__va_to_pa(pldat->rx_stat_v, pldat),
	       LPC_ENET_RXSTATUS(pldat->net_base));
}
633 
634 static void __lpc_eth_init(struct netdata_local *pldat)
635 {
636 	u32 tmp;
637 
638 	/* Disable controller and reset */
639 	tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
640 	tmp &= ~LPC_COMMAND_RXENABLE | LPC_COMMAND_TXENABLE;
641 	writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
642 	tmp = readl(LPC_ENET_MAC1(pldat->net_base));
643 	tmp &= ~LPC_MAC1_RECV_ENABLE;
644 	writel(tmp, LPC_ENET_MAC1(pldat->net_base));
645 
646 	/* Initial MAC setup */
647 	writel(LPC_MAC1_PASS_ALL_RX_FRAMES, LPC_ENET_MAC1(pldat->net_base));
648 	writel((LPC_MAC2_PAD_CRC_ENABLE | LPC_MAC2_CRC_ENABLE),
649 	       LPC_ENET_MAC2(pldat->net_base));
650 	writel(ENET_MAXF_SIZE, LPC_ENET_MAXF(pldat->net_base));
651 
652 	/* Collision window, gap */
653 	writel((LPC_CLRT_LOAD_RETRY_MAX(0xF) |
654 		LPC_CLRT_LOAD_COLLISION_WINDOW(0x37)),
655 	       LPC_ENET_CLRT(pldat->net_base));
656 	writel(LPC_IPGR_LOAD_PART2(0x12), LPC_ENET_IPGR(pldat->net_base));
657 
658 	if (lpc_phy_interface_mode(&pldat->pdev->dev) == PHY_INTERFACE_MODE_MII)
659 		writel(LPC_COMMAND_PASSRUNTFRAME,
660 		       LPC_ENET_COMMAND(pldat->net_base));
661 	else {
662 		writel((LPC_COMMAND_PASSRUNTFRAME | LPC_COMMAND_RMII),
663 		       LPC_ENET_COMMAND(pldat->net_base));
664 		writel(LPC_SUPP_RESET_RMII, LPC_ENET_SUPP(pldat->net_base));
665 	}
666 
667 	__lpc_params_setup(pldat);
668 
669 	/* Setup TX and RX descriptors */
670 	__lpc_txrx_desc_setup(pldat);
671 
672 	/* Setup packet filtering */
673 	writel((LPC_RXFLTRW_ACCEPTUBROADCAST | LPC_RXFLTRW_ACCEPTPERFECT),
674 	       LPC_ENET_RXFILTER_CTRL(pldat->net_base));
675 
676 	/* Get the next TX buffer output index */
677 	pldat->num_used_tx_buffs = 0;
678 	pldat->last_tx_idx =
679 		readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
680 
681 	/* Clear and enable interrupts */
682 	writel(0xFFFF, LPC_ENET_INTCLEAR(pldat->net_base));
683 	smp_wmb();
684 	lpc_eth_enable_int(pldat->net_base);
685 
686 	/* Enable controller */
687 	tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
688 	tmp |= LPC_COMMAND_RXENABLE | LPC_COMMAND_TXENABLE;
689 	writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
690 	tmp = readl(LPC_ENET_MAC1(pldat->net_base));
691 	tmp |= LPC_MAC1_RECV_ENABLE;
692 	writel(tmp, LPC_ENET_MAC1(pldat->net_base));
693 }
694 
/* Reset the MAC and clear MAC1/MAC2 to leave the controller quiescent */
static void __lpc_eth_shutdown(struct netdata_local *pldat)
{
	/* Reset ethernet and power down PHY */
	__lpc_eth_reset(pldat);
	writel(0, LPC_ENET_MAC1(pldat->net_base));
	writel(0, LPC_ENET_MAC2(pldat->net_base));
}
702 
703 /*
704  * MAC<--->PHY support functions
705  */
706 static int lpc_mdio_read(struct mii_bus *bus, int phy_id, int phyreg)
707 {
708 	struct netdata_local *pldat = bus->priv;
709 	unsigned long timeout = jiffies + msecs_to_jiffies(100);
710 	int lps;
711 
712 	writel(((phy_id << 8) | phyreg), LPC_ENET_MADR(pldat->net_base));
713 	writel(LPC_MCMD_READ, LPC_ENET_MCMD(pldat->net_base));
714 
715 	/* Wait for unbusy status */
716 	while (readl(LPC_ENET_MIND(pldat->net_base)) & LPC_MIND_BUSY) {
717 		if (time_after(jiffies, timeout))
718 			return -EIO;
719 		cpu_relax();
720 	}
721 
722 	lps = readl(LPC_ENET_MRDD(pldat->net_base));
723 	writel(0, LPC_ENET_MCMD(pldat->net_base));
724 
725 	return lps;
726 }
727 
728 static int lpc_mdio_write(struct mii_bus *bus, int phy_id, int phyreg,
729 			u16 phydata)
730 {
731 	struct netdata_local *pldat = bus->priv;
732 	unsigned long timeout = jiffies + msecs_to_jiffies(100);
733 
734 	writel(((phy_id << 8) | phyreg), LPC_ENET_MADR(pldat->net_base));
735 	writel(phydata, LPC_ENET_MWTD(pldat->net_base));
736 
737 	/* Wait for completion */
738 	while (readl(LPC_ENET_MIND(pldat->net_base)) & LPC_MIND_BUSY) {
739 		if (time_after(jiffies, timeout))
740 			return -EIO;
741 		cpu_relax();
742 	}
743 
744 	return 0;
745 }
746 
747 static int lpc_mdio_reset(struct mii_bus *bus)
748 {
749 	return __lpc_mii_mngt_reset((struct netdata_local *)bus->priv);
750 }
751 
/*
 * phylib link-change callback.  Snapshots the PHY's speed/duplex/link
 * under the driver lock, and reprograms the MAC (outside the lock)
 * only when something actually changed.
 */
static void lpc_handle_link_change(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	struct phy_device *phydev = pldat->phy_dev;
	unsigned long flags;

	bool status_change = false;

	spin_lock_irqsave(&pldat->lock, flags);

	/* Track speed/duplex changes while the link is up */
	if (phydev->link) {
		if ((pldat->speed != phydev->speed) ||
		    (pldat->duplex != phydev->duplex)) {
			pldat->speed = phydev->speed;
			pldat->duplex = phydev->duplex;
			status_change = true;
		}
	}

	/* Track link up/down transitions; invalidate settings on down */
	if (phydev->link != pldat->link) {
		if (!phydev->link) {
			pldat->speed = 0;
			pldat->duplex = -1;
		}
		pldat->link = phydev->link;

		status_change = true;
	}

	spin_unlock_irqrestore(&pldat->lock, flags);

	/* Push new parameters into the MAC after dropping the lock */
	if (status_change)
		__lpc_params_setup(pldat);
}
786 
/*
 * Find the first PHY on the MDIO bus and attach to it.
 *
 * Returns 0 on success, -ENODEV if no PHY is present, or the
 * phy_connect() error.  Uses the old phylib API (phy_connect with a
 * flags argument, bitmask phydev->supported/advertising).
 */
static int lpc_mii_probe(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	struct phy_device *phydev = phy_find_first(pldat->mii_bus);

	if (!phydev) {
		netdev_err(ndev, "no PHY found\n");
		return -ENODEV;
	}

	/* Attach to the PHY */
	if (lpc_phy_interface_mode(&pldat->pdev->dev) == PHY_INTERFACE_MODE_MII)
		netdev_info(ndev, "using MII interface\n");
	else
		netdev_info(ndev, "using RMII interface\n");
	phydev = phy_connect(ndev, dev_name(&phydev->dev),
			     &lpc_handle_link_change, 0,
			     lpc_phy_interface_mode(&pldat->pdev->dev));

	if (IS_ERR(phydev)) {
		netdev_err(ndev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* mask with MAC supported features (10/100 only) */
	phydev->supported &= PHY_BASIC_FEATURES;

	phydev->advertising = phydev->supported;

	/* Link state starts unknown; lpc_handle_link_change fills it in */
	pldat->link = 0;
	pldat->speed = 0;
	pldat->duplex = -1;
	pldat->phy_dev = phydev;

	netdev_info(ndev,
		"attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
		phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
	return 0;
}
826 
/*
 * Allocate, configure and register the MDIO bus, then probe the PHY.
 * Resources are unwound in reverse order via goto labels on failure.
 *
 * NOTE(review): when mdiobus_register() or lpc_mii_probe() fails, the
 * initial -ENXIO is returned instead of the callee's error code.
 */
static int lpc_mii_init(struct netdata_local *pldat)
{
	int err = -ENXIO, i;

	pldat->mii_bus = mdiobus_alloc();
	if (!pldat->mii_bus) {
		err = -ENOMEM;
		goto err_out;
	}

	/* Setup MII mode (must match __lpc_eth_init's COMMAND setting) */
	if (lpc_phy_interface_mode(&pldat->pdev->dev) == PHY_INTERFACE_MODE_MII)
		writel(LPC_COMMAND_PASSRUNTFRAME,
		       LPC_ENET_COMMAND(pldat->net_base));
	else {
		writel((LPC_COMMAND_PASSRUNTFRAME | LPC_COMMAND_RMII),
		       LPC_ENET_COMMAND(pldat->net_base));
		writel(LPC_SUPP_RESET_RMII, LPC_ENET_SUPP(pldat->net_base));
	}

	pldat->mii_bus->name = "lpc_mii_bus";
	pldat->mii_bus->read = &lpc_mdio_read;
	pldat->mii_bus->write = &lpc_mdio_write;
	pldat->mii_bus->reset = &lpc_mdio_reset;
	snprintf(pldat->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 pldat->pdev->name, pldat->pdev->id);
	pldat->mii_bus->priv = pldat;
	pldat->mii_bus->parent = &pldat->pdev->dev;

	/* Per-address PHY IRQ table; all entries polled */
	pldat->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
	if (!pldat->mii_bus->irq) {
		err = -ENOMEM;
		goto err_out_1;
	}

	for (i = 0; i < PHY_MAX_ADDR; i++)
		pldat->mii_bus->irq[i] = PHY_POLL;

	platform_set_drvdata(pldat->pdev, pldat->mii_bus);

	if (mdiobus_register(pldat->mii_bus))
		goto err_out_free_mdio_irq;

	if (lpc_mii_probe(pldat->ndev) != 0)
		goto err_out_unregister_bus;

	return 0;

err_out_unregister_bus:
	mdiobus_unregister(pldat->mii_bus);
err_out_free_mdio_irq:
	kfree(pldat->mii_bus->irq);
err_out_1:
	mdiobus_free(pldat->mii_bus);
err_out:
	return err;
}
884 
/*
 * Reclaim completed TX slots between last_tx_idx and the hardware
 * consume index, accounting errors/collisions into ndev->stats, and
 * wake the queue once at least half the ring is free.  Called with the
 * TX queue lock held (see lpc_eth_poll).
 */
static void __lpc_handle_xmit(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	u32 txcidx, *ptxstat, txstat;

	txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
	while (pldat->last_tx_idx != txcidx) {
		unsigned int skblen = pldat->skblen[pldat->last_tx_idx];

		/* A buffer is available, get buffer status */
		ptxstat = &pldat->tx_stat_v[pldat->last_tx_idx];
		txstat = *ptxstat;

		/* Next buffer and decrement used buffer counter */
		pldat->num_used_tx_buffs--;
		pldat->last_tx_idx++;
		if (pldat->last_tx_idx >= ENET_TX_DESC)
			pldat->last_tx_idx = 0;

		/* Update collision counter */
		ndev->stats.collisions += TXSTATUS_COLLISIONS_GET(txstat);

		/* Any errors occurred? */
		if (txstat & TXSTATUS_ERROR) {
			if (txstat & TXSTATUS_UNDERRUN) {
				/* FIFO underrun */
				ndev->stats.tx_fifo_errors++;
			}
			if (txstat & TXSTATUS_LATECOLL) {
				/* Late collision */
				ndev->stats.tx_aborted_errors++;
			}
			if (txstat & TXSTATUS_EXCESSCOLL) {
				/* Excessive collision */
				ndev->stats.tx_aborted_errors++;
			}
			if (txstat & TXSTATUS_EXCESSDEFER) {
				/* Defer limit */
				ndev->stats.tx_aborted_errors++;
			}
			ndev->stats.tx_errors++;
		} else {
			/* Update stats */
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += skblen;
		}

		/* Re-read: hardware may have completed more while we ran */
		txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
	}

	/* Wake the queue once at least half the TX ring is free */
	if (pldat->num_used_tx_buffs <= ENET_TX_DESC/2) {
		if (netif_queue_stopped(ndev))
			netif_wake_queue(ndev);
	}
}
940 
/*
 * NAPI receive worker: drain up to @budget frames from the RX ring.
 * Each good frame is copied out of the fixed DMA buffer into a fresh
 * skb and handed to the stack; the consume index is advanced (and
 * written back to hardware) after every slot.  Returns the number of
 * slots processed.
 *
 * NOTE(review): statusinfo (__le32) is used directly in arithmetic;
 * this is only correct on little-endian hosts — presumably fine for
 * LPC32xx, but verify if ever reused elsewhere.
 */
static int __lpc_handle_recv(struct net_device *ndev, int budget)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	struct sk_buff *skb;
	u32 rxconsidx, len, ethst;
	struct rx_status_t *prxstat;
	u8 *prdbuf;
	int rx_done = 0;

	/* Get the current RX buffer indexes */
	rxconsidx = readl(LPC_ENET_RXCONSUMEINDEX(pldat->net_base));
	while (rx_done < budget && rxconsidx !=
			readl(LPC_ENET_RXPRODUCEINDEX(pldat->net_base))) {
		/* Get pointer to receive status */
		prxstat = &pldat->rx_stat_v[rxconsidx];
		len = (prxstat->statusinfo & RXSTATUS_SIZE) + 1;

		/* Status error? (a lone range error is not treated as fatal) */
		ethst = prxstat->statusinfo;
		if ((ethst & (RXSTATUS_ERROR | RXSTATUS_STATUS_ERROR)) ==
		    (RXSTATUS_ERROR | RXSTATUS_RANGE))
			ethst &= ~RXSTATUS_ERROR;

		if (ethst & RXSTATUS_ERROR) {
			int si = prxstat->statusinfo;
			/* Check statuses */
			if (si & RXSTATUS_OVERRUN) {
				/* Overrun error */
				ndev->stats.rx_fifo_errors++;
			} else if (si & RXSTATUS_CRC) {
				/* CRC error */
				ndev->stats.rx_crc_errors++;
			} else if (si & RXSTATUS_LENGTH) {
				/* Length error */
				ndev->stats.rx_length_errors++;
			} else if (si & RXSTATUS_ERROR) {
				/* Other error */
				ndev->stats.rx_length_errors++;
			}
			ndev->stats.rx_errors++;
		} else {
			/* Packet is good */
			skb = dev_alloc_skb(len);
			if (!skb) {
				ndev->stats.rx_dropped++;
			} else {
				prdbuf = skb_put(skb, len);

				/* Copy packet from buffer */
				memcpy(prdbuf, pldat->rx_buff_v +
					rxconsidx * ENET_MAXF_SIZE, len);

				/* Pass to upper layer */
				skb->protocol = eth_type_trans(skb, ndev);
				netif_receive_skb(skb);
				ndev->stats.rx_packets++;
				ndev->stats.rx_bytes += len;
			}
		}

		/* Increment consume index (with wrap) and return the slot */
		rxconsidx = rxconsidx + 1;
		if (rxconsidx >= ENET_RX_DESC)
			rxconsidx = 0;
		writel(rxconsidx,
		       LPC_ENET_RXCONSUMEINDEX(pldat->net_base));
		rx_done++;
	}

	return rx_done;
}
1012 
1013 static int lpc_eth_poll(struct napi_struct *napi, int budget)
1014 {
1015 	struct netdata_local *pldat = container_of(napi,
1016 			struct netdata_local, napi);
1017 	struct net_device *ndev = pldat->ndev;
1018 	int rx_done = 0;
1019 	struct netdev_queue *txq = netdev_get_tx_queue(ndev, 0);
1020 
1021 	__netif_tx_lock(txq, smp_processor_id());
1022 	__lpc_handle_xmit(ndev);
1023 	__netif_tx_unlock(txq);
1024 	rx_done = __lpc_handle_recv(ndev, budget);
1025 
1026 	if (rx_done < budget) {
1027 		napi_complete(napi);
1028 		lpc_eth_enable_int(pldat->net_base);
1029 	}
1030 
1031 	return rx_done;
1032 }
1033 
1034 static irqreturn_t __lpc_eth_interrupt(int irq, void *dev_id)
1035 {
1036 	struct net_device *ndev = dev_id;
1037 	struct netdata_local *pldat = netdev_priv(ndev);
1038 	u32 tmp;
1039 
1040 	spin_lock(&pldat->lock);
1041 
1042 	tmp = readl(LPC_ENET_INTSTATUS(pldat->net_base));
1043 	/* Clear interrupts */
1044 	writel(tmp, LPC_ENET_INTCLEAR(pldat->net_base));
1045 
1046 	lpc_eth_disable_int(pldat->net_base);
1047 	if (likely(napi_schedule_prep(&pldat->napi)))
1048 		__napi_schedule(&pldat->napi);
1049 
1050 	spin_unlock(&pldat->lock);
1051 
1052 	return IRQ_HANDLED;
1053 }
1054 
1055 static int lpc_eth_close(struct net_device *ndev)
1056 {
1057 	unsigned long flags;
1058 	struct netdata_local *pldat = netdev_priv(ndev);
1059 
1060 	if (netif_msg_ifdown(pldat))
1061 		dev_dbg(&pldat->pdev->dev, "shutting down %s\n", ndev->name);
1062 
1063 	napi_disable(&pldat->napi);
1064 	netif_stop_queue(ndev);
1065 
1066 	if (pldat->phy_dev)
1067 		phy_stop(pldat->phy_dev);
1068 
1069 	spin_lock_irqsave(&pldat->lock, flags);
1070 	__lpc_eth_reset(pldat);
1071 	netif_carrier_off(ndev);
1072 	writel(0, LPC_ENET_MAC1(pldat->net_base));
1073 	writel(0, LPC_ENET_MAC2(pldat->net_base));
1074 	spin_unlock_irqrestore(&pldat->lock, flags);
1075 
1076 	__lpc_eth_clock_enable(pldat, false);
1077 
1078 	return 0;
1079 }
1080 
1081 static int lpc_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1082 {
1083 	struct netdata_local *pldat = netdev_priv(ndev);
1084 	u32 len, txidx;
1085 	u32 *ptxstat;
1086 	struct txrx_desc_t *ptxrxdesc;
1087 
1088 	len = skb->len;
1089 
1090 	spin_lock_irq(&pldat->lock);
1091 
1092 	if (pldat->num_used_tx_buffs >= (ENET_TX_DESC - 1)) {
1093 		/* This function should never be called when there are no
1094 		   buffers */
1095 		netif_stop_queue(ndev);
1096 		spin_unlock_irq(&pldat->lock);
1097 		WARN(1, "BUG! TX request when no free TX buffers!\n");
1098 		return NETDEV_TX_BUSY;
1099 	}
1100 
1101 	/* Get the next TX descriptor index */
1102 	txidx = readl(LPC_ENET_TXPRODUCEINDEX(pldat->net_base));
1103 
1104 	/* Setup control for the transfer */
1105 	ptxstat = &pldat->tx_stat_v[txidx];
1106 	*ptxstat = 0;
1107 	ptxrxdesc = &pldat->tx_desc_v[txidx];
1108 	ptxrxdesc->control =
1109 		(len - 1) | TXDESC_CONTROL_LAST | TXDESC_CONTROL_INT;
1110 
1111 	/* Copy data to the DMA buffer */
1112 	memcpy(pldat->tx_buff_v + txidx * ENET_MAXF_SIZE, skb->data, len);
1113 
1114 	/* Save the buffer and increment the buffer counter */
1115 	pldat->skblen[txidx] = len;
1116 	pldat->num_used_tx_buffs++;
1117 
1118 	/* Start transmit */
1119 	txidx++;
1120 	if (txidx >= ENET_TX_DESC)
1121 		txidx = 0;
1122 	writel(txidx, LPC_ENET_TXPRODUCEINDEX(pldat->net_base));
1123 
1124 	/* Stop queue if no more TX buffers */
1125 	if (pldat->num_used_tx_buffs >= (ENET_TX_DESC - 1))
1126 		netif_stop_queue(ndev);
1127 
1128 	spin_unlock_irq(&pldat->lock);
1129 
1130 	dev_kfree_skb(skb);
1131 	return NETDEV_TX_OK;
1132 }
1133 
1134 static int lpc_set_mac_address(struct net_device *ndev, void *p)
1135 {
1136 	struct sockaddr *addr = p;
1137 	struct netdata_local *pldat = netdev_priv(ndev);
1138 	unsigned long flags;
1139 
1140 	if (!is_valid_ether_addr(addr->sa_data))
1141 		return -EADDRNOTAVAIL;
1142 	memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN);
1143 
1144 	spin_lock_irqsave(&pldat->lock, flags);
1145 
1146 	/* Set station address */
1147 	__lpc_set_mac(pldat, ndev->dev_addr);
1148 
1149 	spin_unlock_irqrestore(&pldat->lock, flags);
1150 
1151 	return 0;
1152 }
1153 
1154 static void lpc_eth_set_multicast_list(struct net_device *ndev)
1155 {
1156 	struct netdata_local *pldat = netdev_priv(ndev);
1157 	struct netdev_hw_addr_list *mcptr = &ndev->mc;
1158 	struct netdev_hw_addr *ha;
1159 	u32 tmp32, hash_val, hashlo, hashhi;
1160 	unsigned long flags;
1161 
1162 	spin_lock_irqsave(&pldat->lock, flags);
1163 
1164 	/* Set station address */
1165 	__lpc_set_mac(pldat, ndev->dev_addr);
1166 
1167 	tmp32 =  LPC_RXFLTRW_ACCEPTUBROADCAST | LPC_RXFLTRW_ACCEPTPERFECT;
1168 
1169 	if (ndev->flags & IFF_PROMISC)
1170 		tmp32 |= LPC_RXFLTRW_ACCEPTUNICAST |
1171 			LPC_RXFLTRW_ACCEPTUMULTICAST;
1172 	if (ndev->flags & IFF_ALLMULTI)
1173 		tmp32 |= LPC_RXFLTRW_ACCEPTUMULTICAST;
1174 
1175 	if (netdev_hw_addr_list_count(mcptr))
1176 		tmp32 |= LPC_RXFLTRW_ACCEPTUMULTICASTHASH;
1177 
1178 	writel(tmp32, LPC_ENET_RXFILTER_CTRL(pldat->net_base));
1179 
1180 
1181 	/* Set initial hash table */
1182 	hashlo = 0x0;
1183 	hashhi = 0x0;
1184 
1185 	/* 64 bits : multicast address in hash table */
1186 	netdev_hw_addr_list_for_each(ha, mcptr) {
1187 		hash_val = (ether_crc(6, ha->addr) >> 23) & 0x3F;
1188 
1189 		if (hash_val >= 32)
1190 			hashhi |= 1 << (hash_val - 32);
1191 		else
1192 			hashlo |= 1 << hash_val;
1193 	}
1194 
1195 	writel(hashlo, LPC_ENET_HASHFILTERL(pldat->net_base));
1196 	writel(hashhi, LPC_ENET_HASHFILTERH(pldat->net_base));
1197 
1198 	spin_unlock_irqrestore(&pldat->lock, flags);
1199 }
1200 
1201 static int lpc_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
1202 {
1203 	struct netdata_local *pldat = netdev_priv(ndev);
1204 	struct phy_device *phydev = pldat->phy_dev;
1205 
1206 	if (!netif_running(ndev))
1207 		return -EINVAL;
1208 
1209 	if (!phydev)
1210 		return -ENODEV;
1211 
1212 	return phy_mii_ioctl(phydev, req, cmd);
1213 }
1214 
1215 static int lpc_eth_open(struct net_device *ndev)
1216 {
1217 	struct netdata_local *pldat = netdev_priv(ndev);
1218 
1219 	if (netif_msg_ifup(pldat))
1220 		dev_dbg(&pldat->pdev->dev, "enabling %s\n", ndev->name);
1221 
1222 	__lpc_eth_clock_enable(pldat, true);
1223 
1224 	/* Reset and initialize */
1225 	__lpc_eth_reset(pldat);
1226 	__lpc_eth_init(pldat);
1227 
1228 	/* schedule a link state check */
1229 	phy_start(pldat->phy_dev);
1230 	netif_start_queue(ndev);
1231 	napi_enable(&pldat->napi);
1232 
1233 	return 0;
1234 }
1235 
1236 /*
1237  * Ethtool ops
1238  */
1239 static void lpc_eth_ethtool_getdrvinfo(struct net_device *ndev,
1240 	struct ethtool_drvinfo *info)
1241 {
1242 	strcpy(info->driver, MODNAME);
1243 	strcpy(info->version, DRV_VERSION);
1244 	strcpy(info->bus_info, dev_name(ndev->dev.parent));
1245 }
1246 
1247 static u32 lpc_eth_ethtool_getmsglevel(struct net_device *ndev)
1248 {
1249 	struct netdata_local *pldat = netdev_priv(ndev);
1250 
1251 	return pldat->msg_enable;
1252 }
1253 
1254 static void lpc_eth_ethtool_setmsglevel(struct net_device *ndev, u32 level)
1255 {
1256 	struct netdata_local *pldat = netdev_priv(ndev);
1257 
1258 	pldat->msg_enable = level;
1259 }
1260 
1261 static int lpc_eth_ethtool_getsettings(struct net_device *ndev,
1262 	struct ethtool_cmd *cmd)
1263 {
1264 	struct netdata_local *pldat = netdev_priv(ndev);
1265 	struct phy_device *phydev = pldat->phy_dev;
1266 
1267 	if (!phydev)
1268 		return -EOPNOTSUPP;
1269 
1270 	return phy_ethtool_gset(phydev, cmd);
1271 }
1272 
1273 static int lpc_eth_ethtool_setsettings(struct net_device *ndev,
1274 	struct ethtool_cmd *cmd)
1275 {
1276 	struct netdata_local *pldat = netdev_priv(ndev);
1277 	struct phy_device *phydev = pldat->phy_dev;
1278 
1279 	if (!phydev)
1280 		return -EOPNOTSUPP;
1281 
1282 	return phy_ethtool_sset(phydev, cmd);
1283 }
1284 
/* ethtool operations: driver info, PHY-backed link settings, debug
 * message level, and generic link-state reporting. */
static const struct ethtool_ops lpc_eth_ethtool_ops = {
	.get_drvinfo	= lpc_eth_ethtool_getdrvinfo,
	.get_settings	= lpc_eth_ethtool_getsettings,
	.set_settings	= lpc_eth_ethtool_setsettings,
	.get_msglevel	= lpc_eth_ethtool_getmsglevel,
	.set_msglevel	= lpc_eth_ethtool_setmsglevel,
	.get_link	= ethtool_op_get_link,
};
1293 
/* Network device operations; generic eth_* helpers are used for
 * address validation and MTU changes. */
static const struct net_device_ops lpc_netdev_ops = {
	.ndo_open		= lpc_eth_open,
	.ndo_stop		= lpc_eth_close,
	.ndo_start_xmit		= lpc_eth_hard_start_xmit,
	.ndo_set_rx_mode	= lpc_eth_set_multicast_list,
	.ndo_do_ioctl		= lpc_eth_ioctl,
	.ndo_set_mac_address	= lpc_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
};
1304 
1305 static int lpc_eth_drv_probe(struct platform_device *pdev)
1306 {
1307 	struct resource *res;
1308 	struct net_device *ndev;
1309 	struct netdata_local *pldat;
1310 	struct phy_device *phydev;
1311 	dma_addr_t dma_handle;
1312 	int irq, ret;
1313 	u32 tmp;
1314 
1315 	/* Setup network interface for RMII or MII mode */
1316 	tmp = __raw_readl(LPC32XX_CLKPWR_MACCLK_CTRL);
1317 	tmp &= ~LPC32XX_CLKPWR_MACCTRL_PINS_MSK;
1318 	if (lpc_phy_interface_mode(&pdev->dev) == PHY_INTERFACE_MODE_MII)
1319 		tmp |= LPC32XX_CLKPWR_MACCTRL_USE_MII_PINS;
1320 	else
1321 		tmp |= LPC32XX_CLKPWR_MACCTRL_USE_RMII_PINS;
1322 	__raw_writel(tmp, LPC32XX_CLKPWR_MACCLK_CTRL);
1323 
1324 	/* Get platform resources */
1325 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1326 	irq = platform_get_irq(pdev, 0);
1327 	if ((!res) || (irq < 0) || (irq >= NR_IRQS)) {
1328 		dev_err(&pdev->dev, "error getting resources.\n");
1329 		ret = -ENXIO;
1330 		goto err_exit;
1331 	}
1332 
1333 	/* Allocate net driver data structure */
1334 	ndev = alloc_etherdev(sizeof(struct netdata_local));
1335 	if (!ndev) {
1336 		dev_err(&pdev->dev, "could not allocate device.\n");
1337 		ret = -ENOMEM;
1338 		goto err_exit;
1339 	}
1340 
1341 	SET_NETDEV_DEV(ndev, &pdev->dev);
1342 
1343 	pldat = netdev_priv(ndev);
1344 	pldat->pdev = pdev;
1345 	pldat->ndev = ndev;
1346 
1347 	spin_lock_init(&pldat->lock);
1348 
1349 	/* Save resources */
1350 	ndev->irq = irq;
1351 
1352 	/* Get clock for the device */
1353 	pldat->clk = clk_get(&pdev->dev, NULL);
1354 	if (IS_ERR(pldat->clk)) {
1355 		dev_err(&pdev->dev, "error getting clock.\n");
1356 		ret = PTR_ERR(pldat->clk);
1357 		goto err_out_free_dev;
1358 	}
1359 
1360 	/* Enable network clock */
1361 	__lpc_eth_clock_enable(pldat, true);
1362 
1363 	/* Map IO space */
1364 	pldat->net_base = ioremap(res->start, res->end - res->start + 1);
1365 	if (!pldat->net_base) {
1366 		dev_err(&pdev->dev, "failed to map registers\n");
1367 		ret = -ENOMEM;
1368 		goto err_out_disable_clocks;
1369 	}
1370 	ret = request_irq(ndev->irq, __lpc_eth_interrupt, 0,
1371 			  ndev->name, ndev);
1372 	if (ret) {
1373 		dev_err(&pdev->dev, "error requesting interrupt.\n");
1374 		goto err_out_iounmap;
1375 	}
1376 
1377 	/* Fill in the fields of the device structure with ethernet values. */
1378 	ether_setup(ndev);
1379 
1380 	/* Setup driver functions */
1381 	ndev->netdev_ops = &lpc_netdev_ops;
1382 	ndev->ethtool_ops = &lpc_eth_ethtool_ops;
1383 	ndev->watchdog_timeo = msecs_to_jiffies(2500);
1384 
1385 	/* Get size of DMA buffers/descriptors region */
1386 	pldat->dma_buff_size = (ENET_TX_DESC + ENET_RX_DESC) * (ENET_MAXF_SIZE +
1387 		sizeof(struct txrx_desc_t) + sizeof(struct rx_status_t));
1388 	pldat->dma_buff_base_v = 0;
1389 
1390 	if (use_iram_for_net(&pldat->pdev->dev)) {
1391 		dma_handle = LPC32XX_IRAM_BASE;
1392 		if (pldat->dma_buff_size <= lpc32xx_return_iram_size())
1393 			pldat->dma_buff_base_v =
1394 				io_p2v(LPC32XX_IRAM_BASE);
1395 		else
1396 			netdev_err(ndev,
1397 				"IRAM not big enough for net buffers, using SDRAM instead.\n");
1398 	}
1399 
1400 	if (pldat->dma_buff_base_v == 0) {
1401 		pldat->pdev->dev.coherent_dma_mask = 0xFFFFFFFF;
1402 		pldat->pdev->dev.dma_mask = &pldat->pdev->dev.coherent_dma_mask;
1403 		pldat->dma_buff_size = PAGE_ALIGN(pldat->dma_buff_size);
1404 
1405 		/* Allocate a chunk of memory for the DMA ethernet buffers
1406 		   and descriptors */
1407 		pldat->dma_buff_base_v =
1408 			dma_alloc_coherent(&pldat->pdev->dev,
1409 					   pldat->dma_buff_size, &dma_handle,
1410 					   GFP_KERNEL);
1411 
1412 		if (pldat->dma_buff_base_v == NULL) {
1413 			dev_err(&pdev->dev, "error getting DMA region.\n");
1414 			ret = -ENOMEM;
1415 			goto err_out_free_irq;
1416 		}
1417 	}
1418 	pldat->dma_buff_base_p = dma_handle;
1419 
1420 	netdev_dbg(ndev, "IO address start     :0x%08x\n",
1421 			res->start);
1422 	netdev_dbg(ndev, "IO address size      :%d\n",
1423 			res->end - res->start + 1);
1424 	netdev_dbg(ndev, "IO address (mapped)  :0x%p\n",
1425 			pldat->net_base);
1426 	netdev_dbg(ndev, "IRQ number           :%d\n", ndev->irq);
1427 	netdev_dbg(ndev, "DMA buffer size      :%d\n", pldat->dma_buff_size);
1428 	netdev_dbg(ndev, "DMA buffer P address :0x%08x\n",
1429 			pldat->dma_buff_base_p);
1430 	netdev_dbg(ndev, "DMA buffer V address :0x%p\n",
1431 			pldat->dma_buff_base_v);
1432 
1433 	/* Get MAC address from current HW setting (POR state is all zeros) */
1434 	__lpc_get_mac(pldat, ndev->dev_addr);
1435 
1436 #ifdef CONFIG_OF_NET
1437 	if (!is_valid_ether_addr(ndev->dev_addr)) {
1438 		const char *macaddr = of_get_mac_address(pdev->dev.of_node);
1439 		if (macaddr)
1440 			memcpy(ndev->dev_addr, macaddr, ETH_ALEN);
1441 	}
1442 #endif
1443 	if (!is_valid_ether_addr(ndev->dev_addr))
1444 		eth_hw_addr_random(ndev);
1445 
1446 	/* Reset the ethernet controller */
1447 	__lpc_eth_reset(pldat);
1448 
1449 	/* then shut everything down to save power */
1450 	__lpc_eth_shutdown(pldat);
1451 
1452 	/* Set default parameters */
1453 	pldat->msg_enable = NETIF_MSG_LINK;
1454 
1455 	/* Force an MII interface reset and clock setup */
1456 	__lpc_mii_mngt_reset(pldat);
1457 
1458 	/* Force default PHY interface setup in chip, this will probably be
1459 	   changed by the PHY driver */
1460 	pldat->link = 0;
1461 	pldat->speed = 100;
1462 	pldat->duplex = DUPLEX_FULL;
1463 	__lpc_params_setup(pldat);
1464 
1465 	netif_napi_add(ndev, &pldat->napi, lpc_eth_poll, NAPI_WEIGHT);
1466 
1467 	ret = register_netdev(ndev);
1468 	if (ret) {
1469 		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
1470 		goto err_out_dma_unmap;
1471 	}
1472 	platform_set_drvdata(pdev, ndev);
1473 
1474 	if (lpc_mii_init(pldat) != 0)
1475 		goto err_out_unregister_netdev;
1476 
1477 	netdev_info(ndev, "LPC mac at 0x%08x irq %d\n",
1478 	       res->start, ndev->irq);
1479 
1480 	phydev = pldat->phy_dev;
1481 
1482 	device_init_wakeup(&pdev->dev, 1);
1483 	device_set_wakeup_enable(&pdev->dev, 0);
1484 
1485 	return 0;
1486 
1487 err_out_unregister_netdev:
1488 	platform_set_drvdata(pdev, NULL);
1489 	unregister_netdev(ndev);
1490 err_out_dma_unmap:
1491 	if (!use_iram_for_net(&pldat->pdev->dev) ||
1492 	    pldat->dma_buff_size > lpc32xx_return_iram_size())
1493 		dma_free_coherent(&pldat->pdev->dev, pldat->dma_buff_size,
1494 				  pldat->dma_buff_base_v,
1495 				  pldat->dma_buff_base_p);
1496 err_out_free_irq:
1497 	free_irq(ndev->irq, ndev);
1498 err_out_iounmap:
1499 	iounmap(pldat->net_base);
1500 err_out_disable_clocks:
1501 	clk_disable(pldat->clk);
1502 	clk_put(pldat->clk);
1503 err_out_free_dev:
1504 	free_netdev(ndev);
1505 err_exit:
1506 	pr_err("%s: not found (%d).\n", MODNAME, ret);
1507 	return ret;
1508 }
1509 
/* Remove: unregister from the net core first, then release the DMA
 * region, IRQ, MMIO mapping, MDIO bus, and clock — the reverse of the
 * probe order.
 */
static int lpc_eth_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct netdata_local *pldat = netdev_priv(ndev);

	unregister_netdev(ndev);
	platform_set_drvdata(pdev, NULL);

	/* The buffers live in on-chip IRAM (not a DMA API allocation) only
	 * when IRAM was requested AND was large enough at probe time;
	 * otherwise free the coherent allocation (mirrors probe logic). */
	if (!use_iram_for_net(&pldat->pdev->dev) ||
	    pldat->dma_buff_size > lpc32xx_return_iram_size())
		dma_free_coherent(&pldat->pdev->dev, pldat->dma_buff_size,
				  pldat->dma_buff_base_v,
				  pldat->dma_buff_base_p);
	free_irq(ndev->irq, ndev);
	iounmap(pldat->net_base);
	mdiobus_unregister(pldat->mii_bus);
	mdiobus_free(pldat->mii_bus);
	clk_disable(pldat->clk);
	clk_put(pldat->clk);
	free_netdev(ndev);

	return 0;
}
1533 
1534 #ifdef CONFIG_PM
1535 static int lpc_eth_drv_suspend(struct platform_device *pdev,
1536 	pm_message_t state)
1537 {
1538 	struct net_device *ndev = platform_get_drvdata(pdev);
1539 	struct netdata_local *pldat = netdev_priv(ndev);
1540 
1541 	if (device_may_wakeup(&pdev->dev))
1542 		enable_irq_wake(ndev->irq);
1543 
1544 	if (ndev) {
1545 		if (netif_running(ndev)) {
1546 			netif_device_detach(ndev);
1547 			__lpc_eth_shutdown(pldat);
1548 			clk_disable(pldat->clk);
1549 
1550 			/*
1551 			 * Reset again now clock is disable to be sure
1552 			 * EMC_MDC is down
1553 			 */
1554 			__lpc_eth_reset(pldat);
1555 		}
1556 	}
1557 
1558 	return 0;
1559 }
1560 
1561 static int lpc_eth_drv_resume(struct platform_device *pdev)
1562 {
1563 	struct net_device *ndev = platform_get_drvdata(pdev);
1564 	struct netdata_local *pldat;
1565 
1566 	if (device_may_wakeup(&pdev->dev))
1567 		disable_irq_wake(ndev->irq);
1568 
1569 	if (ndev) {
1570 		if (netif_running(ndev)) {
1571 			pldat = netdev_priv(ndev);
1572 
1573 			/* Enable interface clock */
1574 			clk_enable(pldat->clk);
1575 
1576 			/* Reset and initialize */
1577 			__lpc_eth_reset(pldat);
1578 			__lpc_eth_init(pldat);
1579 
1580 			netif_device_attach(ndev);
1581 		}
1582 	}
1583 
1584 	return 0;
1585 }
1586 #endif
1587 
#ifdef CONFIG_OF
/* Device-tree match table: binds this driver to "nxp,lpc-eth" nodes */
static const struct of_device_id lpc_eth_match[] = {
	{ .compatible = "nxp,lpc-eth" },
	{ }	/* sentinel */
};
MODULE_DEVICE_TABLE(of, lpc_eth_match);
#endif
1595 
/* Platform driver glue; legacy suspend/resume hooks are provided only
 * when power management is enabled. */
static struct platform_driver lpc_eth_driver = {
	.probe		= lpc_eth_drv_probe,
	.remove		= lpc_eth_drv_remove,
#ifdef CONFIG_PM
	.suspend	= lpc_eth_drv_suspend,
	.resume		= lpc_eth_drv_resume,
#endif
	.driver		= {
		.name	= MODNAME,
		.of_match_table = of_match_ptr(lpc_eth_match),
	},
};
1608 
/* Standard module registration and metadata */
module_platform_driver(lpc_eth_driver);

MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
MODULE_DESCRIPTION("LPC Ethernet Driver");
MODULE_LICENSE("GPL");
1615