/*
 * Freescale Ethernet controllers
 *
 * Copyright (c) 2005 Intracom S.A.
 *  by Pantelis Antoniou <panto@intracom.gr>
 *
 * 2005 (c) MontaVista Software, Inc.
 * Vitaly Bordug <vbordug@ru.mvista.com>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/gfp.h>

#include <asm/irq.h>
#include <asm/uaccess.h>

#ifdef CONFIG_8xx
#include <asm/8xx_immap.h>
#include <asm/pgtable.h>
#include <asm/cpm1.h>
#endif

#include "fs_enet.h"
#include "fec.h"

/*************************************************/

#if defined(CONFIG_CPM1)
/* On CPM1 the __raw_* accessors are sufficient. */
#define __fs_out32(addr, x)	__raw_writel(x, addr)
#define __fs_out16(addr, x)	__raw_writew(x, addr)
#define __fs_in32(addr)	__raw_readl(addr)
#define __fs_in16(addr)	__raw_readw(addr)
#else
/* For other platforms, play it safe and use accessors with I/O barriers. */
#define __fs_out32(addr, x)	out_be32(addr, x)
#define __fs_out16(addr, x)	out_be16(addr, x)
#define __fs_in32(addr)	in_be32(addr)
#define __fs_in16(addr)	in_be16(addr)
#endif

/* write */
#define FW(_fecp, _reg, _v) __fs_out32(&(_fecp)->fec_ ## _reg, (_v))

/* read */
#define FR(_fecp, _reg)	__fs_in32(&(_fecp)->fec_ ## _reg)

/* set bits */
#define FS(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) | (_v))

/* clear bits */
#define FC(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) & ~(_v))
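
/*
 * Example: FS(fecp, imask, FEC_NAPI_RX_EVENT_MSK) reads fec_imask, ORs in
 * the RX event bits and writes the result back; FC() clears bits the same
 * way.
 */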

/*
 * Delay to wait for FEC reset command to complete (in us)
 */
#define FEC_RESET_DELAY		50

static int whack_reset(struct fec __iomem *fecp)
{
	int i;

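	/* Assert the reset bit and poll until the controller clears it. */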
	FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_RESET);
	for (i = 0; i < FEC_RESET_DELAY; i++) {
		if ((FR(fecp, ecntrl) & FEC_ECNTRL_RESET) == 0)
			return 0;	/* OK */
		udelay(1);
	}

	return -1;
}

static int do_pd_setup(struct fs_enet_private *fep)
{
	struct platform_device *ofdev = to_platform_device(fep->dev);

	fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
	if (fep->interrupt == NO_IRQ)
		return -EINVAL;

	fep->fec.fecp = of_iomap(ofdev->dev.of_node, 0);
	if (!fep->fec.fecp)
		return -EINVAL;

	return 0;
}

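/*
 * Interrupt event groupings: the RXF/TXF bits signal a completed frame,
 * the RXB/TXB bits a single buffer; error conditions are masked
 * separately.
 */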
#define FEC_NAPI_RX_EVENT_MSK	(FEC_ENET_RXF | FEC_ENET_RXB)
#define FEC_NAPI_TX_EVENT_MSK	(FEC_ENET_TXF | FEC_ENET_TXB)
#define FEC_RX_EVENT		(FEC_ENET_RXF)
#define FEC_TX_EVENT		(FEC_ENET_TXF)
#define FEC_ERR_EVENT_MSK	(FEC_ENET_HBERR | FEC_ENET_BABR | \
				 FEC_ENET_BABT | FEC_ENET_EBERR)

static int setup_data(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	if (do_pd_setup(fep) != 0)
		return -EINVAL;

	fep->fec.hthi = 0;
	fep->fec.htlo = 0;

	fep->ev_napi_rx = FEC_NAPI_RX_EVENT_MSK;
	fep->ev_napi_tx = FEC_NAPI_TX_EVENT_MSK;
	fep->ev_rx = FEC_RX_EVENT;
	fep->ev_tx = FEC_TX_EVENT;
	fep->ev_err = FEC_ERR_EVENT_MSK;

	return 0;
}

static int allocate_bd(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;

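	/* The RX and TX descriptor rings share one contiguous DMA-coherent block. */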
	fep->ring_base = (void __force __iomem *)dma_alloc_coherent(fep->dev,
					    (fpi->tx_ring + fpi->rx_ring) *
					    sizeof(cbd_t), &fep->ring_mem_addr,
					    GFP_KERNEL);
	if (fep->ring_base == NULL)
		return -ENOMEM;

	return 0;
}

static void free_bd(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;

	if (fep->ring_base)
		dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring)
					* sizeof(cbd_t),
					(void __force *)fep->ring_base,
					fep->ring_mem_addr);
}

static void cleanup_data(struct net_device *dev)
{
	/* nothing */
}

static void set_promiscuous_mode(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct fec __iomem *fecp = fep->fec.fecp;

	FS(fecp, r_cntrl, FEC_RCNTRL_PROM);
}

static void set_multicast_start(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	fep->fec.hthi = 0;
	fep->fec.htlo = 0;
}

static void set_multicast_one(struct net_device *dev, const u8 *mac)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	int temp, hash_index, i, j;
	u32 crc, csrVal;
	u8 byte, msb;

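	/*
	 * Compute the Ethernet CRC-32 of the address, then use six bits of
	 * the result to select one bit in the 64-bit group hash table
	 * (hthi/htlo).
	 */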
	crc = 0xffffffff;
	for (i = 0; i < 6; i++) {
		byte = mac[i];
		for (j = 0; j < 8; j++) {
			msb = crc >> 31;
			crc <<= 1;
			if (msb ^ (byte & 0x1))
				crc ^= FEC_CRC_POLY;
			byte >>= 1;
		}
	}

	temp = (crc & 0x3f) >> 1;
	hash_index = ((temp & 0x01) << 4) |
		     ((temp & 0x02) << 2) |
		     ((temp & 0x04)) |
		     ((temp & 0x08) >> 2) |
		     ((temp & 0x10) >> 4);
	csrVal = 1 << hash_index;
	if (crc & 1)
		fep->fec.hthi |= csrVal;
	else
		fep->fec.htlo |= csrVal;
}

static void set_multicast_finish(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct fec __iomem *fecp = fep->fec.fecp;

	/* If all-multicast or too many addresses, just accept everything. */
	if ((dev->flags & IFF_ALLMULTI) != 0 ||
	    netdev_mc_count(dev) > FEC_MAX_MULTICAST_ADDRS) {
		fep->fec.hthi = 0xffffffffU;
		fep->fec.htlo = 0xffffffffU;
	}

	FC(fecp, r_cntrl, FEC_RCNTRL_PROM);
	FW(fecp, grp_hash_table_high, fep->fec.hthi);
	FW(fecp, grp_hash_table_low, fep->fec.htlo);
}

static void set_multicast_list(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	if ((dev->flags & IFF_PROMISC) == 0) {
		set_multicast_start(dev);
		netdev_for_each_mc_addr(ha, dev)
			set_multicast_one(dev, ha->addr);
		set_multicast_finish(dev);
	} else
		set_promiscuous_mode(dev);
}

static void restart(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct fec __iomem *fecp = fep->fec.fecp;
	const struct fs_platform_info *fpi = fep->fpi;
	dma_addr_t rx_bd_base_phys, tx_bd_base_phys;
	int r;
	u32 addrhi, addrlo;

	struct mii_bus *mii = fep->phydev->bus;
	struct fec_info *fec_inf = mii->priv;

	r = whack_reset(fep->fec.fecp);
	if (r != 0)
		dev_err(fep->dev, "FEC Reset FAILED!\n");
	/*
	 * Set station address.
	 */
	addrhi = ((u32) dev->dev_addr[0] << 24) |
		 ((u32) dev->dev_addr[1] << 16) |
		 ((u32) dev->dev_addr[2] <<  8) |
		  (u32) dev->dev_addr[3];
	addrlo = ((u32) dev->dev_addr[4] << 24) |
		 ((u32) dev->dev_addr[5] << 16);
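	/*
	 * addr_low takes the first four octets of the address, addr_high
	 * the remaining two in its upper half-word.
	 */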
	FW(fecp, addr_low, addrhi);
	FW(fecp, addr_high, addrlo);

	/*
	 * Reset all multicast.
	 */
	FW(fecp, grp_hash_table_high, fep->fec.hthi);
	FW(fecp, grp_hash_table_low, fep->fec.htlo);

	/*
	 * Set maximum receive buffer size.
	 */
	FW(fecp, r_buff_size, PKT_MAXBLR_SIZE);
#ifdef CONFIG_FS_ENET_MPC5121_FEC
	FW(fecp, r_cntrl, PKT_MAXBUF_SIZE << 16);
#else
	FW(fecp, r_hash, PKT_MAXBUF_SIZE);
#endif

	/* get physical address */
	rx_bd_base_phys = fep->ring_mem_addr;
	tx_bd_base_phys = rx_bd_base_phys + sizeof(cbd_t) * fpi->rx_ring;

	/*
	 * Set receive and transmit descriptor base.
	 */
	FW(fecp, r_des_start, rx_bd_base_phys);
	FW(fecp, x_des_start, tx_bd_base_phys);

	fs_init_bds(dev);

	/*
	 * Enable big endian and don't care about SDMA FC.
	 */
#ifdef CONFIG_FS_ENET_MPC5121_FEC
	FS(fecp, dma_control, 0xC0000000);
#else
	FW(fecp, fun_code, 0x78000000);
#endif

	/*
	 * Set MII speed.
	 */
	FW(fecp, mii_speed, fec_inf->mii_speed);

	/*
	 * Clear any outstanding interrupt.
	 */
	FW(fecp, ievent, 0xffc0);
#ifndef CONFIG_FS_ENET_MPC5121_FEC
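	/* Program the upper bits of IVEC from the hardware interrupt number. */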
	FW(fecp, ivec, (virq_to_hw(fep->interrupt) / 2) << 29);

	FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE);	/* MII enable */
#else
	/*
	 * Only set MII/RMII mode - do not touch maximum frame length
	 * configured before.
	 */
	FS(fecp, r_cntrl, fpi->use_rmii ?
			FEC_RCNTRL_RMII_MODE : FEC_RCNTRL_MII_MODE);
#endif
	/*
	 * adjust to duplex mode
	 */
	if (fep->phydev->duplex) {
		FC(fecp, r_cntrl, FEC_RCNTRL_DRT);
		FS(fecp, x_cntrl, FEC_TCNTRL_FDEN);	/* FD enable */
	} else {
		FS(fecp, r_cntrl, FEC_RCNTRL_DRT);
		FC(fecp, x_cntrl, FEC_TCNTRL_FDEN);	/* FD disable */
	}

	/* Restore multicast and promiscuous settings */
	set_multicast_list(dev);

	/*
	 * Enable interrupts we wish to service.
	 */
	FW(fecp, imask, FEC_ENET_TXF | FEC_ENET_TXB |
	   FEC_ENET_RXF | FEC_ENET_RXB);

	/*
	 * And last, enable the transmit and receive processing.
	 */
	FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
	FW(fecp, r_des_active, 0x01000000);
}

static void stop(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;
	struct fec __iomem *fecp = fep->fec.fecp;

	struct fec_info *feci = fep->phydev->bus->priv;

	int i;

	if ((FR(fecp, ecntrl) & FEC_ECNTRL_ETHER_EN) == 0)
		return;		/* already down */

	FW(fecp, x_cntrl, 0x01);	/* Graceful transmit stop */
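	/*
	 * Wait for the graceful stop complete (GRA) event, for at most
	 * FEC_RESET_DELAY microseconds.
	 */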
	for (i = 0; ((FR(fecp, ievent) & 0x10000000) == 0) &&
	     i < FEC_RESET_DELAY; i++)
		udelay(1);

	if (i == FEC_RESET_DELAY)
		dev_warn(fep->dev, "FEC timeout on graceful transmit stop\n");
	/*
	 * Mask all interrupts and disable the controller.
	 */
	FW(fecp, imask, 0);
	FC(fecp, ecntrl, FEC_ECNTRL_ETHER_EN);

	fs_cleanup_bds(dev);

	/*
	 * If this FEC also hosts the MII bus, keep it running so the PHY
	 * stays reachable.
	 */
	if (fpi->has_phy) {
		FS(fecp, r_cntrl, fpi->use_rmii ?
				FEC_RCNTRL_RMII_MODE :
				FEC_RCNTRL_MII_MODE);	/* MII/RMII enable */
		FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
		FW(fecp, ievent, FEC_ENET_MII);
		FW(fecp, mii_speed, feci->mii_speed);
	}
}

static void napi_clear_rx_event(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct fec __iomem *fecp = fep->fec.fecp;

	FW(fecp, ievent, FEC_NAPI_RX_EVENT_MSK);
}

static void napi_enable_rx(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct fec __iomem *fecp = fep->fec.fecp;

	FS(fecp, imask, FEC_NAPI_RX_EVENT_MSK);
}

static void napi_disable_rx(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct fec __iomem *fecp = fep->fec.fecp;

	FC(fecp, imask, FEC_NAPI_RX_EVENT_MSK);
}

static void napi_clear_tx_event(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct fec __iomem *fecp = fep->fec.fecp;

	FW(fecp, ievent, FEC_NAPI_TX_EVENT_MSK);
}

static void napi_enable_tx(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct fec __iomem *fecp = fep->fec.fecp;

	FS(fecp, imask, FEC_NAPI_TX_EVENT_MSK);
}

static void napi_disable_tx(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct fec __iomem *fecp = fep->fec.fecp;

	FC(fecp, imask, FEC_NAPI_TX_EVENT_MSK);
}

static void rx_bd_done(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct fec __iomem *fecp = fep->fec.fecp;

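	/*
	 * Writing the descriptor-active bit prompts the FEC to resume
	 * polling the receive ring.
	 */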
	FW(fecp, r_des_active, 0x01000000);
}

static void tx_kickstart(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct fec __iomem *fecp = fep->fec.fecp;

	FW(fecp, x_des_active, 0x01000000);
}

static u32 get_int_events(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct fec __iomem *fecp = fep->fec.fecp;

	return FR(fecp, ievent) & FR(fecp, imask);
}

static void clear_int_events(struct net_device *dev, u32 int_events)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct fec __iomem *fecp = fep->fec.fecp;

	FW(fecp, ievent, int_events);
}

static void ev_error(struct net_device *dev, u32 int_events)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	dev_warn(fep->dev, "FEC ERROR(s) 0x%x\n", int_events);
}

static int get_regs(struct net_device *dev, void *p, int *sizep)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	if (*sizep < sizeof(struct fec))
		return -EINVAL;

	memcpy_fromio(p, fep->fec.fecp, sizeof(struct fec));

	return 0;
}

static int get_regs_len(struct net_device *dev)
{
	return sizeof(struct fec);
}

static void tx_restart(struct net_device *dev)
{
	/* nothing */
}

/*************************************************************************/

const struct fs_ops fs_fec_ops = {
	.setup_data		= setup_data,
	.cleanup_data		= cleanup_data,
	.set_multicast_list	= set_multicast_list,
	.restart		= restart,
	.stop			= stop,
	.napi_clear_rx_event	= napi_clear_rx_event,
	.napi_enable_rx		= napi_enable_rx,
	.napi_disable_rx	= napi_disable_rx,
	.napi_clear_tx_event	= napi_clear_tx_event,
	.napi_enable_tx		= napi_enable_tx,
	.napi_disable_tx	= napi_disable_tx,
	.rx_bd_done		= rx_bd_done,
	.tx_kickstart		= tx_kickstart,
	.get_int_events		= get_int_events,
	.clear_int_events	= clear_int_events,
	.ev_error		= ev_error,
	.get_regs		= get_regs,
	.get_regs_len		= get_regs_len,
	.tx_restart		= tx_restart,
	.allocate_bd		= allocate_bd,
	.free_bd		= free_bd,
};