/*
 * Ethernet on Serial Communications Controller (SCC) driver for Motorola MPC8xx and MPC82xx.
 *
 * Copyright (c) 2003 Intracom S.A.
 *  by Pantelis Antoniou <panto@intracom.gr>
 *
 * 2005 (c) MontaVista Software, Inc.
 * Vitaly Bordug <vbordug@ru.mvista.com>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>

#include <asm/irq.h>
#include <asm/uaccess.h>

#ifdef CONFIG_8xx
#include <asm/8xx_immap.h>
#include <asm/pgtable.h>
#include <asm/cpm1.h>
#endif

#include "fs_enet.h"
/*************************************************/
#if defined(CONFIG_CPM1)
/* for an 8xx the __raw_* accessors are sufficient */
#define __fs_out32(addr, x)	__raw_writel(x, addr)
#define __fs_out16(addr, x)	__raw_writew(x, addr)
#define __fs_out8(addr, x)	__raw_writeb(x, addr)
#define __fs_in32(addr)	__raw_readl(addr)
#define __fs_in16(addr)	__raw_readw(addr)
#define __fs_in8(addr)	__raw_readb(addr)
#else
/* for others play it safe */
#define __fs_out32(addr, x)	out_be32(addr, x)
#define __fs_out16(addr, x)	out_be16(addr, x)
#define __fs_in32(addr)	in_be32(addr)
#define __fs_in16(addr)	in_be16(addr)
#define __fs_out8(addr, x)	out_8(addr, x)
#define __fs_in8(addr)	in_8(addr)
#endif

/* write, read, set bits, clear bits */
#define W32(_p, _m, _v) __fs_out32(&(_p)->_m, (_v))
#define R32(_p, _m)     __fs_in32(&(_p)->_m)
#define S32(_p, _m, _v) W32(_p, _m, R32(_p, _m) | (_v))
#define C32(_p, _m, _v) W32(_p, _m, R32(_p, _m) & ~(_v))

#define W16(_p, _m, _v) __fs_out16(&(_p)->_m, (_v))
#define R16(_p, _m)     __fs_in16(&(_p)->_m)
#define S16(_p, _m, _v) W16(_p, _m, R16(_p, _m) | (_v))
#define C16(_p, _m, _v) W16(_p, _m, R16(_p, _m) & ~(_v))

#define W8(_p, _m, _v)  __fs_out8(&(_p)->_m, (_v))
#define R8(_p, _m)      __fs_in8(&(_p)->_m)
#define S8(_p, _m, _v)  W8(_p, _m, R8(_p, _m) | (_v))
#define C8(_p, _m, _v)  W8(_p, _m, R8(_p, _m) & ~(_v))

#define SCC_MAX_MULTICAST_ADDRS	64

/*
 * Time to wait (in us) for a graceful transmit stop to complete
 * before the SCC is disabled
 */
#define SCC_RESET_DELAY		50

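/*
 * Issue a CPM command register (CPCR) command addressed to this SCC's
 * channel; cpm_command() busy-waits for the CPM to complete it.
 */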
static inline int scc_cr_cmd(struct fs_enet_private *fep, u32 op)
{
	const struct fs_platform_info *fpi = fep->fpi;

	return cpm_command(fpi->cp_command, op);
}

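/*
 * Resolve the SCC's interrupt and map its register block (first "reg"
 * entry) and parameter RAM (second "reg" entry) from the device tree.
 */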
static int do_pd_setup(struct fs_enet_private *fep)
{
	struct platform_device *ofdev = to_platform_device(fep->dev);

	fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
	if (fep->interrupt == NO_IRQ)
		return -EINVAL;

	fep->scc.sccp = of_iomap(ofdev->dev.of_node, 0);
	if (!fep->scc.sccp)
		return -EINVAL;

	fep->scc.ep = of_iomap(ofdev->dev.of_node, 1);
	if (!fep->scc.ep) {
		iounmap(fep->scc.sccp);
		return -EINVAL;
	}

	return 0;
}

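/*
 * SCC Ethernet event (SCCE) bits of interest: RXF/RXB for received
 * frames and buffers, TXB for completed transmit buffers, TXE for
 * transmit errors and BSY for frames dropped because no receive
 * buffer was available.
 */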
#define SCC_NAPI_RX_EVENT_MSK	(SCCE_ENET_RXF | SCCE_ENET_RXB)
#define SCC_NAPI_TX_EVENT_MSK	(SCCE_ENET_TXB)
#define SCC_RX_EVENT		(SCCE_ENET_RXF)
#define SCC_TX_EVENT		(SCCE_ENET_TXB)
#define SCC_ERR_EVENT_MSK	(SCCE_ENET_TXE | SCCE_ENET_BSY)

static int setup_data(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	if (do_pd_setup(fep) != 0)
		return -EINVAL;

	fep->scc.hthi = 0;
	fep->scc.htlo = 0;

	fep->ev_napi_rx = SCC_NAPI_RX_EVENT_MSK;
	fep->ev_napi_tx = SCC_NAPI_TX_EVENT_MSK;
	fep->ev_rx = SCC_RX_EVENT;
	fep->ev_tx = SCC_TX_EVENT | SCCE_ENET_TXE;
	fep->ev_err = SCC_ERR_EVENT_MSK;

	return 0;
}

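/*
 * The buffer descriptor rings must live in the CPM dual-port RAM:
 * carve out room for the Tx and Rx rings (8-byte aligned) and convert
 * the dual-port RAM offset into a virtual address for the driver.
 */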
static int allocate_bd(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;

	fep->ring_mem_addr = cpm_dpalloc((fpi->tx_ring + fpi->rx_ring) *
					 sizeof(cbd_t), 8);
	if (IS_ERR_VALUE(fep->ring_mem_addr))
		return -ENOMEM;

	fep->ring_base = (void __iomem __force *)
		cpm_dpram_addr(fep->ring_mem_addr);

	return 0;
}

static void free_bd(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	if (fep->ring_base)
		cpm_dpfree(fep->ring_mem_addr);
}

static void cleanup_data(struct net_device *dev)
{
	/* nothing */
}

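/* PSMR[PRO] puts the SCC in promiscuous mode: accept every frame */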
static void set_promiscuous_mode(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;

	S16(sccp, scc_psmr, SCC_PSMR_PRO);
}

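/*
 * Multicast filtering uses the 64-bit group address hash spread across
 * GADDR1-4 in the parameter RAM.  The hash is cleared first, then each
 * address is written to TADDR and the SET GROUP ADDRESS CPM command
 * sets the corresponding hash bit.
 */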
static void set_multicast_start(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_enet_t __iomem *ep = fep->scc.ep;

	W16(ep, sen_gaddr1, 0);
	W16(ep, sen_gaddr2, 0);
	W16(ep, sen_gaddr3, 0);
	W16(ep, sen_gaddr4, 0);
}

static void set_multicast_one(struct net_device *dev, const u8 *mac)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_enet_t __iomem *ep = fep->scc.ep;
	u16 taddrh, taddrm, taddrl;

	taddrh = ((u16) mac[5] << 8) | mac[4];
	taddrm = ((u16) mac[3] << 8) | mac[2];
	taddrl = ((u16) mac[1] << 8) | mac[0];

	W16(ep, sen_taddrh, taddrh);
	W16(ep, sen_taddrm, taddrm);
	W16(ep, sen_taddrl, taddrl);
	scc_cr_cmd(fep, CPM_CR_SET_GADDR);
}

static void set_multicast_finish(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;
	scc_enet_t __iomem *ep = fep->scc.ep;

	/* clear promiscuous always */
	C16(sccp, scc_psmr, SCC_PSMR_PRO);

	/* if all-multi or too many multicasts, just enable all */
	if ((dev->flags & IFF_ALLMULTI) != 0 ||
	    netdev_mc_count(dev) > SCC_MAX_MULTICAST_ADDRS) {

		W16(ep, sen_gaddr1, 0xffff);
		W16(ep, sen_gaddr2, 0xffff);
		W16(ep, sen_gaddr3, 0xffff);
		W16(ep, sen_gaddr4, 0xffff);
	}
}

static void set_multicast_list(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	if ((dev->flags & IFF_PROMISC) == 0) {
		set_multicast_start(dev);
		netdev_for_each_mc_addr(ha, dev)
			set_multicast_one(dev, ha->addr);
		set_multicast_finish(dev);
	} else
		set_promiscuous_mode(dev);
}

/*
 * This function is called to start or restart the SCC during a link
 * change.  This only happens when switching between half and full
 * duplex.
 */
static void restart(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;
	scc_enet_t __iomem *ep = fep->scc.ep;
	const struct fs_platform_info *fpi = fep->fpi;
	u16 paddrh, paddrm, paddrl;
	const unsigned char *mac;
	int i;

	C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);

	/* clear everything (slow & steady does it) */
	for (i = 0; i < sizeof(*ep); i++)
		__fs_out8((u8 __iomem *)ep + i, 0);

	/* point to bds */
	W16(ep, sen_genscc.scc_rbase, fep->ring_mem_addr);
	W16(ep, sen_genscc.scc_tbase,
	    fep->ring_mem_addr + sizeof(cbd_t) * fpi->rx_ring);

	/* Initialize function code registers for big-endian.
	 */
#ifndef CONFIG_NOT_COHERENT_CACHE
	W8(ep, sen_genscc.scc_rfcr, SCC_EB | SCC_GBL);
	W8(ep, sen_genscc.scc_tfcr, SCC_EB | SCC_GBL);
#else
	W8(ep, sen_genscc.scc_rfcr, SCC_EB);
	W8(ep, sen_genscc.scc_tfcr, SCC_EB);
#endif

	/* Set maximum bytes per receive buffer.
	 * This appears to be an Ethernet frame size, not the buffer
	 * fragment size.  It must be a multiple of four.
	 */
	W16(ep, sen_genscc.scc_mrblr, 0x5f0);

	/* Set CRC preset and mask.
	 */
	W32(ep, sen_cpres, 0xffffffff);
	W32(ep, sen_cmask, 0xdebb20e3);

	W32(ep, sen_crcec, 0);	/* CRC Error counter */
	W32(ep, sen_alec, 0);	/* alignment error counter */
	W32(ep, sen_disfc, 0);	/* discard frame counter */

	W16(ep, sen_pads, 0x8888);	/* Tx short frame pad character */
	W16(ep, sen_retlim, 15);	/* Retry limit threshold */

	W16(ep, sen_maxflr, 0x5ee);	/* maximum frame length register */

	W16(ep, sen_minflr, PKT_MINBUF_SIZE);	/* minimum frame length register */

	W16(ep, sen_maxd1, 0x000005f0);	/* maximum DMA1 length */
	W16(ep, sen_maxd2, 0x000005f0);	/* maximum DMA2 length */

	/* Clear hash tables.
	 */
	W16(ep, sen_gaddr1, 0);
	W16(ep, sen_gaddr2, 0);
	W16(ep, sen_gaddr3, 0);
	W16(ep, sen_gaddr4, 0);
	W16(ep, sen_iaddr1, 0);
	W16(ep, sen_iaddr2, 0);
	W16(ep, sen_iaddr3, 0);
	W16(ep, sen_iaddr4, 0);

	/* set address
	 */
	mac = dev->dev_addr;
	paddrh = ((u16) mac[5] << 8) | mac[4];
	paddrm = ((u16) mac[3] << 8) | mac[2];
	paddrl = ((u16) mac[1] << 8) | mac[0];

	W16(ep, sen_paddrh, paddrh);
	W16(ep, sen_paddrm, paddrm);
	W16(ep, sen_paddrl, paddrl);

	W16(ep, sen_pper, 0);
	W16(ep, sen_taddrl, 0);
	W16(ep, sen_taddrm, 0);
	W16(ep, sen_taddrh, 0);

	fs_init_bds(dev);

	scc_cr_cmd(fep, CPM_CR_INIT_TRX);

	W16(sccp, scc_scce, 0xffff);

	/* Enable interrupts we wish to service.
	 */
	W16(sccp, scc_sccm, SCCE_ENET_TXE | SCCE_ENET_RXF | SCCE_ENET_TXB);

	/* Set GSMR_H to enable all normal operating modes.
	 * Set GSMR_L to enable Ethernet to MC68160.
	 */
	W32(sccp, scc_gsmrh, 0);
	W32(sccp, scc_gsmrl,
	    SCC_GSMRL_TCI | SCC_GSMRL_TPL_48 | SCC_GSMRL_TPP_10 |
	    SCC_GSMRL_MODE_ENET);

	/* Set sync/delimiters.
	 */
	W16(sccp, scc_dsr, 0xd555);

	/* Set processing mode.  Use Ethernet CRC, catch broadcast, and
	 * start frame search 22 bit times after RENA.
	 */
	W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22);

	/* Set full duplex mode if needed */
	if (fep->phydev->duplex)
		S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE);

	/* Restore multicast and promiscuous settings */
	set_multicast_list(dev);

	S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
}

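/*
 * Quiesce the controller.  The delay loop is meant to allow a graceful
 * transmit stop (see the warning below); afterwards all SCC interrupts
 * are masked, ENT/ENR are cleared in GSMR_L to disable the transmitter
 * and receiver, and the buffer descriptors are cleaned up.
 */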
static void stop(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;
	int i;

	for (i = 0; (R16(sccp, scc_sccm) == 0) && i < SCC_RESET_DELAY; i++)
		udelay(1);

	if (i == SCC_RESET_DELAY)
		dev_warn(fep->dev, "SCC timeout on graceful transmit stop\n");

	W16(sccp, scc_sccm, 0);
	C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);

	fs_cleanup_bds(dev);
}

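/*
 * NAPI helpers: acknowledge the receive/transmit events in SCCE and
 * mask or unmask them in SCCM around NAPI polling.
 */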
static void napi_clear_rx_event(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;

	W16(sccp, scc_scce, SCC_NAPI_RX_EVENT_MSK);
}

static void napi_enable_rx(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;

	S16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK);
}

static void napi_disable_rx(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;

	C16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK);
}

static void napi_clear_tx_event(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;

	W16(sccp, scc_scce, SCC_NAPI_TX_EVENT_MSK);
}

static void napi_enable_tx(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;

	S16(sccp, scc_sccm, SCC_NAPI_TX_EVENT_MSK);
}

static void napi_disable_tx(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;

	C16(sccp, scc_sccm, SCC_NAPI_TX_EVENT_MSK);
}

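/*
 * Nothing to do here: the CPM picks up newly readied buffer
 * descriptors through its normal BD polling, so this driver does not
 * use an explicit kick and these hooks are intentionally empty.
 */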
static void rx_bd_done(struct net_device *dev)
{
	/* nothing */
}

static void tx_kickstart(struct net_device *dev)
{
	/* nothing */
}

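/*
 * Pending events are read from SCCE; writing the same bits back
 * clears them (SCCE is write-one-to-clear).
 */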
static u32 get_int_events(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;

	return (u32) R16(sccp, scc_scce);
}

static void clear_int_events(struct net_device *dev, u32 int_events)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;

	W16(sccp, scc_scce, int_events & 0xffff);
}

static void ev_error(struct net_device *dev, u32 int_events)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	dev_warn(fep->dev, "SCC ERROR(s) 0x%x\n", int_events);
}

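/*
 * ethtool register dump: the raw SCC register block followed by the
 * leading pointer-sized bytes of the parameter RAM, i.e. exactly the
 * size reported by get_regs_len().
 */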
static int get_regs(struct net_device *dev, void *p, int *sizep)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	if (*sizep < sizeof(scc_t) + sizeof(scc_enet_t __iomem *))
		return -EINVAL;

	memcpy_fromio(p, fep->scc.sccp, sizeof(scc_t));
	p = (char *)p + sizeof(scc_t);

	memcpy_fromio(p, fep->scc.ep, sizeof(scc_enet_t __iomem *));

	return 0;
}

static int get_regs_len(struct net_device *dev)
{
	return sizeof(scc_t) + sizeof(scc_enet_t __iomem *);
}

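/*
 * A transmit error (TXE) halts the SCC transmitter; the RESTART TX
 * CPM command resumes transmission from the current Tx BD.
 */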
static void tx_restart(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	scc_cr_cmd(fep, CPM_CR_RESTART_TX);
}



/*************************************************************************/

const struct fs_ops fs_scc_ops = {
	.setup_data		= setup_data,
	.cleanup_data		= cleanup_data,
	.set_multicast_list	= set_multicast_list,
	.restart		= restart,
	.stop			= stop,
	.napi_clear_rx_event	= napi_clear_rx_event,
	.napi_enable_rx		= napi_enable_rx,
	.napi_disable_rx	= napi_disable_rx,
	.napi_clear_tx_event	= napi_clear_tx_event,
	.napi_enable_tx		= napi_enable_tx,
	.napi_disable_tx	= napi_disable_tx,
	.rx_bd_done		= rx_bd_done,
	.tx_kickstart		= tx_kickstart,
	.get_int_events		= get_int_events,
	.clear_int_events	= clear_int_events,
	.ev_error		= ev_error,
	.get_regs		= get_regs,
	.get_regs_len		= get_regs_len,
	.tx_restart		= tx_restart,
	.allocate_bd		= allocate_bd,
	.free_bd		= free_bd,
};