/*
 * Ethernet on Serial Communications Controller (SCC) driver for Motorola MPC8xx and MPC82xx.
 *
 * Copyright (c) 2003 Intracom S.A.
 *  by Pantelis Antoniou <panto@intracom.gr>
 *
 * 2005 (c) MontaVista Software, Inc.
 * Vitaly Bordug <vbordug@ru.mvista.com>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include <asm/irq.h>
#include <linux/uaccess.h>

#include "fs_enet.h"

/*************************************************/
#if defined(CONFIG_CPM1)
/* On the 8xx, the __raw_xxx accessors are sufficient */
#define __fs_out32(addr, x)	__raw_writel(x, addr)
#define __fs_out16(addr, x)	__raw_writew(x, addr)
#define __fs_out8(addr, x)	__raw_writeb(x, addr)
#define __fs_in32(addr)	__raw_readl(addr)
#define __fs_in16(addr)	__raw_readw(addr)
#define __fs_in8(addr)	__raw_readb(addr)
#else
/* For other platforms, play it safe */
#define __fs_out32(addr, x)	out_be32(addr, x)
#define __fs_out16(addr, x)	out_be16(addr, x)
#define __fs_in32(addr)	in_be32(addr)
#define __fs_in16(addr)	in_be16(addr)
#define __fs_out8(addr, x)	out_8(addr, x)
#define __fs_in8(addr)	in_8(addr)
#endif

/* write, read, set bits, clear bits */
#define W32(_p, _m, _v) __fs_out32(&(_p)->_m, (_v))
#define R32(_p, _m)     __fs_in32(&(_p)->_m)
#define S32(_p, _m, _v) W32(_p, _m, R32(_p, _m) | (_v))
#define C32(_p, _m, _v) W32(_p, _m, R32(_p, _m) & ~(_v))

#define W16(_p, _m, _v) __fs_out16(&(_p)->_m, (_v))
#define R16(_p, _m)     __fs_in16(&(_p)->_m)
#define S16(_p, _m, _v) W16(_p, _m, R16(_p, _m) | (_v))
#define C16(_p, _m, _v) W16(_p, _m, R16(_p, _m) & ~(_v))

#define W8(_p, _m, _v)  __fs_out8(&(_p)->_m, (_v))
#define R8(_p, _m)      __fs_in8(&(_p)->_m)
#define S8(_p, _m, _v)  W8(_p, _m, R8(_p, _m) | (_v))
#define C8(_p, _m, _v)  W8(_p, _m, R8(_p, _m) & ~(_v))
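
/*
 * Usage sketch (illustrative only): each accessor takes a pointer to a
 * register block and a member name, e.g.
 *
 *	S16(sccp, scc_psmr, SCC_PSMR_PRO);	(set a bit)
 *	C32(sccp, scc_gsmrl, SCC_GSMRL_ENR);	(clear a bit)
 *
 * The set/clear variants are plain read-modify-write sequences, so callers
 * are expected to serialize access to a given register themselves.
 */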

#define SCC_MAX_MULTICAST_ADDRS	64

/*
 * Delay to wait for SCC reset command to complete (in us)
 */
#define SCC_RESET_DELAY		50

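/* Issue a CPM command addressed at this SCC's channel. */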
static inline int scc_cr_cmd(struct fs_enet_private *fep, u32 op)
{
	const struct fs_platform_info *fpi = fep->fpi;

	return cpm_command(fpi->cp_command, op);
}

static int do_pd_setup(struct fs_enet_private *fep)
{
	struct platform_device *ofdev = to_platform_device(fep->dev);

	fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
	if (!fep->interrupt)
		return -EINVAL;

	fep->scc.sccp = of_iomap(ofdev->dev.of_node, 0);
	if (!fep->scc.sccp)
		return -EINVAL;

	fep->scc.ep = of_iomap(ofdev->dev.of_node, 1);
	if (!fep->scc.ep) {
		iounmap(fep->scc.sccp);
		return -EINVAL;
	}

	return 0;
}

#define SCC_NAPI_EVENT_MSK	(SCCE_ENET_RXF | SCCE_ENET_RXB | SCCE_ENET_TXB)
#define SCC_EVENT		(SCCE_ENET_RXF | SCCE_ENET_TXB)
#define SCC_ERR_EVENT_MSK	(SCCE_ENET_TXE | SCCE_ENET_BSY)
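
/*
 * The masks above group the SCC events as used in setup_data(): RXF/RXB/TXB
 * are serviced from NAPI context, RXF/TXB (plus TXE) form the baseline
 * interrupt events, and TXE/BSY are reported as errors.
 */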

static int setup_data(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	int ret;

	ret = do_pd_setup(fep);
	if (ret)
		return ret;

	fep->scc.hthi = 0;
	fep->scc.htlo = 0;

	fep->ev_napi = SCC_NAPI_EVENT_MSK;
	fep->ev = SCC_EVENT | SCCE_ENET_TXE;
	fep->ev_err = SCC_ERR_EVENT_MSK;

	return 0;
}

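/*
 * Carve the RX and TX buffer descriptor rings out of the CPM dual-port RAM
 * (MURAM).  restart() lays out the RX ring first, immediately followed by
 * the TX ring.
 */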
static int allocate_bd(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;

	fep->ring_mem_addr = cpm_muram_alloc((fpi->tx_ring + fpi->rx_ring) *
					     sizeof(cbd_t), 8);
	if (IS_ERR_VALUE(fep->ring_mem_addr))
		return -ENOMEM;

	fep->ring_base = (void __iomem __force*)
		cpm_muram_addr(fep->ring_mem_addr);

	return 0;
}

static void free_bd(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	if (fep->ring_base)
		cpm_muram_free(fep->ring_mem_addr);
}

static void cleanup_data(struct net_device *dev)
{
	/* nothing */
}

static void set_promiscuous_mode(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;

	S16(sccp, scc_psmr, SCC_PSMR_PRO);
}

static void set_multicast_start(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_enet_t __iomem *ep = fep->scc.ep;

	W16(ep, sen_gaddr1, 0);
	W16(ep, sen_gaddr2, 0);
	W16(ep, sen_gaddr3, 0);
	W16(ep, sen_gaddr4, 0);
}

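/*
 * Add one multicast address to the hardware filter: the address is written
 * into the transient TADDR registers and the CPM "set group address" command
 * folds it into the GADDR group address filter.
 */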
static void set_multicast_one(struct net_device *dev, const u8 *mac)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_enet_t __iomem *ep = fep->scc.ep;
	u16 taddrh, taddrm, taddrl;

	taddrh = ((u16) mac[5] << 8) | mac[4];
	taddrm = ((u16) mac[3] << 8) | mac[2];
	taddrl = ((u16) mac[1] << 8) | mac[0];

	W16(ep, sen_taddrh, taddrh);
	W16(ep, sen_taddrm, taddrm);
	W16(ep, sen_taddrl, taddrl);
	scc_cr_cmd(fep, CPM_CR_SET_GADDR);
}

static void set_multicast_finish(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;
	scc_enet_t __iomem *ep = fep->scc.ep;

	/* Always clear promiscuous mode */
	C16(sccp, scc_psmr, SCC_PSMR_PRO);

	/* If all-multicast is requested, or there are too many multicast
	 * addresses to filter, just accept everything.
	 */
	if ((dev->flags & IFF_ALLMULTI) != 0 ||
	    netdev_mc_count(dev) > SCC_MAX_MULTICAST_ADDRS) {

		W16(ep, sen_gaddr1, 0xffff);
		W16(ep, sen_gaddr2, 0xffff);
		W16(ep, sen_gaddr3, 0xffff);
		W16(ep, sen_gaddr4, 0xffff);
	}
}

static void set_multicast_list(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	if ((dev->flags & IFF_PROMISC) == 0) {
		set_multicast_start(dev);
		netdev_for_each_mc_addr(ha, dev)
			set_multicast_one(dev, ha->addr);
		set_multicast_finish(dev);
	} else {
		set_promiscuous_mode(dev);
	}
}

/*
 * This function is called to start or restart the SCC during a link
 * change.  This only happens when switching between half and full
 * duplex.
 */
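/*
 * Rough sequence, as implemented below: disable the transmitter and
 * receiver, zero the SCC Ethernet parameter RAM, point RBASE/TBASE at the
 * BD rings, program the frame limits, CRC preset/mask and station address,
 * issue the CPM "init RX and TX parameters" command, unmask the interesting
 * events and finally re-enable the SCC through GSMR_L.
 */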
static void restart(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;
	scc_enet_t __iomem *ep = fep->scc.ep;
	const struct fs_platform_info *fpi = fep->fpi;
	u16 paddrh, paddrm, paddrl;
	const unsigned char *mac;
	int i;

	C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);

	/* clear everything (slow & steady does it) */
	for (i = 0; i < sizeof(*ep); i++)
		__fs_out8((u8 __iomem *)ep + i, 0);

	/* point to bds */
	W16(ep, sen_genscc.scc_rbase, fep->ring_mem_addr);
	W16(ep, sen_genscc.scc_tbase,
	    fep->ring_mem_addr + sizeof(cbd_t) * fpi->rx_ring);

	/* Initialize function code registers for big-endian.
	 */
#ifndef CONFIG_NOT_COHERENT_CACHE
	W8(ep, sen_genscc.scc_rfcr, SCC_EB | SCC_GBL);
	W8(ep, sen_genscc.scc_tfcr, SCC_EB | SCC_GBL);
#else
	W8(ep, sen_genscc.scc_rfcr, SCC_EB);
	W8(ep, sen_genscc.scc_tfcr, SCC_EB);
#endif

	/* Set maximum bytes per receive buffer.
	 * This appears to be an Ethernet frame size, not the buffer
	 * fragment size.  It must be a multiple of four.
	 */
	W16(ep, sen_genscc.scc_mrblr, 0x5f0);

	/* Set CRC preset and mask.
	 */
	W32(ep, sen_cpres, 0xffffffff);
	W32(ep, sen_cmask, 0xdebb20e3);

	W32(ep, sen_crcec, 0);	/* CRC Error counter */
	W32(ep, sen_alec, 0);	/* alignment error counter */
	W32(ep, sen_disfc, 0);	/* discard frame counter */

	W16(ep, sen_pads, 0x8888);	/* Tx short frame pad character */
	W16(ep, sen_retlim, 15);	/* Retry limit threshold */

	W16(ep, sen_maxflr, 0x5ee);	/* maximum frame length register */

	W16(ep, sen_minflr, PKT_MINBUF_SIZE);	/* minimum frame length register */

	W16(ep, sen_maxd1, 0x000005f0);	/* maximum DMA1 length */
	W16(ep, sen_maxd2, 0x000005f0);	/* maximum DMA2 length */

	/* Clear hash tables.
	 */
	W16(ep, sen_gaddr1, 0);
	W16(ep, sen_gaddr2, 0);
	W16(ep, sen_gaddr3, 0);
	W16(ep, sen_gaddr4, 0);
	W16(ep, sen_iaddr1, 0);
	W16(ep, sen_iaddr2, 0);
	W16(ep, sen_iaddr3, 0);
	W16(ep, sen_iaddr4, 0);

	/* set address
	 */
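	/* The MAC address is packed into the three 16-bit PADDR registers
	 * low byte first, which is the order the CPM expects.
	 */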
	mac = dev->dev_addr;
	paddrh = ((u16) mac[5] << 8) | mac[4];
	paddrm = ((u16) mac[3] << 8) | mac[2];
	paddrl = ((u16) mac[1] << 8) | mac[0];

	W16(ep, sen_paddrh, paddrh);
	W16(ep, sen_paddrm, paddrm);
	W16(ep, sen_paddrl, paddrl);

	W16(ep, sen_pper, 0);
	W16(ep, sen_taddrl, 0);
	W16(ep, sen_taddrm, 0);
	W16(ep, sen_taddrh, 0);

	fs_init_bds(dev);

	scc_cr_cmd(fep, CPM_CR_INIT_TRX);

	W16(sccp, scc_scce, 0xffff);

	/* Enable interrupts we wish to service.
	 */
	W16(sccp, scc_sccm, SCCE_ENET_TXE | SCCE_ENET_RXF | SCCE_ENET_TXB);

	/* Set GSMR_H to enable all normal operating modes.
	 * Set GSMR_L to enable Ethernet to MC68160.
	 */
	W32(sccp, scc_gsmrh, 0);
	W32(sccp, scc_gsmrl,
	    SCC_GSMRL_TCI | SCC_GSMRL_TPL_48 | SCC_GSMRL_TPP_10 |
	    SCC_GSMRL_MODE_ENET);

	/* Set sync/delimiters.
	 */
	W16(sccp, scc_dsr, 0xd555);

	/* Set processing mode.  Use Ethernet CRC, catch broadcast, and
	 * start frame search 22 bit times after RENA.
	 */
	W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22);

	/* Set full duplex mode if needed */
	if (dev->phydev->duplex)
		S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE);

	/* Restore multicast and promiscuous settings */
	set_multicast_list(dev);

	S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
}

static void stop(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;
	int i;

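	/* Allow up to SCC_RESET_DELAY microseconds for a graceful transmit
	 * stop before forcing the SCC off (see the warning below).
	 */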
	for (i = 0; (R16(sccp, scc_sccm) == 0) && i < SCC_RESET_DELAY; i++)
		udelay(1);

	if (i == SCC_RESET_DELAY)
		dev_warn(fep->dev, "SCC timeout on graceful transmit stop\n");

	W16(sccp, scc_sccm, 0);
	C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);

	fs_cleanup_bds(dev);
}

static void napi_clear_event_fs(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;

	W16(sccp, scc_scce, SCC_NAPI_EVENT_MSK);
}

static void napi_enable_fs(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;

	S16(sccp, scc_sccm, SCC_NAPI_EVENT_MSK);
}

static void napi_disable_fs(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;

	C16(sccp, scc_sccm, SCC_NAPI_EVENT_MSK);
}

static void rx_bd_done(struct net_device *dev)
{
	/* nothing */
}

static void tx_kickstart(struct net_device *dev)
{
	/* nothing */
}

static u32 get_int_events(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;

	return (u32) R16(sccp, scc_scce);
}

static void clear_int_events(struct net_device *dev, u32 int_events)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;

	W16(sccp, scc_scce, int_events & 0xffff);
}

static void ev_error(struct net_device *dev, u32 int_events)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	dev_warn(fep->dev, "SCC ERROR(s) 0x%x\n", int_events);
}

static int get_regs(struct net_device *dev, void *p, int *sizep)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	if (*sizep < sizeof(scc_t) + sizeof(scc_enet_t __iomem *))
		return -EINVAL;

	memcpy_fromio(p, fep->scc.sccp, sizeof(scc_t));
	p = (char *)p + sizeof(scc_t);

	memcpy_fromio(p, fep->scc.ep, sizeof(scc_enet_t __iomem *));

	return 0;
}

static int get_regs_len(struct net_device *dev)
{
	return sizeof(scc_t) + sizeof(scc_enet_t __iomem *);
}

static void tx_restart(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	scc_cr_cmd(fep, CPM_CR_RESTART_TX);
}



/*************************************************************************/

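/*
 * Hooks used by the generic fs_enet core to drive the SCC flavour of the
 * MAC.
 */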
const struct fs_ops fs_scc_ops = {
	.setup_data		= setup_data,
	.cleanup_data		= cleanup_data,
	.set_multicast_list	= set_multicast_list,
	.restart		= restart,
	.stop			= stop,
	.napi_clear_event	= napi_clear_event_fs,
	.napi_enable		= napi_enable_fs,
	.napi_disable		= napi_disable_fs,
	.rx_bd_done		= rx_bd_done,
	.tx_kickstart		= tx_kickstart,
	.get_int_events		= get_int_events,
	.clear_int_events	= clear_int_events,
	.ev_error		= ev_error,
	.get_regs		= get_regs,
	.get_regs_len		= get_regs_len,
	.tx_restart		= tx_restart,
	.allocate_bd		= allocate_bd,
	.free_bd		= free_bd,
};