1 /*
2  *  SuperH Ethernet device driver
3  *
4  *  Copyright (C) 2006-2012 Nobuhiro Iwamatsu
5  *  Copyright (C) 2008-2012 Renesas Solutions Corp.
6  *
7  *  This program is free software; you can redistribute it and/or modify it
8  *  under the terms and conditions of the GNU General Public License,
9  *  version 2, as published by the Free Software Foundation.
10  *
11  *  This program is distributed in the hope it will be useful, but WITHOUT
12  *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  *  more details.
15  *  You should have received a copy of the GNU General Public License along with
16  *  this program; if not, write to the Free Software Foundation, Inc.,
17  *  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18  *
19  *  The full GNU General Public License is included in this distribution in
20  *  the file called "COPYING".
21  */
22 
23 #include <linux/init.h>
24 #include <linux/module.h>
25 #include <linux/kernel.h>
26 #include <linux/spinlock.h>
27 #include <linux/interrupt.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/etherdevice.h>
30 #include <linux/delay.h>
31 #include <linux/platform_device.h>
32 #include <linux/mdio-bitbang.h>
33 #include <linux/netdevice.h>
34 #include <linux/phy.h>
35 #include <linux/cache.h>
36 #include <linux/io.h>
37 #include <linux/pm_runtime.h>
38 #include <linux/slab.h>
39 #include <linux/ethtool.h>
40 #include <linux/if_vlan.h>
41 #include <linux/clk.h>
42 #include <linux/sh_eth.h>
43 
44 #include "sh_eth.h"
45 
46 #define SH_ETH_DEF_MSG_ENABLE \
47 		(NETIF_MSG_LINK	| \
48 		NETIF_MSG_TIMER	| \
49 		NETIF_MSG_RX_ERR| \
50 		NETIF_MSG_TX_ERR)
51 
52 #if defined(CONFIG_CPU_SUBTYPE_SH7734) || \
53 	defined(CONFIG_CPU_SUBTYPE_SH7763) || \
54 	defined(CONFIG_ARCH_R8A7740)
55 static void sh_eth_select_mii(struct net_device *ndev)
56 {
57 	u32 value = 0x0;
58 	struct sh_eth_private *mdp = netdev_priv(ndev);
59 
60 	switch (mdp->phy_interface) {
61 	case PHY_INTERFACE_MODE_GMII:
62 		value = 0x2;
63 		break;
64 	case PHY_INTERFACE_MODE_MII:
65 		value = 0x1;
66 		break;
67 	case PHY_INTERFACE_MODE_RMII:
68 		value = 0x0;
69 		break;
70 	default:
		pr_warn("PHY interface mode was not set up; defaulting to MII.\n");
72 		value = 0x1;
73 		break;
74 	}
75 
76 	sh_eth_write(ndev, value, RMII_MII);
77 }
78 #endif
79 
80 /* There is CPU dependent code */
81 #if defined(CONFIG_CPU_SUBTYPE_SH7724)
82 #define SH_ETH_RESET_DEFAULT	1
83 static void sh_eth_set_duplex(struct net_device *ndev)
84 {
85 	struct sh_eth_private *mdp = netdev_priv(ndev);
86 
87 	if (mdp->duplex) /* Full */
88 		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
89 	else		/* Half */
90 		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
91 }
92 
93 static void sh_eth_set_rate(struct net_device *ndev)
94 {
95 	struct sh_eth_private *mdp = netdev_priv(ndev);
96 
97 	switch (mdp->speed) {
98 	case 10: /* 10BASE */
99 		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
100 		break;
101 	case 100:/* 100BASE */
102 		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
103 		break;
104 	default:
105 		break;
106 	}
107 }
108 
109 /* SH7724 */
110 static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
111 	.set_duplex	= sh_eth_set_duplex,
112 	.set_rate	= sh_eth_set_rate,
113 
114 	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
115 	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
116 	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f,
117 
118 	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
119 	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
120 			  EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
121 	.tx_error_check	= EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
122 
123 	.apr		= 1,
124 	.mpr		= 1,
125 	.tpauser	= 1,
126 	.hw_swap	= 1,
127 	.rpadir		= 1,
128 	.rpadir_value	= 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
129 };
130 #elif defined(CONFIG_CPU_SUBTYPE_SH7757)
131 #define SH_ETH_HAS_BOTH_MODULES	1
132 #define SH_ETH_HAS_TSU	1
133 static int sh_eth_check_reset(struct net_device *ndev);
134 
135 static void sh_eth_set_duplex(struct net_device *ndev)
136 {
137 	struct sh_eth_private *mdp = netdev_priv(ndev);
138 
139 	if (mdp->duplex) /* Full */
140 		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
141 	else		/* Half */
142 		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
143 }
144 
145 static void sh_eth_set_rate(struct net_device *ndev)
146 {
147 	struct sh_eth_private *mdp = netdev_priv(ndev);
148 
149 	switch (mdp->speed) {
150 	case 10: /* 10BASE */
151 		sh_eth_write(ndev, 0, RTRATE);
152 		break;
153 	case 100:/* 100BASE */
154 		sh_eth_write(ndev, 1, RTRATE);
155 		break;
156 	default:
157 		break;
158 	}
159 }
160 
161 /* SH7757 */
162 static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
163 	.set_duplex		= sh_eth_set_duplex,
164 	.set_rate		= sh_eth_set_rate,
165 
166 	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
167 	.rmcr_value	= 0x00000001,
168 
169 	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
170 	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
171 			  EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
172 	.tx_error_check	= EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
173 
174 	.apr		= 1,
175 	.mpr		= 1,
176 	.tpauser	= 1,
177 	.hw_swap	= 1,
178 	.no_ade		= 1,
179 	.rpadir		= 1,
180 	.rpadir_value   = 2 << 16,
181 };
182 
183 #define SH_GIGA_ETH_BASE	0xfee00000
184 #define GIGA_MALR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
185 #define GIGA_MAHR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
186 static void sh_eth_chip_reset_giga(struct net_device *ndev)
187 {
188 	int i;
189 	unsigned long mahr[2], malr[2];
190 
191 	/* save MAHR and MALR */
192 	for (i = 0; i < 2; i++) {
193 		malr[i] = ioread32((void *)GIGA_MALR(i));
194 		mahr[i] = ioread32((void *)GIGA_MAHR(i));
195 	}
196 
197 	/* reset device */
198 	iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800));
199 	mdelay(1);
200 
201 	/* restore MAHR and MALR */
202 	for (i = 0; i < 2; i++) {
203 		iowrite32(malr[i], (void *)GIGA_MALR(i));
204 		iowrite32(mahr[i], (void *)GIGA_MAHR(i));
205 	}
206 }
207 
208 static int sh_eth_is_gether(struct sh_eth_private *mdp);
209 static int sh_eth_reset(struct net_device *ndev)
210 {
211 	struct sh_eth_private *mdp = netdev_priv(ndev);
212 	int ret = 0;
213 
214 	if (sh_eth_is_gether(mdp)) {
215 		sh_eth_write(ndev, 0x03, EDSR);
216 		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
217 				EDMR);
218 
219 		ret = sh_eth_check_reset(ndev);
220 		if (ret)
221 			goto out;
222 
223 		/* Table Init */
224 		sh_eth_write(ndev, 0x0, TDLAR);
225 		sh_eth_write(ndev, 0x0, TDFAR);
226 		sh_eth_write(ndev, 0x0, TDFXR);
227 		sh_eth_write(ndev, 0x0, TDFFR);
228 		sh_eth_write(ndev, 0x0, RDLAR);
229 		sh_eth_write(ndev, 0x0, RDFAR);
230 		sh_eth_write(ndev, 0x0, RDFXR);
231 		sh_eth_write(ndev, 0x0, RDFFR);
232 	} else {
233 		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
234 				EDMR);
235 		mdelay(3);
236 		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
237 				EDMR);
238 	}
239 
240 out:
241 	return ret;
242 }
243 
244 static void sh_eth_set_duplex_giga(struct net_device *ndev)
245 {
246 	struct sh_eth_private *mdp = netdev_priv(ndev);
247 
248 	if (mdp->duplex) /* Full */
249 		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
250 	else		/* Half */
251 		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
252 }
253 
254 static void sh_eth_set_rate_giga(struct net_device *ndev)
255 {
256 	struct sh_eth_private *mdp = netdev_priv(ndev);
257 
258 	switch (mdp->speed) {
259 	case 10: /* 10BASE */
260 		sh_eth_write(ndev, 0x00000000, GECMR);
261 		break;
262 	case 100:/* 100BASE */
263 		sh_eth_write(ndev, 0x00000010, GECMR);
264 		break;
265 	case 1000: /* 1000BASE */
266 		sh_eth_write(ndev, 0x00000020, GECMR);
267 		break;
268 	default:
269 		break;
270 	}
271 }
272 
273 /* SH7757(GETHERC) */
274 static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = {
275 	.chip_reset	= sh_eth_chip_reset_giga,
276 	.set_duplex	= sh_eth_set_duplex_giga,
277 	.set_rate	= sh_eth_set_rate_giga,
278 
279 	.ecsr_value	= ECSR_ICD | ECSR_MPD,
280 	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
281 	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
282 
283 	.tx_check	= EESR_TC1 | EESR_FTC,
284 	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
285 			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
286 			  EESR_ECI,
287 	.tx_error_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
288 			  EESR_TFE,
289 	.fdr_value	= 0x0000072f,
290 	.rmcr_value	= 0x00000001,
291 
292 	.apr		= 1,
293 	.mpr		= 1,
294 	.tpauser	= 1,
295 	.bculr		= 1,
296 	.hw_swap	= 1,
297 	.rpadir		= 1,
298 	.rpadir_value   = 2 << 16,
299 	.no_trimd	= 1,
300 	.no_ade		= 1,
301 	.tsu		= 1,
302 };
303 
304 static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp)
305 {
306 	if (sh_eth_is_gether(mdp))
307 		return &sh_eth_my_cpu_data_giga;
308 	else
309 		return &sh_eth_my_cpu_data;
310 }
311 
312 #elif defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763)
313 #define SH_ETH_HAS_TSU	1
314 static int sh_eth_check_reset(struct net_device *ndev);
315 static void sh_eth_reset_hw_crc(struct net_device *ndev);
316 
317 static void sh_eth_chip_reset(struct net_device *ndev)
318 {
319 	struct sh_eth_private *mdp = netdev_priv(ndev);
320 
321 	/* reset device */
322 	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
323 	mdelay(1);
324 }
325 
326 static void sh_eth_set_duplex(struct net_device *ndev)
327 {
328 	struct sh_eth_private *mdp = netdev_priv(ndev);
329 
330 	if (mdp->duplex) /* Full */
331 		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
332 	else		/* Half */
333 		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
334 }
335 
336 static void sh_eth_set_rate(struct net_device *ndev)
337 {
338 	struct sh_eth_private *mdp = netdev_priv(ndev);
339 
340 	switch (mdp->speed) {
341 	case 10: /* 10BASE */
342 		sh_eth_write(ndev, GECMR_10, GECMR);
343 		break;
344 	case 100:/* 100BASE */
345 		sh_eth_write(ndev, GECMR_100, GECMR);
346 		break;
347 	case 1000: /* 1000BASE */
348 		sh_eth_write(ndev, GECMR_1000, GECMR);
349 		break;
350 	default:
351 		break;
352 	}
353 }
354 
355 /* sh7763 */
356 static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
357 	.chip_reset	= sh_eth_chip_reset,
358 	.set_duplex	= sh_eth_set_duplex,
359 	.set_rate	= sh_eth_set_rate,
360 
361 	.ecsr_value	= ECSR_ICD | ECSR_MPD,
362 	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
363 	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
364 
365 	.tx_check	= EESR_TC1 | EESR_FTC,
366 	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
367 			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
368 			  EESR_ECI,
369 	.tx_error_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
370 			  EESR_TFE,
371 
372 	.apr		= 1,
373 	.mpr		= 1,
374 	.tpauser	= 1,
375 	.bculr		= 1,
376 	.hw_swap	= 1,
377 	.no_trimd	= 1,
378 	.no_ade		= 1,
379 	.tsu		= 1,
380 #if defined(CONFIG_CPU_SUBTYPE_SH7734)
381 	.hw_crc     = 1,
382 	.select_mii = 1,
383 #endif
384 };
385 
386 static int sh_eth_reset(struct net_device *ndev)
387 {
388 	int ret = 0;
389 
390 	sh_eth_write(ndev, EDSR_ENALL, EDSR);
391 	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
392 
393 	ret = sh_eth_check_reset(ndev);
394 	if (ret)
395 		goto out;
396 
397 	/* Table Init */
398 	sh_eth_write(ndev, 0x0, TDLAR);
399 	sh_eth_write(ndev, 0x0, TDFAR);
400 	sh_eth_write(ndev, 0x0, TDFXR);
401 	sh_eth_write(ndev, 0x0, TDFFR);
402 	sh_eth_write(ndev, 0x0, RDLAR);
403 	sh_eth_write(ndev, 0x0, RDFAR);
404 	sh_eth_write(ndev, 0x0, RDFXR);
405 	sh_eth_write(ndev, 0x0, RDFFR);
406 
407 	/* Reset HW CRC register */
408 	sh_eth_reset_hw_crc(ndev);
409 
410 	/* Select MII mode */
411 	if (sh_eth_my_cpu_data.select_mii)
412 		sh_eth_select_mii(ndev);
413 out:
414 	return ret;
415 }
416 
417 static void sh_eth_reset_hw_crc(struct net_device *ndev)
418 {
419 	if (sh_eth_my_cpu_data.hw_crc)
420 		sh_eth_write(ndev, 0x0, CSMR);
421 }
422 
423 #elif defined(CONFIG_ARCH_R8A7740)
424 #define SH_ETH_HAS_TSU	1
425 static int sh_eth_check_reset(struct net_device *ndev);
426 
427 static void sh_eth_chip_reset(struct net_device *ndev)
428 {
429 	struct sh_eth_private *mdp = netdev_priv(ndev);
430 
431 	/* reset device */
432 	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
433 	mdelay(1);
434 
435 	sh_eth_select_mii(ndev);
436 }
437 
438 static int sh_eth_reset(struct net_device *ndev)
439 {
440 	int ret = 0;
441 
442 	sh_eth_write(ndev, EDSR_ENALL, EDSR);
443 	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
444 
445 	ret = sh_eth_check_reset(ndev);
446 	if (ret)
447 		goto out;
448 
449 	/* Table Init */
450 	sh_eth_write(ndev, 0x0, TDLAR);
451 	sh_eth_write(ndev, 0x0, TDFAR);
452 	sh_eth_write(ndev, 0x0, TDFXR);
453 	sh_eth_write(ndev, 0x0, TDFFR);
454 	sh_eth_write(ndev, 0x0, RDLAR);
455 	sh_eth_write(ndev, 0x0, RDFAR);
456 	sh_eth_write(ndev, 0x0, RDFXR);
457 	sh_eth_write(ndev, 0x0, RDFFR);
458 
459 out:
460 	return ret;
461 }
462 
463 static void sh_eth_set_duplex(struct net_device *ndev)
464 {
465 	struct sh_eth_private *mdp = netdev_priv(ndev);
466 
467 	if (mdp->duplex) /* Full */
468 		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
469 	else		/* Half */
470 		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
471 }
472 
473 static void sh_eth_set_rate(struct net_device *ndev)
474 {
475 	struct sh_eth_private *mdp = netdev_priv(ndev);
476 
477 	switch (mdp->speed) {
478 	case 10: /* 10BASE */
479 		sh_eth_write(ndev, GECMR_10, GECMR);
480 		break;
481 	case 100:/* 100BASE */
482 		sh_eth_write(ndev, GECMR_100, GECMR);
483 		break;
484 	case 1000: /* 1000BASE */
485 		sh_eth_write(ndev, GECMR_1000, GECMR);
486 		break;
487 	default:
488 		break;
489 	}
490 }
491 
492 /* R8A7740 */
493 static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
494 	.chip_reset	= sh_eth_chip_reset,
495 	.set_duplex	= sh_eth_set_duplex,
496 	.set_rate	= sh_eth_set_rate,
497 
498 	.ecsr_value	= ECSR_ICD | ECSR_MPD,
499 	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
500 	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
501 
502 	.tx_check	= EESR_TC1 | EESR_FTC,
503 	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
504 			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
505 			  EESR_ECI,
506 	.tx_error_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
507 			  EESR_TFE,
508 
509 	.apr		= 1,
510 	.mpr		= 1,
511 	.tpauser	= 1,
512 	.bculr		= 1,
513 	.hw_swap	= 1,
514 	.no_trimd	= 1,
515 	.no_ade		= 1,
516 	.tsu		= 1,
517 	.select_mii	= 1,
518 };
519 
520 #elif defined(CONFIG_CPU_SUBTYPE_SH7619)
521 #define SH_ETH_RESET_DEFAULT	1
522 static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
523 	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
524 
525 	.apr		= 1,
526 	.mpr		= 1,
527 	.tpauser	= 1,
528 	.hw_swap	= 1,
529 };
530 #elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
531 #define SH_ETH_RESET_DEFAULT	1
532 #define SH_ETH_HAS_TSU	1
533 static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
534 	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
535 	.tsu		= 1,
536 };
537 #endif
538 
539 static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
540 {
541 	if (!cd->ecsr_value)
542 		cd->ecsr_value = DEFAULT_ECSR_INIT;
543 
544 	if (!cd->ecsipr_value)
545 		cd->ecsipr_value = DEFAULT_ECSIPR_INIT;
546 
547 	if (!cd->fcftr_value)
548 		cd->fcftr_value = DEFAULT_FIFO_F_D_RFF | \
549 				  DEFAULT_FIFO_F_D_RFD;
550 
551 	if (!cd->fdr_value)
552 		cd->fdr_value = DEFAULT_FDR_INIT;
553 
554 	if (!cd->rmcr_value)
555 		cd->rmcr_value = DEFAULT_RMCR_VALUE;
556 
557 	if (!cd->tx_check)
558 		cd->tx_check = DEFAULT_TX_CHECK;
559 
560 	if (!cd->eesr_err_check)
561 		cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
562 
563 	if (!cd->tx_error_check)
564 		cd->tx_error_check = DEFAULT_TX_ERROR_CHECK;
565 }
566 
567 #if defined(SH_ETH_RESET_DEFAULT)
568 /* Chip Reset */
static int sh_eth_reset(struct net_device *ndev)
570 {
571 	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, EDMR);
572 	mdelay(3);
573 	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, EDMR);
574 
575 	return 0;
576 }
577 #else
578 static int sh_eth_check_reset(struct net_device *ndev)
579 {
580 	int ret = 0;
581 	int cnt = 100;
582 
583 	while (cnt > 0) {
584 		if (!(sh_eth_read(ndev, EDMR) & 0x3))
585 			break;
586 		mdelay(1);
587 		cnt--;
588 	}
	if (cnt <= 0) {
		pr_err("Device reset failed\n");
591 		ret = -ETIMEDOUT;
592 	}
593 	return ret;
594 }
595 #endif
596 
597 #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
598 static void sh_eth_set_receive_align(struct sk_buff *skb)
599 {
600 	int reserve;
601 
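	/*
	 * Reserve enough headroom to push skb->data up to the next
	 * SH4_SKB_RX_ALIGN boundary so the receive buffer does not share a
	 * cache line with unrelated data (SH4_SKB_RX_ALIGN is assumed to
	 * match the SH4 cache line size, per sh_eth.h).
	 */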
602 	reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
603 	if (reserve)
604 		skb_reserve(skb, reserve);
605 }
606 #else
607 static void sh_eth_set_receive_align(struct sk_buff *skb)
608 {
609 	skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
610 }
611 #endif
612 
613 
614 /* CPU <-> EDMAC endian convert */
615 static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
616 {
617 	switch (mdp->edmac_endian) {
618 	case EDMAC_LITTLE_ENDIAN:
619 		return cpu_to_le32(x);
620 	case EDMAC_BIG_ENDIAN:
621 		return cpu_to_be32(x);
622 	}
623 	return x;
624 }
625 
626 static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
627 {
628 	switch (mdp->edmac_endian) {
629 	case EDMAC_LITTLE_ENDIAN:
630 		return le32_to_cpu(x);
631 	case EDMAC_BIG_ENDIAN:
632 		return be32_to_cpu(x);
633 	}
634 	return x;
635 }
636 
637 /*
638  * Program the hardware MAC address from dev->dev_addr.
639  */
640 static void update_mac_address(struct net_device *ndev)
641 {
642 	sh_eth_write(ndev,
643 		(ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
644 		(ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
645 	sh_eth_write(ndev,
646 		(ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
647 }
648 
/*
 * Get MAC address from the SuperH MAC address registers
 *
 * The SuperH Ethernet controller has no ROM for the MAC address.
 * This driver reuses the MAC address that was programmed by the
 * bootloader (U-Boot or sh-ipl+g), so the bootloader must set a
 * valid MAC address before this device is used.
 */
657 static void read_mac_address(struct net_device *ndev, unsigned char *mac)
658 {
659 	if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
660 		memcpy(ndev->dev_addr, mac, 6);
661 	} else {
662 		ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
663 		ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
664 		ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
665 		ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF);
666 		ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;
667 		ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF);
668 	}
669 }
670 
671 static int sh_eth_is_gether(struct sh_eth_private *mdp)
672 {
673 	if (mdp->reg_offset == sh_eth_offset_gigabit)
674 		return 1;
675 	else
676 		return 0;
677 }
678 
679 static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
680 {
681 	if (sh_eth_is_gether(mdp))
682 		return EDTRR_TRNS_GETHER;
683 	else
684 		return EDTRR_TRNS_ETHER;
685 }
686 
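/*
 * Bit-banged MDIO state.  A single MMIO register (addr) carries the MDC
 * clock, the MDIO direction control and the MDIO output/input data bits;
 * the *_msk fields select each of them.  set_gate(), when set, is called
 * before every access for controllers that need a gate register written
 * first.
 */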
687 struct bb_info {
688 	void (*set_gate)(void *addr);
689 	struct mdiobb_ctrl ctrl;
690 	void *addr;
	u32 mmd_msk;	/* MDIO direction (MMD) bit */
	u32 mdo_msk;	/* MDIO output data bit */
	u32 mdi_msk;	/* MDIO input data bit */
	u32 mdc_msk;	/* MDC clock bit */
695 };
696 
697 /* PHY bit set */
698 static void bb_set(void *addr, u32 msk)
699 {
700 	iowrite32(ioread32(addr) | msk, addr);
701 }
702 
703 /* PHY bit clear */
704 static void bb_clr(void *addr, u32 msk)
705 {
706 	iowrite32((ioread32(addr) & ~msk), addr);
707 }
708 
709 /* PHY bit read */
710 static int bb_read(void *addr, u32 msk)
711 {
712 	return (ioread32(addr) & msk) != 0;
713 }
714 
715 /* Data I/O pin control */
716 static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
717 {
718 	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
719 
720 	if (bitbang->set_gate)
721 		bitbang->set_gate(bitbang->addr);
722 
723 	if (bit)
724 		bb_set(bitbang->addr, bitbang->mmd_msk);
725 	else
726 		bb_clr(bitbang->addr, bitbang->mmd_msk);
727 }
728 
/* Set bit data */
730 static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
731 {
732 	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
733 
734 	if (bitbang->set_gate)
735 		bitbang->set_gate(bitbang->addr);
736 
737 	if (bit)
738 		bb_set(bitbang->addr, bitbang->mdo_msk);
739 	else
740 		bb_clr(bitbang->addr, bitbang->mdo_msk);
741 }
742 
/* Get bit data */
744 static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
745 {
746 	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
747 
748 	if (bitbang->set_gate)
749 		bitbang->set_gate(bitbang->addr);
750 
751 	return bb_read(bitbang->addr, bitbang->mdi_msk);
752 }
753 
754 /* MDC pin control */
755 static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
756 {
757 	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
758 
759 	if (bitbang->set_gate)
760 		bitbang->set_gate(bitbang->addr);
761 
762 	if (bit)
763 		bb_set(bitbang->addr, bitbang->mdc_msk);
764 	else
765 		bb_clr(bitbang->addr, bitbang->mdc_msk);
766 }
767 
768 /* mdio bus control struct */
769 static struct mdiobb_ops bb_ops = {
770 	.owner = THIS_MODULE,
771 	.set_mdc = sh_mdc_ctrl,
772 	.set_mdio_dir = sh_mmd_ctrl,
773 	.set_mdio_data = sh_set_mdio,
774 	.get_mdio_data = sh_get_mdio,
775 };
776 
777 /* free skb and descriptor buffer */
778 static void sh_eth_ring_free(struct net_device *ndev)
779 {
780 	struct sh_eth_private *mdp = netdev_priv(ndev);
781 	int i;
782 
783 	/* Free Rx skb ringbuffer */
784 	if (mdp->rx_skbuff) {
785 		for (i = 0; i < mdp->num_rx_ring; i++) {
786 			if (mdp->rx_skbuff[i])
787 				dev_kfree_skb(mdp->rx_skbuff[i]);
788 		}
789 	}
790 	kfree(mdp->rx_skbuff);
791 	mdp->rx_skbuff = NULL;
792 
793 	/* Free Tx skb ringbuffer */
794 	if (mdp->tx_skbuff) {
795 		for (i = 0; i < mdp->num_tx_ring; i++) {
796 			if (mdp->tx_skbuff[i])
797 				dev_kfree_skb(mdp->tx_skbuff[i]);
798 		}
799 	}
800 	kfree(mdp->tx_skbuff);
801 	mdp->tx_skbuff = NULL;
802 }
803 
804 /* format skb and descriptor buffer */
805 static void sh_eth_ring_format(struct net_device *ndev)
806 {
807 	struct sh_eth_private *mdp = netdev_priv(ndev);
808 	int i;
809 	struct sk_buff *skb;
810 	struct sh_eth_rxdesc *rxdesc = NULL;
811 	struct sh_eth_txdesc *txdesc = NULL;
812 	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
813 	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
814 
815 	mdp->cur_rx = mdp->cur_tx = 0;
816 	mdp->dirty_rx = mdp->dirty_tx = 0;
817 
818 	memset(mdp->rx_ring, 0, rx_ringsize);
819 
820 	/* build Rx ring buffer */
821 	for (i = 0; i < mdp->num_rx_ring; i++) {
822 		/* skb */
823 		mdp->rx_skbuff[i] = NULL;
824 		skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
825 		mdp->rx_skbuff[i] = skb;
826 		if (skb == NULL)
827 			break;
828 		dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
829 				DMA_FROM_DEVICE);
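		/*
		 * The mapping's return value is not used; the descriptor is
		 * later filled with virt_to_phys(), on the assumption (as in
		 * the refill path in sh_eth_rx()) that DMA addresses equal
		 * physical addresses on these SoCs, so the call above mainly
		 * serves to flush the cache for the buffer.
		 */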
830 		sh_eth_set_receive_align(skb);
831 
832 		/* RX descriptor */
833 		rxdesc = &mdp->rx_ring[i];
834 		rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
835 		rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
836 
		/* The buffer size is rounded up to a 16-byte boundary. */
838 		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
839 		/* Rx descriptor address set */
840 		if (i == 0) {
841 			sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
842 			if (sh_eth_is_gether(mdp))
843 				sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
844 		}
845 	}
846 
847 	mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);
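	/*
	 * dirty_rx now trails cur_rx by the number of entries that still
	 * lack an skb, so the refill loop in sh_eth_rx() will retry them.
	 */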
848 
849 	/* Mark the last entry as wrapping the ring. */
850 	rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);
851 
852 	memset(mdp->tx_ring, 0, tx_ringsize);
853 
854 	/* build Tx ring buffer */
855 	for (i = 0; i < mdp->num_tx_ring; i++) {
856 		mdp->tx_skbuff[i] = NULL;
857 		txdesc = &mdp->tx_ring[i];
858 		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
859 		txdesc->buffer_length = 0;
860 		if (i == 0) {
861 			/* Tx descriptor address set */
862 			sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
863 			if (sh_eth_is_gether(mdp))
864 				sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
865 		}
866 	}
867 
868 	txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
869 }
870 
871 /* Get skb and descriptor buffer */
872 static int sh_eth_ring_init(struct net_device *ndev)
873 {
874 	struct sh_eth_private *mdp = netdev_priv(ndev);
875 	int rx_ringsize, tx_ringsize, ret = 0;
876 
877 	/*
878 	 * +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
879 	 * card needs room to do 8 byte alignment, +2 so we can reserve
880 	 * the first 2 bytes, and +16 gets room for the status word from the
881 	 * card.
882 	 */
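	/*
	 * For example, with the default MTU of 1500 this works out to
	 * ((1500 + 26 + 7) & ~7) + 2 + 16 = 1546 bytes per Rx buffer.
	 */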
883 	mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
884 			  (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
885 	if (mdp->cd->rpadir)
886 		mdp->rx_buf_sz += NET_IP_ALIGN;
887 
888 	/* Allocate RX and TX skb rings */
889 	mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * mdp->num_rx_ring,
890 				GFP_KERNEL);
891 	if (!mdp->rx_skbuff) {
892 		dev_err(&ndev->dev, "Cannot allocate Rx skb\n");
893 		ret = -ENOMEM;
894 		return ret;
895 	}
896 
897 	mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * mdp->num_tx_ring,
898 				GFP_KERNEL);
899 	if (!mdp->tx_skbuff) {
900 		dev_err(&ndev->dev, "Cannot allocate Tx skb\n");
901 		ret = -ENOMEM;
902 		goto skb_ring_free;
903 	}
904 
905 	/* Allocate all Rx descriptors. */
906 	rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
907 	mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
908 			GFP_KERNEL);
909 
910 	if (!mdp->rx_ring) {
911 		dev_err(&ndev->dev, "Cannot allocate Rx Ring (size %d bytes)\n",
912 			rx_ringsize);
913 		ret = -ENOMEM;
		goto skb_ring_free;
915 	}
916 
917 	mdp->dirty_rx = 0;
918 
919 	/* Allocate all Tx descriptors. */
920 	tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
921 	mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
922 			GFP_KERNEL);
923 	if (!mdp->tx_ring) {
924 		dev_err(&ndev->dev, "Cannot allocate Tx Ring (size %d bytes)\n",
925 			tx_ringsize);
926 		ret = -ENOMEM;
927 		goto desc_ring_free;
928 	}
929 	return ret;
930 
931 desc_ring_free:
932 	/* free DMA buffer */
933 	dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);
934 
935 skb_ring_free:
936 	/* Free Rx and Tx skb ring buffer */
937 	sh_eth_ring_free(ndev);
938 	mdp->tx_ring = NULL;
939 	mdp->rx_ring = NULL;
940 
941 	return ret;
942 }
943 
944 static void sh_eth_free_dma_buffer(struct sh_eth_private *mdp)
945 {
946 	int ringsize;
947 
948 	if (mdp->rx_ring) {
949 		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
950 		dma_free_coherent(NULL, ringsize, mdp->rx_ring,
951 				  mdp->rx_desc_dma);
952 		mdp->rx_ring = NULL;
953 	}
954 
955 	if (mdp->tx_ring) {
956 		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
957 		dma_free_coherent(NULL, ringsize, mdp->tx_ring,
958 				  mdp->tx_desc_dma);
959 		mdp->tx_ring = NULL;
960 	}
961 }
962 
963 static int sh_eth_dev_init(struct net_device *ndev, bool start)
964 {
965 	int ret = 0;
966 	struct sh_eth_private *mdp = netdev_priv(ndev);
967 	u32 val;
968 
969 	/* Soft Reset */
970 	ret = sh_eth_reset(ndev);
971 	if (ret)
972 		goto out;
973 
974 	/* Descriptor format */
975 	sh_eth_ring_format(ndev);
976 	if (mdp->cd->rpadir)
977 		sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);
978 
979 	/* all sh_eth int mask */
980 	sh_eth_write(ndev, 0, EESIPR);
981 
982 #if defined(__LITTLE_ENDIAN)
983 	if (mdp->cd->hw_swap)
984 		sh_eth_write(ndev, EDMR_EL, EDMR);
985 	else
986 #endif
987 		sh_eth_write(ndev, 0, EDMR);
988 
989 	/* FIFO size set */
990 	sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
991 	sh_eth_write(ndev, 0, TFTR);
992 
993 	/* Frame recv control */
994 	sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);
995 
996 	sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);
997 
998 	if (mdp->cd->bculr)
		sh_eth_write(ndev, 0x800, BCULR);	/* Burst cycle set */
1000 
1001 	sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);
1002 
1003 	if (!mdp->cd->no_trimd)
1004 		sh_eth_write(ndev, 0, TRIMD);
1005 
1006 	/* Recv frame limit set register */
1007 	sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
1008 		     RFLR);
1009 
1010 	sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
1011 	if (start)
1012 		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1013 
1014 	/* PAUSE Prohibition */
1015 	val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
1016 		ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
1017 
1018 	sh_eth_write(ndev, val, ECMR);
1019 
1020 	if (mdp->cd->set_rate)
1021 		mdp->cd->set_rate(ndev);
1022 
1023 	/* E-MAC Status Register clear */
1024 	sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);
1025 
1026 	/* E-MAC Interrupt Enable register */
1027 	if (start)
1028 		sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
1029 
1030 	/* Set MAC address */
1031 	update_mac_address(ndev);
1032 
1033 	/* mask reset */
1034 	if (mdp->cd->apr)
1035 		sh_eth_write(ndev, APR_AP, APR);
1036 	if (mdp->cd->mpr)
1037 		sh_eth_write(ndev, MPR_MP, MPR);
1038 	if (mdp->cd->tpauser)
1039 		sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
1040 
1041 	if (start) {
1042 		/* Setting the Rx mode will start the Rx process. */
1043 		sh_eth_write(ndev, EDRRR_R, EDRRR);
1044 
1045 		netif_start_queue(ndev);
1046 	}
1047 
1048 out:
1049 	return ret;
1050 }
1051 
1052 /* free Tx skb function */
1053 static int sh_eth_txfree(struct net_device *ndev)
1054 {
1055 	struct sh_eth_private *mdp = netdev_priv(ndev);
1056 	struct sh_eth_txdesc *txdesc;
	int free_num = 0;
1058 	int entry = 0;
1059 
1060 	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
1061 		entry = mdp->dirty_tx % mdp->num_tx_ring;
1062 		txdesc = &mdp->tx_ring[entry];
1063 		if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
1064 			break;
1065 		/* Free the original skb. */
1066 		if (mdp->tx_skbuff[entry]) {
1067 			dma_unmap_single(&ndev->dev, txdesc->addr,
1068 					 txdesc->buffer_length, DMA_TO_DEVICE);
1069 			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
1070 			mdp->tx_skbuff[entry] = NULL;
			free_num++;
1072 		}
1073 		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
1074 		if (entry >= mdp->num_tx_ring - 1)
1075 			txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
1076 
1077 		ndev->stats.tx_packets++;
1078 		ndev->stats.tx_bytes += txdesc->buffer_length;
1079 	}
	return free_num;
1081 }
1082 
1083 /* Packet receive function */
1084 static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
1085 {
1086 	struct sh_eth_private *mdp = netdev_priv(ndev);
1087 	struct sh_eth_rxdesc *rxdesc;
1088 
1089 	int entry = mdp->cur_rx % mdp->num_rx_ring;
1090 	int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
1091 	struct sk_buff *skb;
1092 	u16 pkt_len = 0;
1093 	u32 desc_status;
1094 
1095 	rxdesc = &mdp->rx_ring[entry];
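	/*
	 * boguscnt limits this pass to the descriptors currently owned by
	 * the hardware, so one interrupt cannot keep us in this loop
	 * indefinitely.
	 */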
1096 	while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
1097 		desc_status = edmac_to_cpu(mdp, rxdesc->status);
1098 		pkt_len = rxdesc->frame_length;
1099 
1100 #if defined(CONFIG_ARCH_R8A7740)
1101 		desc_status >>= 16;
1102 #endif
1103 
1104 		if (--boguscnt < 0)
1105 			break;
1106 
1107 		if (!(desc_status & RDFEND))
1108 			ndev->stats.rx_length_errors++;
1109 
1110 		if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
1111 				   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
1112 			ndev->stats.rx_errors++;
1113 			if (desc_status & RD_RFS1)
1114 				ndev->stats.rx_crc_errors++;
1115 			if (desc_status & RD_RFS2)
1116 				ndev->stats.rx_frame_errors++;
1117 			if (desc_status & RD_RFS3)
1118 				ndev->stats.rx_length_errors++;
1119 			if (desc_status & RD_RFS4)
1120 				ndev->stats.rx_length_errors++;
1121 			if (desc_status & RD_RFS6)
1122 				ndev->stats.rx_missed_errors++;
1123 			if (desc_status & RD_RFS10)
1124 				ndev->stats.rx_over_errors++;
1125 		} else {
1126 			if (!mdp->cd->hw_swap)
1127 				sh_eth_soft_swap(
1128 					phys_to_virt(ALIGN(rxdesc->addr, 4)),
1129 					pkt_len + 2);
1130 			skb = mdp->rx_skbuff[entry];
1131 			mdp->rx_skbuff[entry] = NULL;
1132 			if (mdp->cd->rpadir)
1133 				skb_reserve(skb, NET_IP_ALIGN);
1134 			skb_put(skb, pkt_len);
1135 			skb->protocol = eth_type_trans(skb, ndev);
1136 			netif_rx(skb);
1137 			ndev->stats.rx_packets++;
1138 			ndev->stats.rx_bytes += pkt_len;
1139 		}
1140 		rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
1141 		entry = (++mdp->cur_rx) % mdp->num_rx_ring;
1142 		rxdesc = &mdp->rx_ring[entry];
1143 	}
1144 
1145 	/* Refill the Rx ring buffers. */
1146 	for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
1147 		entry = mdp->dirty_rx % mdp->num_rx_ring;
1148 		rxdesc = &mdp->rx_ring[entry];
		/* The buffer size is rounded up to a 16-byte boundary. */
1150 		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
1151 
1152 		if (mdp->rx_skbuff[entry] == NULL) {
1153 			skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
1154 			mdp->rx_skbuff[entry] = skb;
1155 			if (skb == NULL)
1156 				break;	/* Better luck next round. */
1157 			dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
1158 					DMA_FROM_DEVICE);
1159 			sh_eth_set_receive_align(skb);
1160 
1161 			skb_checksum_none_assert(skb);
1162 			rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
1163 		}
1164 		if (entry >= mdp->num_rx_ring - 1)
1165 			rxdesc->status |=
1166 				cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
1167 		else
1168 			rxdesc->status |=
1169 				cpu_to_edmac(mdp, RD_RACT | RD_RFP);
1170 	}
1171 
1172 	/* Restart Rx engine if stopped. */
1173 	/* If we don't need to check status, don't. -KDU */
1174 	if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
1175 		/* fix the values for the next receiving if RDE is set */
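		/*
		 * RDLAR holds the ring base and RDFAR the descriptor the
		 * DMAC stopped at; with 16-byte descriptors the difference
		 * shifted right by 4 is the index to resume from.
		 */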
1176 		if (intr_status & EESR_RDE)
1177 			mdp->cur_rx = mdp->dirty_rx =
1178 				(sh_eth_read(ndev, RDFAR) -
1179 				 sh_eth_read(ndev, RDLAR)) >> 4;
1180 		sh_eth_write(ndev, EDRRR_R, EDRRR);
1181 	}
1182 
1183 	return 0;
1184 }
1185 
1186 static void sh_eth_rcv_snd_disable(struct net_device *ndev)
1187 {
1188 	/* disable tx and rx */
1189 	sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
1190 		~(ECMR_RE | ECMR_TE), ECMR);
1191 }
1192 
1193 static void sh_eth_rcv_snd_enable(struct net_device *ndev)
1194 {
1195 	/* enable tx and rx */
1196 	sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
1197 		(ECMR_RE | ECMR_TE), ECMR);
1198 }
1199 
1200 /* error control function */
1201 static void sh_eth_error(struct net_device *ndev, int intr_status)
1202 {
1203 	struct sh_eth_private *mdp = netdev_priv(ndev);
1204 	u32 felic_stat;
1205 	u32 link_stat;
1206 	u32 mask;
1207 
1208 	if (intr_status & EESR_ECI) {
1209 		felic_stat = sh_eth_read(ndev, ECSR);
1210 		sh_eth_write(ndev, felic_stat, ECSR);	/* clear int */
1211 		if (felic_stat & ECSR_ICD)
1212 			ndev->stats.tx_carrier_errors++;
1213 		if (felic_stat & ECSR_LCHNG) {
1214 			/* Link Changed */
1215 			if (mdp->cd->no_psr || mdp->no_ether_link) {
1216 				if (mdp->link == PHY_DOWN)
1217 					link_stat = 0;
1218 				else
1219 					link_stat = PHY_ST_LINK;
1220 			} else {
1221 				link_stat = (sh_eth_read(ndev, PSR));
1222 				if (mdp->ether_link_active_low)
1223 					link_stat = ~link_stat;
1224 			}
1225 			if (!(link_stat & PHY_ST_LINK))
1226 				sh_eth_rcv_snd_disable(ndev);
1227 			else {
1228 				/* Link Up */
1229 				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
1230 					  ~DMAC_M_ECI, EESIPR);
1231 				/*clear int */
1232 				sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
1233 					  ECSR);
1234 				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
1235 					  DMAC_M_ECI, EESIPR);
1236 				/* enable tx and rx */
1237 				sh_eth_rcv_snd_enable(ndev);
1238 			}
1239 		}
1240 	}
1241 
1242 	if (intr_status & EESR_TWB) {
		/* Write-back end, unused write-back interrupt */
		if (intr_status & EESR_TABT) {	/* Transmit Abort int */
			ndev->stats.tx_aborted_errors++;
			if (netif_msg_tx_err(mdp))
				dev_err(&ndev->dev, "Transmit Abort\n");
		}
	}
1249 
1250 	if (intr_status & EESR_RABT) {
1251 		/* Receive Abort int */
1252 		if (intr_status & EESR_RFRMER) {
1253 			/* Receive Frame Overflow int */
1254 			ndev->stats.rx_frame_errors++;
1255 			if (netif_msg_rx_err(mdp))
1256 				dev_err(&ndev->dev, "Receive Abort\n");
1257 		}
1258 	}
1259 
1260 	if (intr_status & EESR_TDE) {
1261 		/* Transmit Descriptor Empty int */
1262 		ndev->stats.tx_fifo_errors++;
1263 		if (netif_msg_tx_err(mdp))
1264 			dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
1265 	}
1266 
1267 	if (intr_status & EESR_TFE) {
1268 		/* FIFO under flow */
1269 		ndev->stats.tx_fifo_errors++;
1270 		if (netif_msg_tx_err(mdp))
1271 			dev_err(&ndev->dev, "Transmit FIFO Under flow\n");
1272 	}
1273 
1274 	if (intr_status & EESR_RDE) {
1275 		/* Receive Descriptor Empty int */
1276 		ndev->stats.rx_over_errors++;
1277 
1278 		if (netif_msg_rx_err(mdp))
1279 			dev_err(&ndev->dev, "Receive Descriptor Empty\n");
1280 	}
1281 
1282 	if (intr_status & EESR_RFE) {
1283 		/* Receive FIFO Overflow int */
1284 		ndev->stats.rx_fifo_errors++;
1285 		if (netif_msg_rx_err(mdp))
1286 			dev_err(&ndev->dev, "Receive FIFO Overflow\n");
1287 	}
1288 
1289 	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
1290 		/* Address Error */
1291 		ndev->stats.tx_fifo_errors++;
1292 		if (netif_msg_tx_err(mdp))
1293 			dev_err(&ndev->dev, "Address Error\n");
1294 	}
1295 
1296 	mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
1297 	if (mdp->cd->no_ade)
1298 		mask &= ~EESR_ADE;
1299 	if (intr_status & mask) {
1300 		/* Tx error */
1301 		u32 edtrr = sh_eth_read(ndev, EDTRR);
1302 		/* dmesg */
1303 		dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
1304 				intr_status, mdp->cur_tx);
1305 		dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
1306 				mdp->dirty_tx, (u32) ndev->state, edtrr);
1307 		/* dirty buffer free */
1308 		sh_eth_txfree(ndev);
1309 
1310 		/* SH7712 BUG */
1311 		if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
1312 			/* tx dma start */
1313 			sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
1314 		}
1315 		/* wakeup */
1316 		netif_wake_queue(ndev);
1317 	}
1318 }
1319 
1320 static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
1321 {
1322 	struct net_device *ndev = netdev;
1323 	struct sh_eth_private *mdp = netdev_priv(ndev);
1324 	struct sh_eth_cpu_data *cd = mdp->cd;
1325 	irqreturn_t ret = IRQ_NONE;
1326 	u32 intr_status = 0;
1327 
1328 	spin_lock(&mdp->lock);
1329 
	/* Get interrupt status */
1331 	intr_status = sh_eth_read(ndev, EESR);
1332 	/* Clear interrupt */
1333 	if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
1334 			EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
1335 			cd->tx_check | cd->eesr_err_check)) {
1336 		sh_eth_write(ndev, intr_status, EESR);
1337 		ret = IRQ_HANDLED;
	} else {
		goto other_irq;
	}
1340 
	if (intr_status & (EESR_FRC | /* Frame recv */
			EESR_RMAF | /* Multicast address recv */
			EESR_RRF  | /* Residual bit frame recv */
			EESR_RTLF | /* Long frame recv */
			EESR_RTSF | /* Short frame recv */
			EESR_PRE  | /* PHY-LSI recv error */
			EESR_CERF)) { /* Recv frame CRC error */
1348 		sh_eth_rx(ndev, intr_status);
1349 	}
1350 
1351 	/* Tx Check */
1352 	if (intr_status & cd->tx_check) {
1353 		sh_eth_txfree(ndev);
1354 		netif_wake_queue(ndev);
1355 	}
1356 
1357 	if (intr_status & cd->eesr_err_check)
1358 		sh_eth_error(ndev, intr_status);
1359 
1360 other_irq:
1361 	spin_unlock(&mdp->lock);
1362 
1363 	return ret;
1364 }
1365 
1366 /* PHY state control function */
1367 static void sh_eth_adjust_link(struct net_device *ndev)
1368 {
1369 	struct sh_eth_private *mdp = netdev_priv(ndev);
1370 	struct phy_device *phydev = mdp->phydev;
1371 	int new_state = 0;
1372 
1373 	if (phydev->link != PHY_DOWN) {
1374 		if (phydev->duplex != mdp->duplex) {
1375 			new_state = 1;
1376 			mdp->duplex = phydev->duplex;
1377 			if (mdp->cd->set_duplex)
1378 				mdp->cd->set_duplex(ndev);
1379 		}
1380 
1381 		if (phydev->speed != mdp->speed) {
1382 			new_state = 1;
1383 			mdp->speed = phydev->speed;
1384 			if (mdp->cd->set_rate)
1385 				mdp->cd->set_rate(ndev);
1386 		}
1387 		if (mdp->link == PHY_DOWN) {
1388 			sh_eth_write(ndev,
1389 				(sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR);
1390 			new_state = 1;
1391 			mdp->link = phydev->link;
1392 		}
1393 	} else if (mdp->link) {
1394 		new_state = 1;
1395 		mdp->link = PHY_DOWN;
1396 		mdp->speed = 0;
1397 		mdp->duplex = -1;
1398 	}
1399 
1400 	if (new_state && netif_msg_link(mdp))
1401 		phy_print_status(phydev);
1402 }
1403 
1404 /* PHY init function */
1405 static int sh_eth_phy_init(struct net_device *ndev)
1406 {
1407 	struct sh_eth_private *mdp = netdev_priv(ndev);
1408 	char phy_id[MII_BUS_ID_SIZE + 3];
1409 	struct phy_device *phydev = NULL;
1410 
1411 	snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
1412 		mdp->mii_bus->id , mdp->phy_id);
1413 
1414 	mdp->link = PHY_DOWN;
1415 	mdp->speed = 0;
1416 	mdp->duplex = -1;
1417 
1418 	/* Try connect to PHY */
1419 	phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
1420 				0, mdp->phy_interface);
1421 	if (IS_ERR(phydev)) {
1422 		dev_err(&ndev->dev, "phy_connect failed\n");
1423 		return PTR_ERR(phydev);
1424 	}
1425 
1426 	dev_info(&ndev->dev, "attached phy %i to driver %s\n",
1427 		phydev->addr, phydev->drv->name);
1428 
1429 	mdp->phydev = phydev;
1430 
1431 	return 0;
1432 }
1433 
1434 /* PHY control start function */
1435 static int sh_eth_phy_start(struct net_device *ndev)
1436 {
1437 	struct sh_eth_private *mdp = netdev_priv(ndev);
1438 	int ret;
1439 
1440 	ret = sh_eth_phy_init(ndev);
1441 	if (ret)
1442 		return ret;
1443 
1444 	/* reset phy - this also wakes it from PDOWN */
1445 	phy_write(mdp->phydev, MII_BMCR, BMCR_RESET);
1446 	phy_start(mdp->phydev);
1447 
1448 	return 0;
1449 }
1450 
1451 static int sh_eth_get_settings(struct net_device *ndev,
1452 			struct ethtool_cmd *ecmd)
1453 {
1454 	struct sh_eth_private *mdp = netdev_priv(ndev);
1455 	unsigned long flags;
1456 	int ret;
1457 
1458 	spin_lock_irqsave(&mdp->lock, flags);
1459 	ret = phy_ethtool_gset(mdp->phydev, ecmd);
1460 	spin_unlock_irqrestore(&mdp->lock, flags);
1461 
1462 	return ret;
1463 }
1464 
1465 static int sh_eth_set_settings(struct net_device *ndev,
1466 		struct ethtool_cmd *ecmd)
1467 {
1468 	struct sh_eth_private *mdp = netdev_priv(ndev);
1469 	unsigned long flags;
1470 	int ret;
1471 
1472 	spin_lock_irqsave(&mdp->lock, flags);
1473 
1474 	/* disable tx and rx */
1475 	sh_eth_rcv_snd_disable(ndev);
1476 
1477 	ret = phy_ethtool_sset(mdp->phydev, ecmd);
1478 	if (ret)
1479 		goto error_exit;
1480 
1481 	if (ecmd->duplex == DUPLEX_FULL)
1482 		mdp->duplex = 1;
1483 	else
1484 		mdp->duplex = 0;
1485 
1486 	if (mdp->cd->set_duplex)
1487 		mdp->cd->set_duplex(ndev);
1488 
1489 error_exit:
1490 	mdelay(1);
1491 
1492 	/* enable tx and rx */
1493 	sh_eth_rcv_snd_enable(ndev);
1494 
1495 	spin_unlock_irqrestore(&mdp->lock, flags);
1496 
1497 	return ret;
1498 }
1499 
1500 static int sh_eth_nway_reset(struct net_device *ndev)
1501 {
1502 	struct sh_eth_private *mdp = netdev_priv(ndev);
1503 	unsigned long flags;
1504 	int ret;
1505 
1506 	spin_lock_irqsave(&mdp->lock, flags);
1507 	ret = phy_start_aneg(mdp->phydev);
1508 	spin_unlock_irqrestore(&mdp->lock, flags);
1509 
1510 	return ret;
1511 }
1512 
1513 static u32 sh_eth_get_msglevel(struct net_device *ndev)
1514 {
1515 	struct sh_eth_private *mdp = netdev_priv(ndev);
1516 	return mdp->msg_enable;
1517 }
1518 
1519 static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
1520 {
1521 	struct sh_eth_private *mdp = netdev_priv(ndev);
1522 	mdp->msg_enable = value;
1523 }
1524 
1525 static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
1526 	"rx_current", "tx_current",
1527 	"rx_dirty", "tx_dirty",
1528 };
1529 #define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)
1530 
1531 static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
1532 {
1533 	switch (sset) {
1534 	case ETH_SS_STATS:
1535 		return SH_ETH_STATS_LEN;
1536 	default:
1537 		return -EOPNOTSUPP;
1538 	}
1539 }
1540 
1541 static void sh_eth_get_ethtool_stats(struct net_device *ndev,
1542 			struct ethtool_stats *stats, u64 *data)
1543 {
1544 	struct sh_eth_private *mdp = netdev_priv(ndev);
1545 	int i = 0;
1546 
1547 	/* device-specific stats */
1548 	data[i++] = mdp->cur_rx;
1549 	data[i++] = mdp->cur_tx;
1550 	data[i++] = mdp->dirty_rx;
1551 	data[i++] = mdp->dirty_tx;
1552 }
1553 
1554 static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
1555 {
1556 	switch (stringset) {
1557 	case ETH_SS_STATS:
1558 		memcpy(data, *sh_eth_gstrings_stats,
1559 					sizeof(sh_eth_gstrings_stats));
1560 		break;
1561 	}
1562 }
1563 
1564 static void sh_eth_get_ringparam(struct net_device *ndev,
1565 				 struct ethtool_ringparam *ring)
1566 {
1567 	struct sh_eth_private *mdp = netdev_priv(ndev);
1568 
1569 	ring->rx_max_pending = RX_RING_MAX;
1570 	ring->tx_max_pending = TX_RING_MAX;
1571 	ring->rx_pending = mdp->num_rx_ring;
1572 	ring->tx_pending = mdp->num_tx_ring;
1573 }
1574 
1575 static int sh_eth_set_ringparam(struct net_device *ndev,
1576 				struct ethtool_ringparam *ring)
1577 {
1578 	struct sh_eth_private *mdp = netdev_priv(ndev);
1579 	int ret;
1580 
1581 	if (ring->tx_pending > TX_RING_MAX ||
1582 	    ring->rx_pending > RX_RING_MAX ||
1583 	    ring->tx_pending < TX_RING_MIN ||
1584 	    ring->rx_pending < RX_RING_MIN)
1585 		return -EINVAL;
1586 	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
1587 		return -EINVAL;
1588 
1589 	if (netif_running(ndev)) {
1590 		netif_tx_disable(ndev);
1591 		/* Disable interrupts by clearing the interrupt mask. */
1592 		sh_eth_write(ndev, 0x0000, EESIPR);
1593 		/* Stop the chip's Tx and Rx processes. */
1594 		sh_eth_write(ndev, 0, EDTRR);
1595 		sh_eth_write(ndev, 0, EDRRR);
1596 		synchronize_irq(ndev->irq);
1597 	}
1598 
1599 	/* Free all the skbuffs in the Rx queue. */
1600 	sh_eth_ring_free(ndev);
1601 	/* Free DMA buffer */
1602 	sh_eth_free_dma_buffer(mdp);
1603 
1604 	/* Set new parameters */
1605 	mdp->num_rx_ring = ring->rx_pending;
1606 	mdp->num_tx_ring = ring->tx_pending;
1607 
1608 	ret = sh_eth_ring_init(ndev);
1609 	if (ret < 0) {
1610 		dev_err(&ndev->dev, "%s: sh_eth_ring_init failed.\n", __func__);
1611 		return ret;
1612 	}
1613 	ret = sh_eth_dev_init(ndev, false);
1614 	if (ret < 0) {
1615 		dev_err(&ndev->dev, "%s: sh_eth_dev_init failed.\n", __func__);
1616 		return ret;
1617 	}
1618 
1619 	if (netif_running(ndev)) {
1620 		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1621 		/* Setting the Rx mode will start the Rx process. */
1622 		sh_eth_write(ndev, EDRRR_R, EDRRR);
1623 		netif_wake_queue(ndev);
1624 	}
1625 
1626 	return 0;
1627 }
1628 
1629 static const struct ethtool_ops sh_eth_ethtool_ops = {
1630 	.get_settings	= sh_eth_get_settings,
1631 	.set_settings	= sh_eth_set_settings,
1632 	.nway_reset	= sh_eth_nway_reset,
1633 	.get_msglevel	= sh_eth_get_msglevel,
1634 	.set_msglevel	= sh_eth_set_msglevel,
1635 	.get_link	= ethtool_op_get_link,
1636 	.get_strings	= sh_eth_get_strings,
1637 	.get_ethtool_stats  = sh_eth_get_ethtool_stats,
1638 	.get_sset_count     = sh_eth_get_sset_count,
1639 	.get_ringparam	= sh_eth_get_ringparam,
1640 	.set_ringparam	= sh_eth_set_ringparam,
1641 };
1642 
1643 /* network device open function */
1644 static int sh_eth_open(struct net_device *ndev)
1645 {
1646 	int ret = 0;
1647 	struct sh_eth_private *mdp = netdev_priv(ndev);
1648 
1649 	pm_runtime_get_sync(&mdp->pdev->dev);
1650 
1651 	ret = request_irq(ndev->irq, sh_eth_interrupt,
1652 #if defined(CONFIG_CPU_SUBTYPE_SH7763) || \
1653 	defined(CONFIG_CPU_SUBTYPE_SH7764) || \
1654 	defined(CONFIG_CPU_SUBTYPE_SH7757)
1655 				IRQF_SHARED,
1656 #else
1657 				0,
1658 #endif
1659 				ndev->name, ndev);
1660 	if (ret) {
1661 		dev_err(&ndev->dev, "Can not assign IRQ number\n");
1662 		return ret;
1663 	}
1664 
1665 	/* Descriptor set */
1666 	ret = sh_eth_ring_init(ndev);
1667 	if (ret)
1668 		goto out_free_irq;
1669 
1670 	/* device init */
1671 	ret = sh_eth_dev_init(ndev, true);
1672 	if (ret)
1673 		goto out_free_irq;
1674 
1675 	/* PHY control start*/
1676 	ret = sh_eth_phy_start(ndev);
1677 	if (ret)
1678 		goto out_free_irq;
1679 
1680 	return ret;
1681 
1682 out_free_irq:
1683 	free_irq(ndev->irq, ndev);
1684 	pm_runtime_put_sync(&mdp->pdev->dev);
1685 	return ret;
1686 }
1687 
1688 /* Timeout function */
1689 static void sh_eth_tx_timeout(struct net_device *ndev)
1690 {
1691 	struct sh_eth_private *mdp = netdev_priv(ndev);
1692 	struct sh_eth_rxdesc *rxdesc;
1693 	int i;
1694 
1695 	netif_stop_queue(ndev);
1696 
1697 	if (netif_msg_timer(mdp))
1698 		dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x,"
1699 	       " resetting...\n", ndev->name, (int)sh_eth_read(ndev, EESR));
1700 
1701 	/* tx_errors count up */
1702 	ndev->stats.tx_errors++;
1703 
1704 	/* Free all the skbuffs in the Rx queue. */
1705 	for (i = 0; i < mdp->num_rx_ring; i++) {
1706 		rxdesc = &mdp->rx_ring[i];
1707 		rxdesc->status = 0;
1708 		rxdesc->addr = 0xBADF00D0;
1709 		if (mdp->rx_skbuff[i])
1710 			dev_kfree_skb(mdp->rx_skbuff[i]);
1711 		mdp->rx_skbuff[i] = NULL;
1712 	}
1713 	for (i = 0; i < mdp->num_tx_ring; i++) {
1714 		if (mdp->tx_skbuff[i])
1715 			dev_kfree_skb(mdp->tx_skbuff[i]);
1716 		mdp->tx_skbuff[i] = NULL;
1717 	}
1718 
1719 	/* device init */
1720 	sh_eth_dev_init(ndev, true);
1721 }
1722 
1723 /* Packet transmit function */
1724 static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1725 {
1726 	struct sh_eth_private *mdp = netdev_priv(ndev);
1727 	struct sh_eth_txdesc *txdesc;
1728 	u32 entry;
1729 	unsigned long flags;
1730 
1731 	spin_lock_irqsave(&mdp->lock, flags);
1732 	if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
1733 		if (!sh_eth_txfree(ndev)) {
1734 			if (netif_msg_tx_queued(mdp))
1735 				dev_warn(&ndev->dev, "TxFD exhausted.\n");
1736 			netif_stop_queue(ndev);
1737 			spin_unlock_irqrestore(&mdp->lock, flags);
1738 			return NETDEV_TX_BUSY;
1739 		}
1740 	}
1741 	spin_unlock_irqrestore(&mdp->lock, flags);
1742 
1743 	entry = mdp->cur_tx % mdp->num_tx_ring;
1744 	mdp->tx_skbuff[entry] = skb;
1745 	txdesc = &mdp->tx_ring[entry];
1746 	/* soft swap. */
1747 	if (!mdp->cd->hw_swap)
1748 		sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
1749 				 skb->len + 2);
1750 	txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
1751 				      DMA_TO_DEVICE);
1752 	if (skb->len < ETHERSMALL)
1753 		txdesc->buffer_length = ETHERSMALL;
1754 	else
1755 		txdesc->buffer_length = skb->len;
1756 
1757 	if (entry >= mdp->num_tx_ring - 1)
1758 		txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
1759 	else
1760 		txdesc->status |= cpu_to_edmac(mdp, TD_TACT);
1761 
1762 	mdp->cur_tx++;
1763 
1764 	if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
1765 		sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
1766 
1767 	return NETDEV_TX_OK;
1768 }
1769 
1770 /* device close function */
1771 static int sh_eth_close(struct net_device *ndev)
1772 {
1773 	struct sh_eth_private *mdp = netdev_priv(ndev);
1774 
1775 	netif_stop_queue(ndev);
1776 
1777 	/* Disable interrupts by clearing the interrupt mask. */
1778 	sh_eth_write(ndev, 0x0000, EESIPR);
1779 
1780 	/* Stop the chip's Tx and Rx processes. */
1781 	sh_eth_write(ndev, 0, EDTRR);
1782 	sh_eth_write(ndev, 0, EDRRR);
1783 
1784 	/* PHY Disconnect */
1785 	if (mdp->phydev) {
1786 		phy_stop(mdp->phydev);
1787 		phy_disconnect(mdp->phydev);
1788 	}
1789 
1790 	free_irq(ndev->irq, ndev);
1791 
1792 	/* Free all the skbuffs in the Rx queue. */
1793 	sh_eth_ring_free(ndev);
1794 
1795 	/* free DMA buffer */
1796 	sh_eth_free_dma_buffer(mdp);
1797 
1798 	pm_runtime_put_sync(&mdp->pdev->dev);
1799 
1800 	return 0;
1801 }
1802 
1803 static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
1804 {
1805 	struct sh_eth_private *mdp = netdev_priv(ndev);
1806 
1807 	pm_runtime_get_sync(&mdp->pdev->dev);
1808 
1809 	ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
1810 	sh_eth_write(ndev, 0, TROCR);	/* (write clear) */
1811 	ndev->stats.collisions += sh_eth_read(ndev, CDCR);
1812 	sh_eth_write(ndev, 0, CDCR);	/* (write clear) */
1813 	ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
1814 	sh_eth_write(ndev, 0, LCCR);	/* (write clear) */
1815 	if (sh_eth_is_gether(mdp)) {
1816 		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
1817 		sh_eth_write(ndev, 0, CERCR);	/* (write clear) */
1818 		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
1819 		sh_eth_write(ndev, 0, CEECR);	/* (write clear) */
1820 	} else {
1821 		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
1822 		sh_eth_write(ndev, 0, CNDCR);	/* (write clear) */
1823 	}
1824 	pm_runtime_put_sync(&mdp->pdev->dev);
1825 
1826 	return &ndev->stats;
1827 }
1828 
1829 /* ioctl to device function */
1830 static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
1831 				int cmd)
1832 {
1833 	struct sh_eth_private *mdp = netdev_priv(ndev);
1834 	struct phy_device *phydev = mdp->phydev;
1835 
1836 	if (!netif_running(ndev))
1837 		return -EINVAL;
1838 
1839 	if (!phydev)
1840 		return -ENODEV;
1841 
1842 	return phy_mii_ioctl(phydev, rq, cmd);
1843 }
1844 
1845 #if defined(SH_ETH_HAS_TSU)
/* For TSU_POSTn. Please refer to the manual for these (strange) bitfields */
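/*
 * Layout, as inferred from the helpers below: each TSU_POSTn register packs
 * eight CAM entries, four bits per entry (entry 0 in bits 31:28), and each
 * nibble carries one enable bit per port (bit 3 for port 0, bit 1 for
 * port 1).
 */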
1847 static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
1848 					    int entry)
1849 {
1850 	return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4);
1851 }
1852 
1853 static u32 sh_eth_tsu_get_post_mask(int entry)
1854 {
1855 	return 0x0f << (28 - ((entry % 8) * 4));
1856 }
1857 
1858 static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
1859 {
1860 	return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
1861 }
1862 
1863 static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
1864 					     int entry)
1865 {
1866 	struct sh_eth_private *mdp = netdev_priv(ndev);
1867 	u32 tmp;
1868 	void *reg_offset;
1869 
1870 	reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
1871 	tmp = ioread32(reg_offset);
1872 	iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset);
1873 }
1874 
1875 static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
1876 					      int entry)
1877 {
1878 	struct sh_eth_private *mdp = netdev_priv(ndev);
1879 	u32 post_mask, ref_mask, tmp;
1880 	void *reg_offset;
1881 
1882 	reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
1883 	post_mask = sh_eth_tsu_get_post_mask(entry);
1884 	ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;
1885 
1886 	tmp = ioread32(reg_offset);
1887 	iowrite32(tmp & ~post_mask, reg_offset);
1888 
	/* If the other port has this entry enabled, the function returns true */
1890 	return tmp & ref_mask;
1891 }
1892 
1893 static int sh_eth_tsu_busy(struct net_device *ndev)
1894 {
1895 	int timeout = SH_ETH_TSU_TIMEOUT_MS * 100;
1896 	struct sh_eth_private *mdp = netdev_priv(ndev);
1897 
1898 	while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
1899 		udelay(10);
1900 		timeout--;
1901 		if (timeout <= 0) {
1902 			dev_err(&ndev->dev, "%s: timeout\n", __func__);
1903 			return -ETIMEDOUT;
1904 		}
1905 	}
1906 
1907 	return 0;
1908 }
1909 
1910 static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg,
1911 				  const u8 *addr)
1912 {
1913 	u32 val;
1914 
1915 	val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
1916 	iowrite32(val, reg);
1917 	if (sh_eth_tsu_busy(ndev) < 0)
1918 		return -EBUSY;
1919 
1920 	val = addr[4] << 8 | addr[5];
1921 	iowrite32(val, reg + 4);
1922 	if (sh_eth_tsu_busy(ndev) < 0)
1923 		return -EBUSY;
1924 
1925 	return 0;
1926 }
1927 
1928 static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
1929 {
1930 	u32 val;
1931 
1932 	val = ioread32(reg);
1933 	addr[0] = (val >> 24) & 0xff;
1934 	addr[1] = (val >> 16) & 0xff;
1935 	addr[2] = (val >> 8) & 0xff;
1936 	addr[3] = val & 0xff;
1937 	val = ioread32(reg + 4);
1938 	addr[4] = (val >> 8) & 0xff;
1939 	addr[5] = val & 0xff;
1940 }
1941 
1942 
1943 static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
1944 {
1945 	struct sh_eth_private *mdp = netdev_priv(ndev);
1946 	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
1947 	int i;
1948 	u8 c_addr[ETH_ALEN];
1949 
1950 	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
1951 		sh_eth_tsu_read_entry(reg_offset, c_addr);
1952 		if (memcmp(addr, c_addr, ETH_ALEN) == 0)
1953 			return i;
1954 	}
1955 
1956 	return -ENOENT;
1957 }
1958 
1959 static int sh_eth_tsu_find_empty(struct net_device *ndev)
1960 {
1961 	u8 blank[ETH_ALEN];
1962 	int entry;
1963 
1964 	memset(blank, 0, sizeof(blank));
1965 	entry = sh_eth_tsu_find_entry(ndev, blank);
1966 	return (entry < 0) ? -ENOMEM : entry;
1967 }
1968 
1969 static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
1970 					      int entry)
1971 {
1972 	struct sh_eth_private *mdp = netdev_priv(ndev);
1973 	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
1974 	int ret;
1975 	u8 blank[ETH_ALEN];
1976 
1977 	sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
1978 			 ~(1 << (31 - entry)), TSU_TEN);
1979 
1980 	memset(blank, 0, sizeof(blank));
1981 	ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
1982 	if (ret < 0)
1983 		return ret;
1984 	return 0;
1985 }
1986 
1987 static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
1988 {
1989 	struct sh_eth_private *mdp = netdev_priv(ndev);
1990 	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
1991 	int i, ret;
1992 
1993 	if (!mdp->cd->tsu)
1994 		return 0;
1995 
1996 	i = sh_eth_tsu_find_entry(ndev, addr);
1997 	if (i < 0) {
1998 		/* No entry found, create one */
1999 		i = sh_eth_tsu_find_empty(ndev);
2000 		if (i < 0)
2001 			return -ENOMEM;
2002 		ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);
2003 		if (ret < 0)
2004 			return ret;
2005 
2006 		/* Enable the entry */
2007 		sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
2008 				 (1 << (31 - i)), TSU_TEN);
2009 	}
2010 
2011 	/* Entry found or created, enable POST */
2012 	sh_eth_tsu_enable_cam_entry_post(ndev, i);
2013 
2014 	return 0;
2015 }
2016 
2017 static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr)
2018 {
2019 	struct sh_eth_private *mdp = netdev_priv(ndev);
2020 	int i, ret;
2021 
2022 	if (!mdp->cd->tsu)
2023 		return 0;
2024 
2025 	i = sh_eth_tsu_find_entry(ndev, addr);
	if (i >= 0) {
		/* Entry found; find_entry() returns -ENOENT otherwise */
2028 		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2029 			goto done;
2030 
		/* Disable the entry if both ports have disabled it */
2032 		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2033 		if (ret < 0)
2034 			return ret;
2035 	}
2036 done:
2037 	return 0;
2038 }
2039 
2040 static int sh_eth_tsu_purge_all(struct net_device *ndev)
2041 {
2042 	struct sh_eth_private *mdp = netdev_priv(ndev);
2043 	int i, ret;
2044 
2045 	if (unlikely(!mdp->cd->tsu))
2046 		return 0;
2047 
2048 	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
2049 		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2050 			continue;
2051 
		/* Disable the entry if both ports have disabled it */
2053 		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2054 		if (ret < 0)
2055 			return ret;
2056 	}
2057 
2058 	return 0;
2059 }
2060 
2061 static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
2062 {
2063 	struct sh_eth_private *mdp = netdev_priv(ndev);
2064 	u8 addr[ETH_ALEN];
2065 	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2066 	int i;
2067 
2068 	if (unlikely(!mdp->cd->tsu))
2069 		return;
2070 
2071 	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2072 		sh_eth_tsu_read_entry(reg_offset, addr);
2073 		if (is_multicast_ether_addr(addr))
2074 			sh_eth_tsu_del_entry(ndev, addr);
2075 	}
2076 }
2077 
/* Set the multicast reception mode */
2079 static void sh_eth_set_multicast_list(struct net_device *ndev)
2080 {
2081 	struct sh_eth_private *mdp = netdev_priv(ndev);
2082 	u32 ecmr_bits;
2083 	int mcast_all = 0;
2084 	unsigned long flags;
2085 
2086 	spin_lock_irqsave(&mdp->lock, flags);
2087 	/*
2088 	 * Initial condition is MCT = 1, PRM = 0.
2089 	 * Depending on ndev->flags, set PRM or clear MCT
2090 	 */
2091 	ecmr_bits = (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | ECMR_MCT;
2092 
2093 	if (!(ndev->flags & IFF_MULTICAST)) {
2094 		sh_eth_tsu_purge_mcast(ndev);
2095 		mcast_all = 1;
2096 	}
2097 	if (ndev->flags & IFF_ALLMULTI) {
2098 		sh_eth_tsu_purge_mcast(ndev);
2099 		ecmr_bits &= ~ECMR_MCT;
2100 		mcast_all = 1;
2101 	}
2102 
2103 	if (ndev->flags & IFF_PROMISC) {
2104 		sh_eth_tsu_purge_all(ndev);
2105 		ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
2106 	} else if (mdp->cd->tsu) {
2107 		struct netdev_hw_addr *ha;
2108 		netdev_for_each_mc_addr(ha, ndev) {
2109 			if (mcast_all && is_multicast_ether_addr(ha->addr))
2110 				continue;
2111 
2112 			if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
2113 				if (!mcast_all) {
2114 					sh_eth_tsu_purge_mcast(ndev);
2115 					ecmr_bits &= ~ECMR_MCT;
2116 					mcast_all = 1;
2117 				}
2118 			}
2119 		}
2120 	} else {
2121 		/* Normal, unicast/broadcast-only mode. */
2122 		ecmr_bits = (ecmr_bits & ~ECMR_PRM) | ECMR_MCT;
2123 	}
2124 
2125 	/* update the ethernet mode */
2126 	sh_eth_write(ndev, ecmr_bits, ECMR);
2127 
2128 	spin_unlock_irqrestore(&mdp->lock, flags);
2129 }
2130 
2131 static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
2132 {
2133 	if (!mdp->port)
2134 		return TSU_VTAG0;
2135 	else
2136 		return TSU_VTAG1;
2137 }
2138 
2139 static int sh_eth_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
2140 {
2141 	struct sh_eth_private *mdp = netdev_priv(ndev);
2142 	int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2143 
2144 	if (unlikely(!mdp->cd->tsu))
2145 		return -EPERM;
2146 
2147 	/* No filtering if vid = 0 */
2148 	if (!vid)
2149 		return 0;
2150 
2151 	mdp->vlan_num_ids++;
2152 
2153 	/*
2154 	 * The controller has one VLAN tag HW filter. So, if the filter is
2155 	 * already enabled, the driver disables it and the filte
2156 	 */
2157 	if (mdp->vlan_num_ids > 1) {
2158 		/* disable VLAN filter */
2159 		sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2160 		return 0;
2161 	}
2162 
2163 	sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
2164 			 vtag_reg_index);
2165 
2166 	return 0;
2167 }
2168 
2169 static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
2170 {
2171 	struct sh_eth_private *mdp = netdev_priv(ndev);
2172 	int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2173 
2174 	if (unlikely(!mdp->cd->tsu))
2175 		return -EPERM;
2176 
2177 	/* No filtering if vid = 0 */
2178 	if (!vid)
2179 		return 0;
2180 
2181 	mdp->vlan_num_ids--;
2182 	sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2183 
2184 	return 0;
2185 }
2186 #endif /* SH_ETH_HAS_TSU */
2187 
2188 /* SuperH's TSU register init function */
2189 static void sh_eth_tsu_init(struct sh_eth_private *mdp)
2190 {
2191 	sh_eth_tsu_write(mdp, 0, TSU_FWEN0);	/* Disable forward(0->1) */
2192 	sh_eth_tsu_write(mdp, 0, TSU_FWEN1);	/* Disable forward(1->0) */
2193 	sh_eth_tsu_write(mdp, 0, TSU_FCM);	/* forward fifo 3k-3k */
2194 	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
2195 	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
2196 	sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
2197 	sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
2198 	sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
2199 	sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
2200 	sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
2201 	if (sh_eth_is_gether(mdp)) {
2202 		sh_eth_tsu_write(mdp, 0, TSU_QTAG0);	/* Disable QTAG(0->1) */
2203 		sh_eth_tsu_write(mdp, 0, TSU_QTAG1);	/* Disable QTAG(1->0) */
2204 	} else {
2205 		sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);	/* Disable QTAG(0->1) */
2206 		sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);	/* Disable QTAG(1->0) */
2207 	}
2208 	sh_eth_tsu_write(mdp, 0, TSU_FWSR);	/* all interrupt status clear */
2209 	sh_eth_tsu_write(mdp, 0, TSU_FWINMK);	/* Disable all interrupt */
2210 	sh_eth_tsu_write(mdp, 0, TSU_TEN);	/* Disable all CAM entry */
2211 	sh_eth_tsu_write(mdp, 0, TSU_POST1);	/* Disable CAM entry [ 0- 7] */
2212 	sh_eth_tsu_write(mdp, 0, TSU_POST2);	/* Disable CAM entry [ 8-15] */
2213 	sh_eth_tsu_write(mdp, 0, TSU_POST3);	/* Disable CAM entry [16-23] */
2214 	sh_eth_tsu_write(mdp, 0, TSU_POST4);	/* Disable CAM entry [24-31] */
2215 }
2216 
2217 /* MDIO bus release function */
2218 static int sh_mdio_release(struct net_device *ndev)
2219 {
2220 	struct mii_bus *bus = dev_get_drvdata(&ndev->dev);
2221 
2222 	/* unregister mdio bus */
2223 	mdiobus_unregister(bus);
2224 
2225 	/* remove mdio bus info from net_device */
2226 	dev_set_drvdata(&ndev->dev, NULL);
2227 
	/* free the PHY interrupt table */
2229 	kfree(bus->irq);
2230 
2231 	/* free bitbang info */
2232 	free_mdio_bitbang(bus);
2233 
2234 	return 0;
2235 }
2236 
2237 /* MDIO bus init function */
2238 static int sh_mdio_init(struct net_device *ndev, int id,
2239 			struct sh_eth_plat_data *pd)
2240 {
2241 	int ret, i;
2242 	struct bb_info *bitbang;
2243 	struct sh_eth_private *mdp = netdev_priv(ndev);
2244 
2245 	/* create bit control struct for PHY */
2246 	bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL);
2247 	if (!bitbang) {
2248 		ret = -ENOMEM;
2249 		goto out;
2250 	}
2251 
2252 	/* bitbang init */
2253 	bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
2254 	bitbang->set_gate = pd->set_mdio_gate;
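	/*
	 * PIR bit masks for the bit-banged MDIO lines below.  The bit numbers
	 * follow from the mask values; reading MMD as the pin-direction
	 * control is an assumption (see the hardware manual):
	 *   bit 3 = MDI (data in), bit 2 = MDO (data out),
	 *   bit 1 = MMD (direction), bit 0 = MDC (clock).
	 */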
2255 	bitbang->mdi_msk = 0x08;
2256 	bitbang->mdo_msk = 0x04;
2257 	bitbang->mmd_msk = 0x02;/* MMD */
2258 	bitbang->mdc_msk = 0x01;
2259 	bitbang->ctrl.ops = &bb_ops;
2260 
2261 	/* MII controller setting */
2262 	mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
2263 	if (!mdp->mii_bus) {
2264 		ret = -ENOMEM;
2265 		goto out_free_bitbang;
2266 	}
2267 
2268 	/* Hook up MII support for ethtool */
2269 	mdp->mii_bus->name = "sh_mii";
2270 	mdp->mii_bus->parent = &ndev->dev;
2271 	snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
2272 		mdp->pdev->name, id);
2273 
2274 	/* PHY IRQ */
2275 	mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
2276 	if (!mdp->mii_bus->irq) {
2277 		ret = -ENOMEM;
2278 		goto out_free_bus;
2279 	}
2280 
2281 	for (i = 0; i < PHY_MAX_ADDR; i++)
2282 		mdp->mii_bus->irq[i] = PHY_POLL;
2283 
	/* register the MDIO bus */
2285 	ret = mdiobus_register(mdp->mii_bus);
2286 	if (ret)
2287 		goto out_free_irq;
2288 
2289 	dev_set_drvdata(&ndev->dev, mdp->mii_bus);
2290 
2291 	return 0;
2292 
2293 out_free_irq:
2294 	kfree(mdp->mii_bus->irq);
2295 
2296 out_free_bus:
2297 	free_mdio_bitbang(mdp->mii_bus);
2298 
2299 out_free_bitbang:
2300 	kfree(bitbang);
2301 
2302 out:
2303 	return ret;
2304 }
2305 
2306 static const u16 *sh_eth_get_register_offset(int register_type)
2307 {
2308 	const u16 *reg_offset = NULL;
2309 
2310 	switch (register_type) {
2311 	case SH_ETH_REG_GIGABIT:
2312 		reg_offset = sh_eth_offset_gigabit;
2313 		break;
2314 	case SH_ETH_REG_FAST_SH4:
2315 		reg_offset = sh_eth_offset_fast_sh4;
2316 		break;
2317 	case SH_ETH_REG_FAST_SH3_SH2:
2318 		reg_offset = sh_eth_offset_fast_sh3_sh2;
2319 		break;
2320 	default:
2321 		printk(KERN_ERR "Unknown register type (%d)\n", register_type);
2322 		break;
2323 	}
2324 
2325 	return reg_offset;
2326 }
2327 
2328 static const struct net_device_ops sh_eth_netdev_ops = {
2329 	.ndo_open		= sh_eth_open,
2330 	.ndo_stop		= sh_eth_close,
2331 	.ndo_start_xmit		= sh_eth_start_xmit,
2332 	.ndo_get_stats		= sh_eth_get_stats,
2333 #if defined(SH_ETH_HAS_TSU)
2334 	.ndo_set_rx_mode	= sh_eth_set_multicast_list,
2335 	.ndo_vlan_rx_add_vid	= sh_eth_vlan_rx_add_vid,
2336 	.ndo_vlan_rx_kill_vid	= sh_eth_vlan_rx_kill_vid,
2337 #endif
2338 	.ndo_tx_timeout		= sh_eth_tx_timeout,
2339 	.ndo_do_ioctl		= sh_eth_do_ioctl,
2340 	.ndo_validate_addr	= eth_validate_addr,
2341 	.ndo_set_mac_address	= eth_mac_addr,
2342 	.ndo_change_mtu		= eth_change_mtu,
2343 };
2344 
2345 static int sh_eth_drv_probe(struct platform_device *pdev)
2346 {
2347 	int ret, devno = 0;
2348 	struct resource *res;
2349 	struct net_device *ndev = NULL;
2350 	struct sh_eth_private *mdp = NULL;
2351 	struct sh_eth_plat_data *pd;
2352 
2353 	/* get base addr */
2354 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2355 	if (unlikely(res == NULL)) {
2356 		dev_err(&pdev->dev, "invalid resource\n");
2357 		ret = -EINVAL;
2358 		goto out;
2359 	}
2360 
2361 	ndev = alloc_etherdev(sizeof(struct sh_eth_private));
2362 	if (!ndev) {
2363 		ret = -ENOMEM;
2364 		goto out;
2365 	}
2366 
2367 	/* The sh Ether-specific entries in the device structure. */
2368 	ndev->base_addr = res->start;
2369 	devno = pdev->id;
2370 	if (devno < 0)
2371 		devno = 0;
2372 
2373 	ndev->dma = -1;
2374 	ret = platform_get_irq(pdev, 0);
2375 	if (ret < 0) {
2376 		ret = -ENODEV;
2377 		goto out_release;
2378 	}
2379 	ndev->irq = ret;
2380 
2381 	SET_NETDEV_DEV(ndev, &pdev->dev);
2382 
2383 	/* Fill in the fields of the device structure with ethernet values. */
2384 	ether_setup(ndev);
2385 
2386 	mdp = netdev_priv(ndev);
2387 	mdp->num_tx_ring = TX_RING_SIZE;
2388 	mdp->num_rx_ring = RX_RING_SIZE;
2389 	mdp->addr = ioremap(res->start, resource_size(res));
2390 	if (mdp->addr == NULL) {
2391 		ret = -ENOMEM;
2392 		dev_err(&pdev->dev, "ioremap failed.\n");
2393 		goto out_release;
2394 	}
2395 
2396 	spin_lock_init(&mdp->lock);
2397 	mdp->pdev = pdev;
2398 	pm_runtime_enable(&pdev->dev);
2399 	pm_runtime_resume(&pdev->dev);
2400 
2401 	pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data);
2402 	/* get PHY ID */
2403 	mdp->phy_id = pd->phy;
2404 	mdp->phy_interface = pd->phy_interface;
2405 	/* EDMAC endian */
2406 	mdp->edmac_endian = pd->edmac_endian;
2407 	mdp->no_ether_link = pd->no_ether_link;
2408 	mdp->ether_link_active_low = pd->ether_link_active_low;
2409 	mdp->reg_offset = sh_eth_get_register_offset(pd->register_type);
2410 
2411 	/* set cpu data */
2412 #if defined(SH_ETH_HAS_BOTH_MODULES)
2413 	mdp->cd = sh_eth_get_cpu_data(mdp);
2414 #else
2415 	mdp->cd = &sh_eth_my_cpu_data;
2416 #endif
2417 	sh_eth_set_default_cpu_data(mdp->cd);
2418 
2419 	/* set function */
2420 	ndev->netdev_ops = &sh_eth_netdev_ops;
2421 	SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
2422 	ndev->watchdog_timeo = TX_TIMEOUT;
2423 
2424 	/* debug message level */
2425 	mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
2426 
2427 	/* read and set MAC address */
2428 	read_mac_address(ndev, pd->mac_addr);
2429 
2430 	/* ioremap the TSU registers */
2431 	if (mdp->cd->tsu) {
2432 		struct resource *rtsu;
2433 		rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (!rtsu) {
			dev_err(&pdev->dev, "no TSU resource found\n");
			ret = -ENODEV;
			goto out_release;
		}
		mdp->tsu_addr = ioremap(rtsu->start,
					resource_size(rtsu));
		if (mdp->tsu_addr == NULL) {
			ret = -ENOMEM;
			dev_err(&pdev->dev, "TSU ioremap failed.\n");
			goto out_release;
		}
		mdp->port = devno % 2;
		ndev->features = NETIF_F_HW_VLAN_FILTER;
	}
2443 
	/* initialize the first device, or any device that requests it */
2445 	if (!devno || pd->needs_init) {
2446 		if (mdp->cd->chip_reset)
2447 			mdp->cd->chip_reset(ndev);
2448 
2449 		if (mdp->cd->tsu) {
			/* TSU init (done only once, here) */
2451 			sh_eth_tsu_init(mdp);
2452 		}
2453 	}
2454 
2455 	/* network device register */
2456 	ret = register_netdev(ndev);
2457 	if (ret)
2458 		goto out_release;
2459 
2460 	/* mdio bus init */
2461 	ret = sh_mdio_init(ndev, pdev->id, pd);
2462 	if (ret)
2463 		goto out_unregister;
2464 
2465 	/* print device information */
2466 	pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
2467 	       (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
2468 
2469 	platform_set_drvdata(pdev, ndev);
2470 
2471 	return ret;
2472 
2473 out_unregister:
2474 	unregister_netdev(ndev);
2475 
2476 out_release:
2477 	/* net_dev free */
2478 	if (mdp && mdp->addr)
2479 		iounmap(mdp->addr);
2480 	if (mdp && mdp->tsu_addr)
2481 		iounmap(mdp->tsu_addr);
2482 	if (ndev)
2483 		free_netdev(ndev);
2484 
2485 out:
2486 	return ret;
2487 }
2488 
2489 static int sh_eth_drv_remove(struct platform_device *pdev)
2490 {
2491 	struct net_device *ndev = platform_get_drvdata(pdev);
2492 	struct sh_eth_private *mdp = netdev_priv(ndev);
2493 
2494 	if (mdp->cd->tsu)
2495 		iounmap(mdp->tsu_addr);
2496 	sh_mdio_release(ndev);
2497 	unregister_netdev(ndev);
2498 	pm_runtime_disable(&pdev->dev);
2499 	iounmap(mdp->addr);
2500 	free_netdev(ndev);
2501 	platform_set_drvdata(pdev, NULL);
2502 
2503 	return 0;
2504 }
2505 
2506 static int sh_eth_runtime_nop(struct device *dev)
2507 {
2508 	/*
2509 	 * Runtime PM callback shared between ->runtime_suspend()
2510 	 * and ->runtime_resume(). Simply returns success.
2511 	 *
2512 	 * This driver re-initializes all registers after
2513 	 * pm_runtime_get_sync() anyway so there is no need
2514 	 * to save and restore registers here.
2515 	 */
2516 	return 0;
2517 }
2518 
2519 static struct dev_pm_ops sh_eth_dev_pm_ops = {
2520 	.runtime_suspend = sh_eth_runtime_nop,
2521 	.runtime_resume = sh_eth_runtime_nop,
2522 };
2523 
2524 static struct platform_driver sh_eth_driver = {
2525 	.probe = sh_eth_drv_probe,
2526 	.remove = sh_eth_drv_remove,
2527 	.driver = {
2528 		   .name = CARDNAME,
2529 		   .pm = &sh_eth_dev_pm_ops,
2530 	},
2531 };
2532 
2533 module_platform_driver(sh_eth_driver);
2534 
2535 MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
2536 MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
2537 MODULE_LICENSE("GPL v2");
2538