1 /*  SuperH Ethernet device driver
2  *
3  *  Copyright (C) 2006-2012 Nobuhiro Iwamatsu
4  *  Copyright (C) 2008-2014 Renesas Solutions Corp.
5  *  Copyright (C) 2013-2014 Cogent Embedded, Inc.
6  *  Copyright (C) 2014 Codethink Limited
7  *
8  *  This program is free software; you can redistribute it and/or modify it
9  *  under the terms and conditions of the GNU General Public License,
10  *  version 2, as published by the Free Software Foundation.
11  *
12  *  This program is distributed in the hope it will be useful, but WITHOUT
13  *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14  *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15  *  more details.
16  *
17  *  The full GNU General Public License is included in this distribution in
18  *  the file called "COPYING".
19  */
20 
21 #include <linux/module.h>
22 #include <linux/kernel.h>
23 #include <linux/spinlock.h>
24 #include <linux/interrupt.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/etherdevice.h>
27 #include <linux/delay.h>
28 #include <linux/platform_device.h>
29 #include <linux/mdio-bitbang.h>
30 #include <linux/netdevice.h>
31 #include <linux/of.h>
32 #include <linux/of_device.h>
33 #include <linux/of_irq.h>
34 #include <linux/of_net.h>
35 #include <linux/phy.h>
36 #include <linux/cache.h>
37 #include <linux/io.h>
38 #include <linux/pm_runtime.h>
39 #include <linux/slab.h>
40 #include <linux/ethtool.h>
41 #include <linux/if_vlan.h>
42 #include <linux/clk.h>
43 #include <linux/sh_eth.h>
44 #include <linux/of_mdio.h>
45 
46 #include "sh_eth.h"
47 
48 #define SH_ETH_DEF_MSG_ENABLE \
49 		(NETIF_MSG_LINK	| \
50 		NETIF_MSG_TIMER	| \
51 		NETIF_MSG_RX_ERR | \
52 		NETIF_MSG_TX_ERR)
53 
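/* Per-SoC register offset tables: each table maps the common register index
 * enum from sh_eth.h onto the MMIO offset used by that particular controller
 * variant (GETHER, RZ fast, R-Car fast, SH4 fast, SH3/SH2 fast Ethernet).
 */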
54 static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
55 	[EDSR]		= 0x0000,
56 	[EDMR]		= 0x0400,
57 	[EDTRR]		= 0x0408,
58 	[EDRRR]		= 0x0410,
59 	[EESR]		= 0x0428,
60 	[EESIPR]	= 0x0430,
61 	[TDLAR]		= 0x0010,
62 	[TDFAR]		= 0x0014,
63 	[TDFXR]		= 0x0018,
64 	[TDFFR]		= 0x001c,
65 	[RDLAR]		= 0x0030,
66 	[RDFAR]		= 0x0034,
67 	[RDFXR]		= 0x0038,
68 	[RDFFR]		= 0x003c,
69 	[TRSCER]	= 0x0438,
70 	[RMFCR]		= 0x0440,
71 	[TFTR]		= 0x0448,
72 	[FDR]		= 0x0450,
73 	[RMCR]		= 0x0458,
74 	[RPADIR]	= 0x0460,
75 	[FCFTR]		= 0x0468,
76 	[CSMR]		= 0x04E4,
77 
78 	[ECMR]		= 0x0500,
79 	[ECSR]		= 0x0510,
80 	[ECSIPR]	= 0x0518,
81 	[PIR]		= 0x0520,
82 	[PSR]		= 0x0528,
83 	[PIPR]		= 0x052c,
84 	[RFLR]		= 0x0508,
85 	[APR]		= 0x0554,
86 	[MPR]		= 0x0558,
87 	[PFTCR]		= 0x055c,
88 	[PFRCR]		= 0x0560,
89 	[TPAUSER]	= 0x0564,
90 	[GECMR]		= 0x05b0,
91 	[BCULR]		= 0x05b4,
92 	[MAHR]		= 0x05c0,
93 	[MALR]		= 0x05c8,
94 	[TROCR]		= 0x0700,
95 	[CDCR]		= 0x0708,
96 	[LCCR]		= 0x0710,
97 	[CEFCR]		= 0x0740,
98 	[FRECR]		= 0x0748,
99 	[TSFRCR]	= 0x0750,
100 	[TLFRCR]	= 0x0758,
101 	[RFCR]		= 0x0760,
102 	[CERCR]		= 0x0768,
103 	[CEECR]		= 0x0770,
104 	[MAFCR]		= 0x0778,
105 	[RMII_MII]	= 0x0790,
106 
107 	[ARSTR]		= 0x0000,
108 	[TSU_CTRST]	= 0x0004,
109 	[TSU_FWEN0]	= 0x0010,
110 	[TSU_FWEN1]	= 0x0014,
111 	[TSU_FCM]	= 0x0018,
112 	[TSU_BSYSL0]	= 0x0020,
113 	[TSU_BSYSL1]	= 0x0024,
114 	[TSU_PRISL0]	= 0x0028,
115 	[TSU_PRISL1]	= 0x002c,
116 	[TSU_FWSL0]	= 0x0030,
117 	[TSU_FWSL1]	= 0x0034,
118 	[TSU_FWSLC]	= 0x0038,
119 	[TSU_QTAG0]	= 0x0040,
120 	[TSU_QTAG1]	= 0x0044,
121 	[TSU_FWSR]	= 0x0050,
122 	[TSU_FWINMK]	= 0x0054,
123 	[TSU_ADQT0]	= 0x0048,
124 	[TSU_ADQT1]	= 0x004c,
125 	[TSU_VTAG0]	= 0x0058,
126 	[TSU_VTAG1]	= 0x005c,
127 	[TSU_ADSBSY]	= 0x0060,
128 	[TSU_TEN]	= 0x0064,
129 	[TSU_POST1]	= 0x0070,
130 	[TSU_POST2]	= 0x0074,
131 	[TSU_POST3]	= 0x0078,
132 	[TSU_POST4]	= 0x007c,
133 	[TSU_ADRH0]	= 0x0100,
134 	[TSU_ADRL0]	= 0x0104,
135 	[TSU_ADRH31]	= 0x01f8,
136 	[TSU_ADRL31]	= 0x01fc,
137 
138 	[TXNLCR0]	= 0x0080,
139 	[TXALCR0]	= 0x0084,
140 	[RXNLCR0]	= 0x0088,
141 	[RXALCR0]	= 0x008c,
142 	[FWNLCR0]	= 0x0090,
143 	[FWALCR0]	= 0x0094,
144 	[TXNLCR1]	= 0x00a0,
145 	[TXALCR1]	= 0x00a4,
146 	[RXNLCR1]	= 0x00a8,
147 	[RXALCR1]	= 0x00ac,
148 	[FWNLCR1]	= 0x00b0,
149 	[FWALCR1]	= 0x00b4,
150 };
151 
152 static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
153 	[EDSR]		= 0x0000,
154 	[EDMR]		= 0x0400,
155 	[EDTRR]		= 0x0408,
156 	[EDRRR]		= 0x0410,
157 	[EESR]		= 0x0428,
158 	[EESIPR]	= 0x0430,
159 	[TDLAR]		= 0x0010,
160 	[TDFAR]		= 0x0014,
161 	[TDFXR]		= 0x0018,
162 	[TDFFR]		= 0x001c,
163 	[RDLAR]		= 0x0030,
164 	[RDFAR]		= 0x0034,
165 	[RDFXR]		= 0x0038,
166 	[RDFFR]		= 0x003c,
167 	[TRSCER]	= 0x0438,
168 	[RMFCR]		= 0x0440,
169 	[TFTR]		= 0x0448,
170 	[FDR]		= 0x0450,
171 	[RMCR]		= 0x0458,
172 	[RPADIR]	= 0x0460,
173 	[FCFTR]		= 0x0468,
174 	[CSMR]		= 0x04E4,
175 
176 	[ECMR]		= 0x0500,
177 	[RFLR]		= 0x0508,
178 	[ECSR]		= 0x0510,
179 	[ECSIPR]	= 0x0518,
180 	[PIR]		= 0x0520,
181 	[APR]		= 0x0554,
182 	[MPR]		= 0x0558,
183 	[PFTCR]		= 0x055c,
184 	[PFRCR]		= 0x0560,
185 	[TPAUSER]	= 0x0564,
186 	[MAHR]		= 0x05c0,
187 	[MALR]		= 0x05c8,
188 	[CEFCR]		= 0x0740,
189 	[FRECR]		= 0x0748,
190 	[TSFRCR]	= 0x0750,
191 	[TLFRCR]	= 0x0758,
192 	[RFCR]		= 0x0760,
193 	[MAFCR]		= 0x0778,
194 
195 	[ARSTR]		= 0x0000,
196 	[TSU_CTRST]	= 0x0004,
197 	[TSU_VTAG0]	= 0x0058,
198 	[TSU_ADSBSY]	= 0x0060,
199 	[TSU_TEN]	= 0x0064,
200 	[TSU_ADRH0]	= 0x0100,
201 	[TSU_ADRL0]	= 0x0104,
202 	[TSU_ADRH31]	= 0x01f8,
203 	[TSU_ADRL31]	= 0x01fc,
204 
205 	[TXNLCR0]	= 0x0080,
206 	[TXALCR0]	= 0x0084,
207 	[RXNLCR0]	= 0x0088,
208 	[RXALCR0]	= 0x008C,
209 };
210 
211 static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
212 	[ECMR]		= 0x0300,
213 	[RFLR]		= 0x0308,
214 	[ECSR]		= 0x0310,
215 	[ECSIPR]	= 0x0318,
216 	[PIR]		= 0x0320,
217 	[PSR]		= 0x0328,
218 	[RDMLR]		= 0x0340,
219 	[IPGR]		= 0x0350,
220 	[APR]		= 0x0354,
221 	[MPR]		= 0x0358,
222 	[RFCF]		= 0x0360,
223 	[TPAUSER]	= 0x0364,
224 	[TPAUSECR]	= 0x0368,
225 	[MAHR]		= 0x03c0,
226 	[MALR]		= 0x03c8,
227 	[TROCR]		= 0x03d0,
228 	[CDCR]		= 0x03d4,
229 	[LCCR]		= 0x03d8,
230 	[CNDCR]		= 0x03dc,
231 	[CEFCR]		= 0x03e4,
232 	[FRECR]		= 0x03e8,
233 	[TSFRCR]	= 0x03ec,
234 	[TLFRCR]	= 0x03f0,
235 	[RFCR]		= 0x03f4,
236 	[MAFCR]		= 0x03f8,
237 
238 	[EDMR]		= 0x0200,
239 	[EDTRR]		= 0x0208,
240 	[EDRRR]		= 0x0210,
241 	[TDLAR]		= 0x0218,
242 	[RDLAR]		= 0x0220,
243 	[EESR]		= 0x0228,
244 	[EESIPR]	= 0x0230,
245 	[TRSCER]	= 0x0238,
246 	[RMFCR]		= 0x0240,
247 	[TFTR]		= 0x0248,
248 	[FDR]		= 0x0250,
249 	[RMCR]		= 0x0258,
250 	[TFUCR]		= 0x0264,
251 	[RFOCR]		= 0x0268,
252 	[RMIIMODE]      = 0x026c,
253 	[FCFTR]		= 0x0270,
254 	[TRIMD]		= 0x027c,
255 };
256 
257 static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
258 	[ECMR]		= 0x0100,
259 	[RFLR]		= 0x0108,
260 	[ECSR]		= 0x0110,
261 	[ECSIPR]	= 0x0118,
262 	[PIR]		= 0x0120,
263 	[PSR]		= 0x0128,
264 	[RDMLR]		= 0x0140,
265 	[IPGR]		= 0x0150,
266 	[APR]		= 0x0154,
267 	[MPR]		= 0x0158,
268 	[TPAUSER]	= 0x0164,
269 	[RFCF]		= 0x0160,
270 	[TPAUSECR]	= 0x0168,
271 	[BCFRR]		= 0x016c,
272 	[MAHR]		= 0x01c0,
273 	[MALR]		= 0x01c8,
274 	[TROCR]		= 0x01d0,
275 	[CDCR]		= 0x01d4,
276 	[LCCR]		= 0x01d8,
277 	[CNDCR]		= 0x01dc,
278 	[CEFCR]		= 0x01e4,
279 	[FRECR]		= 0x01e8,
280 	[TSFRCR]	= 0x01ec,
281 	[TLFRCR]	= 0x01f0,
282 	[RFCR]		= 0x01f4,
283 	[MAFCR]		= 0x01f8,
284 	[RTRATE]	= 0x01fc,
285 
286 	[EDMR]		= 0x0000,
287 	[EDTRR]		= 0x0008,
288 	[EDRRR]		= 0x0010,
289 	[TDLAR]		= 0x0018,
290 	[RDLAR]		= 0x0020,
291 	[EESR]		= 0x0028,
292 	[EESIPR]	= 0x0030,
293 	[TRSCER]	= 0x0038,
294 	[RMFCR]		= 0x0040,
295 	[TFTR]		= 0x0048,
296 	[FDR]		= 0x0050,
297 	[RMCR]		= 0x0058,
298 	[TFUCR]		= 0x0064,
299 	[RFOCR]		= 0x0068,
300 	[FCFTR]		= 0x0070,
301 	[RPADIR]	= 0x0078,
302 	[TRIMD]		= 0x007c,
303 	[RBWAR]		= 0x00c8,
304 	[RDFAR]		= 0x00cc,
305 	[TBRAR]		= 0x00d4,
306 	[TDFAR]		= 0x00d8,
307 };
308 
309 static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
310 	[ECMR]		= 0x0160,
311 	[ECSR]		= 0x0164,
312 	[ECSIPR]	= 0x0168,
313 	[PIR]		= 0x016c,
314 	[MAHR]		= 0x0170,
315 	[MALR]		= 0x0174,
316 	[RFLR]		= 0x0178,
317 	[PSR]		= 0x017c,
318 	[TROCR]		= 0x0180,
319 	[CDCR]		= 0x0184,
320 	[LCCR]		= 0x0188,
321 	[CNDCR]		= 0x018c,
322 	[CEFCR]		= 0x0194,
323 	[FRECR]		= 0x0198,
324 	[TSFRCR]	= 0x019c,
325 	[TLFRCR]	= 0x01a0,
326 	[RFCR]		= 0x01a4,
327 	[MAFCR]		= 0x01a8,
328 	[IPGR]		= 0x01b4,
329 	[APR]		= 0x01b8,
330 	[MPR]		= 0x01bc,
331 	[TPAUSER]	= 0x01c4,
332 	[BCFR]		= 0x01cc,
333 
334 	[ARSTR]		= 0x0000,
335 	[TSU_CTRST]	= 0x0004,
336 	[TSU_FWEN0]	= 0x0010,
337 	[TSU_FWEN1]	= 0x0014,
338 	[TSU_FCM]	= 0x0018,
339 	[TSU_BSYSL0]	= 0x0020,
340 	[TSU_BSYSL1]	= 0x0024,
341 	[TSU_PRISL0]	= 0x0028,
342 	[TSU_PRISL1]	= 0x002c,
343 	[TSU_FWSL0]	= 0x0030,
344 	[TSU_FWSL1]	= 0x0034,
345 	[TSU_FWSLC]	= 0x0038,
346 	[TSU_QTAGM0]	= 0x0040,
347 	[TSU_QTAGM1]	= 0x0044,
348 	[TSU_ADQT0]	= 0x0048,
349 	[TSU_ADQT1]	= 0x004c,
350 	[TSU_FWSR]	= 0x0050,
351 	[TSU_FWINMK]	= 0x0054,
352 	[TSU_ADSBSY]	= 0x0060,
353 	[TSU_TEN]	= 0x0064,
354 	[TSU_POST1]	= 0x0070,
355 	[TSU_POST2]	= 0x0074,
356 	[TSU_POST3]	= 0x0078,
357 	[TSU_POST4]	= 0x007c,
358 
359 	[TXNLCR0]	= 0x0080,
360 	[TXALCR0]	= 0x0084,
361 	[RXNLCR0]	= 0x0088,
362 	[RXALCR0]	= 0x008c,
363 	[FWNLCR0]	= 0x0090,
364 	[FWALCR0]	= 0x0094,
365 	[TXNLCR1]	= 0x00a0,
366 	[TXALCR1]	= 0x00a4,
367 	[RXNLCR1]	= 0x00a8,
368 	[RXALCR1]	= 0x00ac,
369 	[FWNLCR1]	= 0x00b0,
370 	[FWALCR1]	= 0x00b4,
371 
372 	[TSU_ADRH0]	= 0x0100,
373 	[TSU_ADRL0]	= 0x0104,
374 	[TSU_ADRL31]	= 0x01fc,
375 };
376 
377 static bool sh_eth_is_gether(struct sh_eth_private *mdp)
378 {
379 	return mdp->reg_offset == sh_eth_offset_gigabit;
380 }
381 
382 static bool sh_eth_is_rz_fast_ether(struct sh_eth_private *mdp)
383 {
384 	return mdp->reg_offset == sh_eth_offset_fast_rz;
385 }
386 
387 static void sh_eth_select_mii(struct net_device *ndev)
388 {
389 	u32 value = 0x0;
390 	struct sh_eth_private *mdp = netdev_priv(ndev);
391 
392 	switch (mdp->phy_interface) {
393 	case PHY_INTERFACE_MODE_GMII:
394 		value = 0x2;
395 		break;
396 	case PHY_INTERFACE_MODE_MII:
397 		value = 0x1;
398 		break;
399 	case PHY_INTERFACE_MODE_RMII:
400 		value = 0x0;
401 		break;
402 	default:
403 		netdev_warn(ndev,
404 			    "PHY interface mode was not setup. Set to MII.\n");
405 		value = 0x1;
406 		break;
407 	}
408 
409 	sh_eth_write(ndev, value, RMII_MII);
410 }
411 
412 static void sh_eth_set_duplex(struct net_device *ndev)
413 {
414 	struct sh_eth_private *mdp = netdev_priv(ndev);
415 
416 	if (mdp->duplex) /* Full */
417 		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
418 	else		/* Half */
419 		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
420 }
421 
422 /* There is CPU dependent code */
423 static void sh_eth_set_rate_r8a777x(struct net_device *ndev)
424 {
425 	struct sh_eth_private *mdp = netdev_priv(ndev);
426 
427 	switch (mdp->speed) {
428 	case 10: /* 10BASE */
429 		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_ELB, ECMR);
430 		break;
431 	case 100:/* 100BASE */
432 		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_ELB, ECMR);
433 		break;
434 	default:
435 		break;
436 	}
437 }
438 
439 /* R8A7778/9 */
440 static struct sh_eth_cpu_data r8a777x_data = {
441 	.set_duplex	= sh_eth_set_duplex,
442 	.set_rate	= sh_eth_set_rate_r8a777x,
443 
444 	.register_type	= SH_ETH_REG_FAST_RCAR,
445 
446 	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
447 	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
448 	.eesipr_value	= 0x01ff009f,
449 
450 	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
451 	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
452 			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
453 			  EESR_ECI,
454 
455 	.apr		= 1,
456 	.mpr		= 1,
457 	.tpauser	= 1,
458 	.hw_swap	= 1,
459 };
460 
461 /* R8A7790/1 */
462 static struct sh_eth_cpu_data r8a779x_data = {
463 	.set_duplex	= sh_eth_set_duplex,
464 	.set_rate	= sh_eth_set_rate_r8a777x,
465 
466 	.register_type	= SH_ETH_REG_FAST_RCAR,
467 
468 	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
469 	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
470 	.eesipr_value	= 0x01ff009f,
471 
472 	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
473 	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
474 			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
475 			  EESR_ECI,
476 
477 	.apr		= 1,
478 	.mpr		= 1,
479 	.tpauser	= 1,
480 	.hw_swap	= 1,
481 	.rmiimode	= 1,
482 	.shift_rd0	= 1,
483 };
484 
485 static void sh_eth_set_rate_sh7724(struct net_device *ndev)
486 {
487 	struct sh_eth_private *mdp = netdev_priv(ndev);
488 
489 	switch (mdp->speed) {
490 	case 10: /* 10BASE */
491 		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
492 		break;
493 	case 100:/* 100BASE */
494 		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
495 		break;
496 	default:
497 		break;
498 	}
499 }
500 
501 /* SH7724 */
502 static struct sh_eth_cpu_data sh7724_data = {
503 	.set_duplex	= sh_eth_set_duplex,
504 	.set_rate	= sh_eth_set_rate_sh7724,
505 
506 	.register_type	= SH_ETH_REG_FAST_SH4,
507 
508 	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
509 	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
510 	.eesipr_value	= 0x01ff009f,
511 
512 	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
513 	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
514 			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
515 			  EESR_ECI,
516 
517 	.apr		= 1,
518 	.mpr		= 1,
519 	.tpauser	= 1,
520 	.hw_swap	= 1,
521 	.rpadir		= 1,
522 	.rpadir_value	= 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
523 };
524 
525 static void sh_eth_set_rate_sh7757(struct net_device *ndev)
526 {
527 	struct sh_eth_private *mdp = netdev_priv(ndev);
528 
529 	switch (mdp->speed) {
530 	case 10: /* 10BASE */
531 		sh_eth_write(ndev, 0, RTRATE);
532 		break;
533 	case 100:/* 100BASE */
534 		sh_eth_write(ndev, 1, RTRATE);
535 		break;
536 	default:
537 		break;
538 	}
539 }
540 
541 /* SH7757 */
542 static struct sh_eth_cpu_data sh7757_data = {
543 	.set_duplex	= sh_eth_set_duplex,
544 	.set_rate	= sh_eth_set_rate_sh7757,
545 
546 	.register_type	= SH_ETH_REG_FAST_SH4,
547 
548 	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
549 	.rmcr_value	= RMCR_RNC,
550 
551 	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
552 	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
553 			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
554 			  EESR_ECI,
555 
556 	.irq_flags	= IRQF_SHARED,
557 	.apr		= 1,
558 	.mpr		= 1,
559 	.tpauser	= 1,
560 	.hw_swap	= 1,
561 	.no_ade		= 1,
562 	.rpadir		= 1,
563 	.rpadir_value   = 2 << 16,
564 };
565 
566 #define SH_GIGA_ETH_BASE	0xfee00000UL
567 #define GIGA_MALR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
568 #define GIGA_MAHR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
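/* The ARSTR soft reset on the SH7757 GETHERC affects both ports, so the MAC
 * address registers (MAHR/MALR) of both ports are saved before the reset and
 * restored afterwards.
 */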
569 static void sh_eth_chip_reset_giga(struct net_device *ndev)
570 {
571 	int i;
572 	unsigned long mahr[2], malr[2];
573 
574 	/* save MAHR and MALR */
575 	for (i = 0; i < 2; i++) {
576 		malr[i] = ioread32((void *)GIGA_MALR(i));
577 		mahr[i] = ioread32((void *)GIGA_MAHR(i));
578 	}
579 
580 	/* reset device */
581 	iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800));
582 	mdelay(1);
583 
584 	/* restore MAHR and MALR */
585 	for (i = 0; i < 2; i++) {
586 		iowrite32(malr[i], (void *)GIGA_MALR(i));
587 		iowrite32(mahr[i], (void *)GIGA_MAHR(i));
588 	}
589 }
590 
591 static void sh_eth_set_rate_giga(struct net_device *ndev)
592 {
593 	struct sh_eth_private *mdp = netdev_priv(ndev);
594 
595 	switch (mdp->speed) {
596 	case 10: /* 10BASE */
597 		sh_eth_write(ndev, 0x00000000, GECMR);
598 		break;
599 	case 100:/* 100BASE */
600 		sh_eth_write(ndev, 0x00000010, GECMR);
601 		break;
602 	case 1000: /* 1000BASE */
603 		sh_eth_write(ndev, 0x00000020, GECMR);
604 		break;
605 	default:
606 		break;
607 	}
608 }
609 
610 /* SH7757(GETHERC) */
611 static struct sh_eth_cpu_data sh7757_data_giga = {
612 	.chip_reset	= sh_eth_chip_reset_giga,
613 	.set_duplex	= sh_eth_set_duplex,
614 	.set_rate	= sh_eth_set_rate_giga,
615 
616 	.register_type	= SH_ETH_REG_GIGABIT,
617 
618 	.ecsr_value	= ECSR_ICD | ECSR_MPD,
619 	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
620 	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
621 
622 	.tx_check	= EESR_TC1 | EESR_FTC,
623 	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
624 			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
625 			  EESR_TDE | EESR_ECI,
626 	.fdr_value	= 0x0000072f,
627 	.rmcr_value	= RMCR_RNC,
628 
629 	.irq_flags	= IRQF_SHARED,
630 	.apr		= 1,
631 	.mpr		= 1,
632 	.tpauser	= 1,
633 	.bculr		= 1,
634 	.hw_swap	= 1,
635 	.rpadir		= 1,
636 	.rpadir_value   = 2 << 16,
637 	.no_trimd	= 1,
638 	.no_ade		= 1,
639 	.tsu		= 1,
640 };
641 
642 static void sh_eth_chip_reset(struct net_device *ndev)
643 {
644 	struct sh_eth_private *mdp = netdev_priv(ndev);
645 
646 	/* reset device */
647 	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
648 	mdelay(1);
649 }
650 
651 static void sh_eth_set_rate_gether(struct net_device *ndev)
652 {
653 	struct sh_eth_private *mdp = netdev_priv(ndev);
654 
655 	switch (mdp->speed) {
656 	case 10: /* 10BASE */
657 		sh_eth_write(ndev, GECMR_10, GECMR);
658 		break;
659 	case 100:/* 100BASE */
660 		sh_eth_write(ndev, GECMR_100, GECMR);
661 		break;
662 	case 1000: /* 1000BASE */
663 		sh_eth_write(ndev, GECMR_1000, GECMR);
664 		break;
665 	default:
666 		break;
667 	}
668 }
669 
670 /* SH7734 */
671 static struct sh_eth_cpu_data sh7734_data = {
672 	.chip_reset	= sh_eth_chip_reset,
673 	.set_duplex	= sh_eth_set_duplex,
674 	.set_rate	= sh_eth_set_rate_gether,
675 
676 	.register_type	= SH_ETH_REG_GIGABIT,
677 
678 	.ecsr_value	= ECSR_ICD | ECSR_MPD,
679 	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
680 	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
681 
682 	.tx_check	= EESR_TC1 | EESR_FTC,
683 	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
684 			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
685 			  EESR_TDE | EESR_ECI,
686 
687 	.apr		= 1,
688 	.mpr		= 1,
689 	.tpauser	= 1,
690 	.bculr		= 1,
691 	.hw_swap	= 1,
692 	.no_trimd	= 1,
693 	.no_ade		= 1,
694 	.tsu		= 1,
695 	.hw_crc		= 1,
696 	.select_mii	= 1,
697 };
698 
699 /* SH7763 */
700 static struct sh_eth_cpu_data sh7763_data = {
701 	.chip_reset	= sh_eth_chip_reset,
702 	.set_duplex	= sh_eth_set_duplex,
703 	.set_rate	= sh_eth_set_rate_gether,
704 
705 	.register_type	= SH_ETH_REG_GIGABIT,
706 
707 	.ecsr_value	= ECSR_ICD | ECSR_MPD,
708 	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
709 	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
710 
711 	.tx_check	= EESR_TC1 | EESR_FTC,
712 	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
713 			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
714 			  EESR_ECI,
715 
716 	.apr		= 1,
717 	.mpr		= 1,
718 	.tpauser	= 1,
719 	.bculr		= 1,
720 	.hw_swap	= 1,
721 	.no_trimd	= 1,
722 	.no_ade		= 1,
723 	.tsu		= 1,
724 	.irq_flags	= IRQF_SHARED,
725 };
726 
727 static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
728 {
729 	struct sh_eth_private *mdp = netdev_priv(ndev);
730 
731 	/* reset device */
732 	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
733 	mdelay(1);
734 
735 	sh_eth_select_mii(ndev);
736 }
737 
738 /* R8A7740 */
739 static struct sh_eth_cpu_data r8a7740_data = {
740 	.chip_reset	= sh_eth_chip_reset_r8a7740,
741 	.set_duplex	= sh_eth_set_duplex,
742 	.set_rate	= sh_eth_set_rate_gether,
743 
744 	.register_type	= SH_ETH_REG_GIGABIT,
745 
746 	.ecsr_value	= ECSR_ICD | ECSR_MPD,
747 	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
748 	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
749 
750 	.tx_check	= EESR_TC1 | EESR_FTC,
751 	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
752 			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
753 			  EESR_TDE | EESR_ECI,
754 	.fdr_value	= 0x0000070f,
755 	.rmcr_value	= RMCR_RNC,
756 
757 	.apr		= 1,
758 	.mpr		= 1,
759 	.tpauser	= 1,
760 	.bculr		= 1,
761 	.hw_swap	= 1,
762 	.rpadir		= 1,
763 	.rpadir_value   = 2 << 16,
764 	.no_trimd	= 1,
765 	.no_ade		= 1,
766 	.tsu		= 1,
767 	.select_mii	= 1,
768 	.shift_rd0	= 1,
769 };
770 
771 /* R7S72100 */
772 static struct sh_eth_cpu_data r7s72100_data = {
773 	.chip_reset	= sh_eth_chip_reset,
774 	.set_duplex	= sh_eth_set_duplex,
775 
776 	.register_type	= SH_ETH_REG_FAST_RZ,
777 
778 	.ecsr_value	= ECSR_ICD,
779 	.ecsipr_value	= ECSIPR_ICDIP,
780 	.eesipr_value	= 0xff7f009f,
781 
782 	.tx_check	= EESR_TC1 | EESR_FTC,
783 	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
784 			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
785 			  EESR_TDE | EESR_ECI,
786 	.fdr_value	= 0x0000070f,
787 	.rmcr_value	= RMCR_RNC,
788 
789 	.no_psr		= 1,
790 	.apr		= 1,
791 	.mpr		= 1,
792 	.tpauser	= 1,
793 	.hw_swap	= 1,
794 	.rpadir		= 1,
795 	.rpadir_value   = 2 << 16,
796 	.no_trimd	= 1,
797 	.no_ade		= 1,
798 	.hw_crc		= 1,
799 	.tsu		= 1,
800 	.shift_rd0	= 1,
801 };
802 
803 static struct sh_eth_cpu_data sh7619_data = {
804 	.register_type	= SH_ETH_REG_FAST_SH3_SH2,
805 
806 	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
807 
808 	.apr		= 1,
809 	.mpr		= 1,
810 	.tpauser	= 1,
811 	.hw_swap	= 1,
812 };
813 
814 static struct sh_eth_cpu_data sh771x_data = {
815 	.register_type	= SH_ETH_REG_FAST_SH3_SH2,
816 
817 	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
818 	.tsu		= 1,
819 };
820 
821 static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
822 {
823 	if (!cd->ecsr_value)
824 		cd->ecsr_value = DEFAULT_ECSR_INIT;
825 
826 	if (!cd->ecsipr_value)
827 		cd->ecsipr_value = DEFAULT_ECSIPR_INIT;
828 
829 	if (!cd->fcftr_value)
830 		cd->fcftr_value = DEFAULT_FIFO_F_D_RFF |
831 				  DEFAULT_FIFO_F_D_RFD;
832 
833 	if (!cd->fdr_value)
834 		cd->fdr_value = DEFAULT_FDR_INIT;
835 
836 	if (!cd->rmcr_value)
837 		cd->rmcr_value = DEFAULT_RMCR_VALUE;
838 
839 	if (!cd->tx_check)
840 		cd->tx_check = DEFAULT_TX_CHECK;
841 
842 	if (!cd->eesr_err_check)
843 		cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
844 }
845 
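/* Poll the EDMR software-reset bits for up to ~100 ms until the controller
 * clears them, signalling that the soft reset has completed.
 */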
846 static int sh_eth_check_reset(struct net_device *ndev)
847 {
848 	int ret = 0;
849 	int cnt = 100;
850 
851 	while (cnt > 0) {
852 		if (!(sh_eth_read(ndev, EDMR) & 0x3))
853 			break;
854 		mdelay(1);
855 		cnt--;
856 	}
857 	if (cnt <= 0) {
858 		netdev_err(ndev, "Device reset failed\n");
859 		ret = -ETIMEDOUT;
860 	}
861 	return ret;
862 }
863 
864 static int sh_eth_reset(struct net_device *ndev)
865 {
866 	struct sh_eth_private *mdp = netdev_priv(ndev);
867 	int ret = 0;
868 
869 	if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp)) {
870 		sh_eth_write(ndev, EDSR_ENALL, EDSR);
871 		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
872 			     EDMR);
873 
874 		ret = sh_eth_check_reset(ndev);
875 		if (ret)
876 			return ret;
877 
878 		/* Table Init */
879 		sh_eth_write(ndev, 0x0, TDLAR);
880 		sh_eth_write(ndev, 0x0, TDFAR);
881 		sh_eth_write(ndev, 0x0, TDFXR);
882 		sh_eth_write(ndev, 0x0, TDFFR);
883 		sh_eth_write(ndev, 0x0, RDLAR);
884 		sh_eth_write(ndev, 0x0, RDFAR);
885 		sh_eth_write(ndev, 0x0, RDFXR);
886 		sh_eth_write(ndev, 0x0, RDFFR);
887 
888 		/* Reset HW CRC register */
889 		if (mdp->cd->hw_crc)
890 			sh_eth_write(ndev, 0x0, CSMR);
891 
892 		/* Select MII mode */
893 		if (mdp->cd->select_mii)
894 			sh_eth_select_mii(ndev);
895 	} else {
896 		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
897 			     EDMR);
898 		mdelay(3);
899 		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
900 			     EDMR);
901 	}
902 
903 	return ret;
904 }
905 
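/* Reserve skb headroom for Rx alignment: on SH4/SH-Mobile, round skb->data up
 * to the next SH4_SKB_RX_ALIGN boundary; on SH2/SH3, reserve a fixed
 * SH2_SH3_SKB_RX_ALIGN bytes of headroom.
 */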
906 #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
907 static void sh_eth_set_receive_align(struct sk_buff *skb)
908 {
909 	int reserve;
910 
911 	reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
912 	if (reserve)
913 		skb_reserve(skb, reserve);
914 }
915 #else
916 static void sh_eth_set_receive_align(struct sk_buff *skb)
917 {
918 	skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
919 }
920 #endif
921 
922 
923 /* CPU <-> EDMAC endian convert */
924 static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
925 {
926 	switch (mdp->edmac_endian) {
927 	case EDMAC_LITTLE_ENDIAN:
928 		return cpu_to_le32(x);
929 	case EDMAC_BIG_ENDIAN:
930 		return cpu_to_be32(x);
931 	}
932 	return x;
933 }
934 
935 static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
936 {
937 	switch (mdp->edmac_endian) {
938 	case EDMAC_LITTLE_ENDIAN:
939 		return le32_to_cpu(x);
940 	case EDMAC_BIG_ENDIAN:
941 		return be32_to_cpu(x);
942 	}
943 	return x;
944 }
945 
946 /* Program the hardware MAC address from dev->dev_addr. */
947 static void update_mac_address(struct net_device *ndev)
948 {
949 	sh_eth_write(ndev,
950 		     (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
951 		     (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
952 	sh_eth_write(ndev,
953 		     (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
954 }
955 
956 /* Get MAC address from SuperH MAC address register
957  *
958  * The SuperH Ethernet controller has no ROM for the MAC address, so the
959  * driver reads back the address set by the bootloader (U-Boot or sh-ipl+g).
960  * A MAC address must therefore be set in the bootloader before use.
961  *
962  */
963 static void read_mac_address(struct net_device *ndev, unsigned char *mac)
964 {
965 	if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
966 		memcpy(ndev->dev_addr, mac, ETH_ALEN);
967 	} else {
968 		ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
969 		ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
970 		ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
971 		ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF);
972 		ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;
973 		ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF);
974 	}
975 }
976 
977 static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
978 {
979 	if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp))
980 		return EDTRR_TRNS_GETHER;
981 	else
982 		return EDTRR_TRNS_ETHER;
983 }
984 
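/* Bit-banged MDIO: the PHY management lines (MMD, MDO, MDI, MDC) are driven
 * by setting/clearing the corresponding mask bits in the PIR register through
 * the generic mdio-bitbang callbacks below.
 */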
985 struct bb_info {
986 	void (*set_gate)(void *addr);
987 	struct mdiobb_ctrl ctrl;
988 	void *addr;
989 	u32 mmd_msk;/* MMD */
990 	u32 mdo_msk;
991 	u32 mdi_msk;
992 	u32 mdc_msk;
993 };
994 
995 /* PHY bit set */
996 static void bb_set(void *addr, u32 msk)
997 {
998 	iowrite32(ioread32(addr) | msk, addr);
999 }
1000 
1001 /* PHY bit clear */
1002 static void bb_clr(void *addr, u32 msk)
1003 {
1004 	iowrite32((ioread32(addr) & ~msk), addr);
1005 }
1006 
1007 /* PHY bit read */
1008 static int bb_read(void *addr, u32 msk)
1009 {
1010 	return (ioread32(addr) & msk) != 0;
1011 }
1012 
1013 /* Data I/O pin control */
1014 static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
1015 {
1016 	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
1017 
1018 	if (bitbang->set_gate)
1019 		bitbang->set_gate(bitbang->addr);
1020 
1021 	if (bit)
1022 		bb_set(bitbang->addr, bitbang->mmd_msk);
1023 	else
1024 		bb_clr(bitbang->addr, bitbang->mmd_msk);
1025 }
1026 
1027 /* Set bit data */
1028 static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
1029 {
1030 	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
1031 
1032 	if (bitbang->set_gate)
1033 		bitbang->set_gate(bitbang->addr);
1034 
1035 	if (bit)
1036 		bb_set(bitbang->addr, bitbang->mdo_msk);
1037 	else
1038 		bb_clr(bitbang->addr, bitbang->mdo_msk);
1039 }
1040 
1041 /* Get bit data */
1042 static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
1043 {
1044 	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
1045 
1046 	if (bitbang->set_gate)
1047 		bitbang->set_gate(bitbang->addr);
1048 
1049 	return bb_read(bitbang->addr, bitbang->mdi_msk);
1050 }
1051 
1052 /* MDC pin control */
1053 static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
1054 {
1055 	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
1056 
1057 	if (bitbang->set_gate)
1058 		bitbang->set_gate(bitbang->addr);
1059 
1060 	if (bit)
1061 		bb_set(bitbang->addr, bitbang->mdc_msk);
1062 	else
1063 		bb_clr(bitbang->addr, bitbang->mdc_msk);
1064 }
1065 
1066 /* mdio bus control struct */
1067 static struct mdiobb_ops bb_ops = {
1068 	.owner = THIS_MODULE,
1069 	.set_mdc = sh_mdc_ctrl,
1070 	.set_mdio_dir = sh_mmd_ctrl,
1071 	.set_mdio_data = sh_set_mdio,
1072 	.get_mdio_data = sh_get_mdio,
1073 };
1074 
1075 /* Free the Rx and Tx skb ring buffers */
1076 static void sh_eth_ring_free(struct net_device *ndev)
1077 {
1078 	struct sh_eth_private *mdp = netdev_priv(ndev);
1079 	int i;
1080 
1081 	/* Free Rx skb ringbuffer */
1082 	if (mdp->rx_skbuff) {
1083 		for (i = 0; i < mdp->num_rx_ring; i++) {
1084 			if (mdp->rx_skbuff[i])
1085 				dev_kfree_skb(mdp->rx_skbuff[i]);
1086 		}
1087 	}
1088 	kfree(mdp->rx_skbuff);
1089 	mdp->rx_skbuff = NULL;
1090 
1091 	/* Free Tx skb ringbuffer */
1092 	if (mdp->tx_skbuff) {
1093 		for (i = 0; i < mdp->num_tx_ring; i++) {
1094 			if (mdp->tx_skbuff[i])
1095 				dev_kfree_skb(mdp->tx_skbuff[i]);
1096 		}
1097 	}
1098 	kfree(mdp->tx_skbuff);
1099 	mdp->tx_skbuff = NULL;
1100 }
1101 
1102 /* format skb and descriptor buffer */
1103 static void sh_eth_ring_format(struct net_device *ndev)
1104 {
1105 	struct sh_eth_private *mdp = netdev_priv(ndev);
1106 	int i;
1107 	struct sk_buff *skb;
1108 	struct sh_eth_rxdesc *rxdesc = NULL;
1109 	struct sh_eth_txdesc *txdesc = NULL;
1110 	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
1111 	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
1112 
1113 	mdp->cur_rx = 0;
1114 	mdp->cur_tx = 0;
1115 	mdp->dirty_rx = 0;
1116 	mdp->dirty_tx = 0;
1117 
1118 	memset(mdp->rx_ring, 0, rx_ringsize);
1119 
1120 	/* build Rx ring buffer */
1121 	for (i = 0; i < mdp->num_rx_ring; i++) {
1122 		/* skb */
1123 		mdp->rx_skbuff[i] = NULL;
1124 		skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
1125 		mdp->rx_skbuff[i] = skb;
1126 		if (skb == NULL)
1127 			break;
1128 		dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
1129 			       DMA_FROM_DEVICE);
1130 		sh_eth_set_receive_align(skb);
1131 
1132 		/* RX descriptor */
1133 		rxdesc = &mdp->rx_ring[i];
1134 		rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
1135 		rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
1136 
1137 		/* The buffer size is rounded up to a 16-byte boundary. */
1138 		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
1139 		/* Rx descriptor address set */
1140 		if (i == 0) {
1141 			sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
1142 			if (sh_eth_is_gether(mdp) ||
1143 			    sh_eth_is_rz_fast_ether(mdp))
1144 				sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
1145 		}
1146 	}
1147 
1148 	mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);
1149 
1150 	/* Mark the last entry as wrapping the ring. */
1151 	rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);
1152 
1153 	memset(mdp->tx_ring, 0, tx_ringsize);
1154 
1155 	/* build Tx ring buffer */
1156 	for (i = 0; i < mdp->num_tx_ring; i++) {
1157 		mdp->tx_skbuff[i] = NULL;
1158 		txdesc = &mdp->tx_ring[i];
1159 		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
1160 		txdesc->buffer_length = 0;
1161 		if (i == 0) {
1162 			/* Tx descriptor address set */
1163 			sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
1164 			if (sh_eth_is_gether(mdp) ||
1165 			    sh_eth_is_rz_fast_ether(mdp))
1166 				sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
1167 		}
1168 	}
1169 
1170 	txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
1171 }
1172 
1173 /* Allocate the skb rings and DMA descriptor rings */
1174 static int sh_eth_ring_init(struct net_device *ndev)
1175 {
1176 	struct sh_eth_private *mdp = netdev_priv(ndev);
1177 	int rx_ringsize, tx_ringsize, ret = 0;
1178 
1179 	/* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
1180 	 * card needs room to do 8 byte alignment, +2 so we can reserve
1181 	 * the first 2 bytes, and +16 gets room for the status word from the
1182 	 * card.
1183 	 */
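	/* For example (illustrative): with the default MTU of 1500 this gives
	 * ((1500 + 26 + 7) & ~7) + 2 + 16 = 1546 bytes per Rx buffer.
	 */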
1184 	mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
1185 			  (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
1186 	if (mdp->cd->rpadir)
1187 		mdp->rx_buf_sz += NET_IP_ALIGN;
1188 
1189 	/* Allocate RX and TX skb rings */
1190 	mdp->rx_skbuff = kmalloc_array(mdp->num_rx_ring,
1191 				       sizeof(*mdp->rx_skbuff), GFP_KERNEL);
1192 	if (!mdp->rx_skbuff) {
1193 		ret = -ENOMEM;
1194 		return ret;
1195 	}
1196 
1197 	mdp->tx_skbuff = kmalloc_array(mdp->num_tx_ring,
1198 				       sizeof(*mdp->tx_skbuff), GFP_KERNEL);
1199 	if (!mdp->tx_skbuff) {
1200 		ret = -ENOMEM;
1201 		goto skb_ring_free;
1202 	}
1203 
1204 	/* Allocate all Rx descriptors. */
1205 	rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
1206 	mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
1207 					  GFP_KERNEL);
1208 	if (!mdp->rx_ring) {
1209 		ret = -ENOMEM;
1210 		goto desc_ring_free;
1211 	}
1212 
1213 	mdp->dirty_rx = 0;
1214 
1215 	/* Allocate all Tx descriptors. */
1216 	tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
1217 	mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
1218 					  GFP_KERNEL);
1219 	if (!mdp->tx_ring) {
1220 		ret = -ENOMEM;
1221 		goto desc_ring_free;
1222 	}
1223 	return ret;
1224 
1225 desc_ring_free:
1226 	/* free DMA buffer */
1227 	dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);
1228 
1229 skb_ring_free:
1230 	/* Free Rx and Tx skb ring buffer */
1231 	sh_eth_ring_free(ndev);
1232 	mdp->tx_ring = NULL;
1233 	mdp->rx_ring = NULL;
1234 
1235 	return ret;
1236 }
1237 
1238 static void sh_eth_free_dma_buffer(struct sh_eth_private *mdp)
1239 {
1240 	int ringsize;
1241 
1242 	if (mdp->rx_ring) {
1243 		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
1244 		dma_free_coherent(NULL, ringsize, mdp->rx_ring,
1245 				  mdp->rx_desc_dma);
1246 		mdp->rx_ring = NULL;
1247 	}
1248 
1249 	if (mdp->tx_ring) {
1250 		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
1251 		dma_free_coherent(NULL, ringsize, mdp->tx_ring,
1252 				  mdp->tx_desc_dma);
1253 		mdp->tx_ring = NULL;
1254 	}
1255 }
1256 
1257 static int sh_eth_dev_init(struct net_device *ndev, bool start)
1258 {
1259 	int ret = 0;
1260 	struct sh_eth_private *mdp = netdev_priv(ndev);
1261 	u32 val;
1262 
1263 	/* Soft Reset */
1264 	ret = sh_eth_reset(ndev);
1265 	if (ret)
1266 		return ret;
1267 
1268 	if (mdp->cd->rmiimode)
1269 		sh_eth_write(ndev, 0x1, RMIIMODE);
1270 
1271 	/* Descriptor format */
1272 	sh_eth_ring_format(ndev);
1273 	if (mdp->cd->rpadir)
1274 		sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);
1275 
1276 	/* all sh_eth int mask */
1277 	sh_eth_write(ndev, 0, EESIPR);
1278 
1279 #if defined(__LITTLE_ENDIAN)
1280 	if (mdp->cd->hw_swap)
1281 		sh_eth_write(ndev, EDMR_EL, EDMR);
1282 	else
1283 #endif
1284 		sh_eth_write(ndev, 0, EDMR);
1285 
1286 	/* FIFO size set */
1287 	sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
1288 	sh_eth_write(ndev, 0, TFTR);
1289 
1290 	/* Frame recv control */
1291 	sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);
1292 
1293 	sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);
1294 
1295 	if (mdp->cd->bculr)
1296 		sh_eth_write(ndev, 0x800, BCULR);	/* Burst cycle set */
1297 
1298 	sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);
1299 
1300 	if (!mdp->cd->no_trimd)
1301 		sh_eth_write(ndev, 0, TRIMD);
1302 
1303 	/* Recv frame limit set register */
1304 	sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
1305 		     RFLR);
1306 
1307 	sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
1308 	if (start)
1309 		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1310 
1311 	/* PAUSE Prohibition */
1312 	val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
1313 		ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
1314 
1315 	sh_eth_write(ndev, val, ECMR);
1316 
1317 	if (mdp->cd->set_rate)
1318 		mdp->cd->set_rate(ndev);
1319 
1320 	/* E-MAC Status Register clear */
1321 	sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);
1322 
1323 	/* E-MAC Interrupt Enable register */
1324 	if (start)
1325 		sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
1326 
1327 	/* Set MAC address */
1328 	update_mac_address(ndev);
1329 
1330 	/* mask reset */
1331 	if (mdp->cd->apr)
1332 		sh_eth_write(ndev, APR_AP, APR);
1333 	if (mdp->cd->mpr)
1334 		sh_eth_write(ndev, MPR_MP, MPR);
1335 	if (mdp->cd->tpauser)
1336 		sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
1337 
1338 	if (start) {
1339 		/* Setting the Rx mode will start the Rx process. */
1340 		sh_eth_write(ndev, EDRRR_R, EDRRR);
1341 
1342 		netif_start_queue(ndev);
1343 	}
1344 
1345 	return ret;
1346 }
1347 
1348 /* free Tx skb function */
1349 static int sh_eth_txfree(struct net_device *ndev)
1350 {
1351 	struct sh_eth_private *mdp = netdev_priv(ndev);
1352 	struct sh_eth_txdesc *txdesc;
1353 	int free_num = 0;
1354 	int entry = 0;
1355 
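	/* Reclaim descriptors the DMAC has finished with (TD_TACT cleared),
	 * unmapping and freeing the associated skbs and updating Tx stats.
	 */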
1356 	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
1357 		entry = mdp->dirty_tx % mdp->num_tx_ring;
1358 		txdesc = &mdp->tx_ring[entry];
1359 		if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
1360 			break;
1361 		/* Free the original skb. */
1362 		if (mdp->tx_skbuff[entry]) {
1363 			dma_unmap_single(&ndev->dev, txdesc->addr,
1364 					 txdesc->buffer_length, DMA_TO_DEVICE);
1365 			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
1366 			mdp->tx_skbuff[entry] = NULL;
1367 			free_num++;
1368 		}
1369 		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
1370 		if (entry >= mdp->num_tx_ring - 1)
1371 			txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
1372 
1373 		ndev->stats.tx_packets++;
1374 		ndev->stats.tx_bytes += txdesc->buffer_length;
1375 	}
1376 	return free_num;
1377 }
1378 
1379 /* Packet receive function */
1380 static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1381 {
1382 	struct sh_eth_private *mdp = netdev_priv(ndev);
1383 	struct sh_eth_rxdesc *rxdesc;
1384 
1385 	int entry = mdp->cur_rx % mdp->num_rx_ring;
1386 	int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
1387 	struct sk_buff *skb;
1388 	int exceeded = 0;
1389 	u16 pkt_len = 0;
1390 	u32 desc_status;
1391 
1392 	rxdesc = &mdp->rx_ring[entry];
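	/* Walk the Rx ring until we reach a descriptor still owned by the DMAC
	 * (RD_RACT set), or the bogus count / NAPI quota is exhausted.
	 */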
1393 	while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
1394 		desc_status = edmac_to_cpu(mdp, rxdesc->status);
1395 		pkt_len = rxdesc->frame_length;
1396 
1397 		if (--boguscnt < 0)
1398 			break;
1399 
1400 		if (*quota <= 0) {
1401 			exceeded = 1;
1402 			break;
1403 		}
1404 		(*quota)--;
1405 
1406 		if (!(desc_status & RDFEND))
1407 			ndev->stats.rx_length_errors++;
1408 
1409 		/* In case of almost all GETHER/ETHERs, the Receive Frame State
1410 		 * (RFS) bits in the Receive Descriptor 0 are from bit 9 to
1411 		 * bit 0. However, in case of the R8A7740, R8A779x, and
1412 		 * R7S72100 the RFS bits are from bit 25 to bit 16. So, the
1413 		 * driver needs right shifting by 16.
1414 		 */
1415 		if (mdp->cd->shift_rd0)
1416 			desc_status >>= 16;
1417 
1418 		if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
1419 				   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
1420 			ndev->stats.rx_errors++;
1421 			if (desc_status & RD_RFS1)
1422 				ndev->stats.rx_crc_errors++;
1423 			if (desc_status & RD_RFS2)
1424 				ndev->stats.rx_frame_errors++;
1425 			if (desc_status & RD_RFS3)
1426 				ndev->stats.rx_length_errors++;
1427 			if (desc_status & RD_RFS4)
1428 				ndev->stats.rx_length_errors++;
1429 			if (desc_status & RD_RFS6)
1430 				ndev->stats.rx_missed_errors++;
1431 			if (desc_status & RD_RFS10)
1432 				ndev->stats.rx_over_errors++;
1433 		} else {
1434 			if (!mdp->cd->hw_swap)
1435 				sh_eth_soft_swap(
1436 					phys_to_virt(ALIGN(rxdesc->addr, 4)),
1437 					pkt_len + 2);
1438 			skb = mdp->rx_skbuff[entry];
1439 			mdp->rx_skbuff[entry] = NULL;
1440 			if (mdp->cd->rpadir)
1441 				skb_reserve(skb, NET_IP_ALIGN);
1442 			dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr,
1443 						mdp->rx_buf_sz,
1444 						DMA_FROM_DEVICE);
1445 			skb_put(skb, pkt_len);
1446 			skb->protocol = eth_type_trans(skb, ndev);
1447 			netif_receive_skb(skb);
1448 			ndev->stats.rx_packets++;
1449 			ndev->stats.rx_bytes += pkt_len;
1450 		}
1451 		rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
1452 		entry = (++mdp->cur_rx) % mdp->num_rx_ring;
1453 		rxdesc = &mdp->rx_ring[entry];
1454 	}
1455 
1456 	/* Refill the Rx ring buffers. */
1457 	for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
1458 		entry = mdp->dirty_rx % mdp->num_rx_ring;
1459 		rxdesc = &mdp->rx_ring[entry];
1460 		/* The buffer size is rounded up to a 16-byte boundary. */
1461 		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
1462 
1463 		if (mdp->rx_skbuff[entry] == NULL) {
1464 			skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
1465 			mdp->rx_skbuff[entry] = skb;
1466 			if (skb == NULL)
1467 				break;	/* Better luck next round. */
1468 			dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
1469 				       DMA_FROM_DEVICE);
1470 			sh_eth_set_receive_align(skb);
1471 
1472 			skb_checksum_none_assert(skb);
1473 			rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
1474 		}
1475 		if (entry >= mdp->num_rx_ring - 1)
1476 			rxdesc->status |=
1477 				cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
1478 		else
1479 			rxdesc->status |=
1480 				cpu_to_edmac(mdp, RD_RACT | RD_RFP);
1481 	}
1482 
1483 	/* Restart Rx engine if stopped. */
1484 	/* If we don't need to check status, don't. -KDU */
1485 	if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
1486 		/* fix the values for the next receiving if RDE is set */
1487 		if (intr_status & EESR_RDE) {
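			/* Each Rx descriptor is 16 bytes, so (RDFAR - RDLAR)
			 * >> 4 is the index of the descriptor the DMAC stopped
			 * at; resynchronize cur_rx/dirty_rx to it.
			 */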
1488 			u32 count = (sh_eth_read(ndev, RDFAR) -
1489 				     sh_eth_read(ndev, RDLAR)) >> 4;
1490 
1491 			mdp->cur_rx = count;
1492 			mdp->dirty_rx = count;
1493 		}
1494 		sh_eth_write(ndev, EDRRR_R, EDRRR);
1495 	}
1496 
1497 	return exceeded;
1498 }
1499 
1500 static void sh_eth_rcv_snd_disable(struct net_device *ndev)
1501 {
1502 	/* disable tx and rx */
1503 	sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
1504 		~(ECMR_RE | ECMR_TE), ECMR);
1505 }
1506 
1507 static void sh_eth_rcv_snd_enable(struct net_device *ndev)
1508 {
1509 	/* enable tx and rx */
1510 	sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
1511 		(ECMR_RE | ECMR_TE), ECMR);
1512 }
1513 
1514 /* error control function */
1515 static void sh_eth_error(struct net_device *ndev, int intr_status)
1516 {
1517 	struct sh_eth_private *mdp = netdev_priv(ndev);
1518 	u32 felic_stat;
1519 	u32 link_stat;
1520 	u32 mask;
1521 
1522 	if (intr_status & EESR_ECI) {
1523 		felic_stat = sh_eth_read(ndev, ECSR);
1524 		sh_eth_write(ndev, felic_stat, ECSR);	/* clear int */
1525 		if (felic_stat & ECSR_ICD)
1526 			ndev->stats.tx_carrier_errors++;
1527 		if (felic_stat & ECSR_LCHNG) {
1528 			/* Link Changed */
1529 			if (mdp->cd->no_psr || mdp->no_ether_link) {
1530 				goto ignore_link;
1531 			} else {
1532 				link_stat = (sh_eth_read(ndev, PSR));
1533 				if (mdp->ether_link_active_low)
1534 					link_stat = ~link_stat;
1535 			}
1536 			if (!(link_stat & PHY_ST_LINK)) {
1537 				sh_eth_rcv_snd_disable(ndev);
1538 			} else {
1539 				/* Link Up */
1540 				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
1541 						   ~DMAC_M_ECI, EESIPR);
1542 				/* clear int */
1543 				sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
1544 					     ECSR);
1545 				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
1546 						   DMAC_M_ECI, EESIPR);
1547 				/* enable tx and rx */
1548 				sh_eth_rcv_snd_enable(ndev);
1549 			}
1550 		}
1551 	}
1552 
1553 ignore_link:
1554 	if (intr_status & EESR_TWB) {
1555 		/* Unused write back interrupt */
1556 		if (intr_status & EESR_TABT) {	/* Transmit Abort int */
1557 			ndev->stats.tx_aborted_errors++;
1558 			netif_err(mdp, tx_err, ndev, "Transmit Abort\n");
1559 		}
1560 	}
1561 
1562 	if (intr_status & EESR_RABT) {
1563 		/* Receive Abort int */
1564 		if (intr_status & EESR_RFRMER) {
1565 			/* Receive Frame Overflow int */
1566 			ndev->stats.rx_frame_errors++;
1567 			netif_err(mdp, rx_err, ndev, "Receive Abort\n");
1568 		}
1569 	}
1570 
1571 	if (intr_status & EESR_TDE) {
1572 		/* Transmit Descriptor Empty int */
1573 		ndev->stats.tx_fifo_errors++;
1574 		netif_err(mdp, tx_err, ndev, "Transmit Descriptor Empty\n");
1575 	}
1576 
1577 	if (intr_status & EESR_TFE) {
1578 		/* FIFO under flow */
1579 		ndev->stats.tx_fifo_errors++;
1580 		netif_err(mdp, tx_err, ndev, "Transmit FIFO Under flow\n");
1581 	}
1582 
1583 	if (intr_status & EESR_RDE) {
1584 		/* Receive Descriptor Empty int */
1585 		ndev->stats.rx_over_errors++;
1586 		netif_err(mdp, rx_err, ndev, "Receive Descriptor Empty\n");
1587 	}
1588 
1589 	if (intr_status & EESR_RFE) {
1590 		/* Receive FIFO Overflow int */
1591 		ndev->stats.rx_fifo_errors++;
1592 		netif_err(mdp, rx_err, ndev, "Receive FIFO Overflow\n");
1593 	}
1594 
1595 	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
1596 		/* Address Error */
1597 		ndev->stats.tx_fifo_errors++;
1598 		netif_err(mdp, tx_err, ndev, "Address Error\n");
1599 	}
1600 
1601 	mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
1602 	if (mdp->cd->no_ade)
1603 		mask &= ~EESR_ADE;
1604 	if (intr_status & mask) {
1605 		/* Tx error */
1606 		u32 edtrr = sh_eth_read(ndev, EDTRR);
1607 
1608 		/* dmesg */
1609 		netdev_err(ndev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
1610 			   intr_status, mdp->cur_tx, mdp->dirty_tx,
1611 			   (u32)ndev->state, edtrr);
1612 		/* dirty buffer free */
1613 		sh_eth_txfree(ndev);
1614 
1615 		/* SH7712 BUG */
1616 		if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
1617 			/* tx dma start */
1618 			sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
1619 		}
1620 		/* wakeup */
1621 		netif_wake_queue(ndev);
1622 	}
1623 }
1624 
1625 static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
1626 {
1627 	struct net_device *ndev = netdev;
1628 	struct sh_eth_private *mdp = netdev_priv(ndev);
1629 	struct sh_eth_cpu_data *cd = mdp->cd;
1630 	irqreturn_t ret = IRQ_NONE;
1631 	unsigned long intr_status, intr_enable;
1632 
1633 	spin_lock(&mdp->lock);
1634 
1635 	/* Get interrupt status */
1636 	intr_status = sh_eth_read(ndev, EESR);
1637 	/* Mask it with the interrupt mask, forcing the ECI interrupt to stay
1638 	 * enabled since it is the one that comes through regardless of the mask,
1639 	 * and we need to fully handle it in sh_eth_error() in order to quench
1640 	 * it as it doesn't get cleared by just writing 1 to the ECI bit...
1641 	 */
1642 	intr_enable = sh_eth_read(ndev, EESIPR);
1643 	intr_status &= intr_enable | DMAC_M_ECI;
1644 	if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check))
1645 		ret = IRQ_HANDLED;
1646 	else
1647 		goto other_irq;
1648 
1649 	if (intr_status & EESR_RX_CHECK) {
1650 		if (napi_schedule_prep(&mdp->napi)) {
1651 			/* Mask Rx interrupts */
1652 			sh_eth_write(ndev, intr_enable & ~EESR_RX_CHECK,
1653 				     EESIPR);
1654 			__napi_schedule(&mdp->napi);
1655 		} else {
1656 			netdev_warn(ndev,
1657 				    "ignoring interrupt, status 0x%08lx, mask 0x%08lx.\n",
1658 				    intr_status, intr_enable);
1659 		}
1660 	}
1661 
1662 	/* Tx Check */
1663 	if (intr_status & cd->tx_check) {
1664 		/* Clear Tx interrupts */
1665 		sh_eth_write(ndev, intr_status & cd->tx_check, EESR);
1666 
1667 		sh_eth_txfree(ndev);
1668 		netif_wake_queue(ndev);
1669 	}
1670 
1671 	if (intr_status & cd->eesr_err_check) {
1672 		/* Clear error interrupts */
1673 		sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR);
1674 
1675 		sh_eth_error(ndev, intr_status);
1676 	}
1677 
1678 other_irq:
1679 	spin_unlock(&mdp->lock);
1680 
1681 	return ret;
1682 }
1683 
1684 static int sh_eth_poll(struct napi_struct *napi, int budget)
1685 {
1686 	struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private,
1687 						  napi);
1688 	struct net_device *ndev = napi->dev;
1689 	int quota = budget;
1690 	unsigned long intr_status;
1691 
1692 	for (;;) {
1693 		intr_status = sh_eth_read(ndev, EESR);
1694 		if (!(intr_status & EESR_RX_CHECK))
1695 			break;
1696 		/* Clear Rx interrupts */
1697 		sh_eth_write(ndev, intr_status & EESR_RX_CHECK, EESR);
1698 
1699 		if (sh_eth_rx(ndev, intr_status, &quota))
1700 			goto out;
1701 	}
1702 
1703 	napi_complete(napi);
1704 
1705 	/* Reenable Rx interrupts */
1706 	sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1707 out:
1708 	return budget - quota;
1709 }
1710 
1711 /* PHY state control function */
1712 static void sh_eth_adjust_link(struct net_device *ndev)
1713 {
1714 	struct sh_eth_private *mdp = netdev_priv(ndev);
1715 	struct phy_device *phydev = mdp->phydev;
1716 	int new_state = 0;
1717 
1718 	if (phydev->link) {
1719 		if (phydev->duplex != mdp->duplex) {
1720 			new_state = 1;
1721 			mdp->duplex = phydev->duplex;
1722 			if (mdp->cd->set_duplex)
1723 				mdp->cd->set_duplex(ndev);
1724 		}
1725 
1726 		if (phydev->speed != mdp->speed) {
1727 			new_state = 1;
1728 			mdp->speed = phydev->speed;
1729 			if (mdp->cd->set_rate)
1730 				mdp->cd->set_rate(ndev);
1731 		}
1732 		if (!mdp->link) {
1733 			sh_eth_write(ndev,
1734 				     sh_eth_read(ndev, ECMR) & ~ECMR_TXF,
1735 				     ECMR);
1736 			new_state = 1;
1737 			mdp->link = phydev->link;
1738 			if (mdp->cd->no_psr || mdp->no_ether_link)
1739 				sh_eth_rcv_snd_enable(ndev);
1740 		}
1741 	} else if (mdp->link) {
1742 		new_state = 1;
1743 		mdp->link = 0;
1744 		mdp->speed = 0;
1745 		mdp->duplex = -1;
1746 		if (mdp->cd->no_psr || mdp->no_ether_link)
1747 			sh_eth_rcv_snd_disable(ndev);
1748 	}
1749 
1750 	if (new_state && netif_msg_link(mdp))
1751 		phy_print_status(phydev);
1752 }
1753 
1754 /* PHY init function */
1755 static int sh_eth_phy_init(struct net_device *ndev)
1756 {
1757 	struct device_node *np = ndev->dev.parent->of_node;
1758 	struct sh_eth_private *mdp = netdev_priv(ndev);
1759 	struct phy_device *phydev = NULL;
1760 
1761 	mdp->link = 0;
1762 	mdp->speed = 0;
1763 	mdp->duplex = -1;
1764 
1765 	/* Try connect to PHY */
1766 	if (np) {
1767 		struct device_node *pn;
1768 
1769 		pn = of_parse_phandle(np, "phy-handle", 0);
1770 		phydev = of_phy_connect(ndev, pn,
1771 					sh_eth_adjust_link, 0,
1772 					mdp->phy_interface);
1773 
1774 		if (!phydev)
1775 			phydev = ERR_PTR(-ENOENT);
1776 	} else {
1777 		char phy_id[MII_BUS_ID_SIZE + 3];
1778 
1779 		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
1780 			 mdp->mii_bus->id, mdp->phy_id);
1781 
1782 		phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
1783 				     mdp->phy_interface);
1784 	}
1785 
1786 	if (IS_ERR(phydev)) {
1787 		netdev_err(ndev, "failed to connect PHY\n");
1788 		return PTR_ERR(phydev);
1789 	}
1790 
1791 	netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n",
1792 		    phydev->addr, phydev->irq, phydev->drv->name);
1793 
1794 	mdp->phydev = phydev;
1795 
1796 	return 0;
1797 }
1798 
1799 /* PHY control start function */
1800 static int sh_eth_phy_start(struct net_device *ndev)
1801 {
1802 	struct sh_eth_private *mdp = netdev_priv(ndev);
1803 	int ret;
1804 
1805 	ret = sh_eth_phy_init(ndev);
1806 	if (ret)
1807 		return ret;
1808 
1809 	phy_start(mdp->phydev);
1810 
1811 	return 0;
1812 }
1813 
1814 static int sh_eth_get_settings(struct net_device *ndev,
1815 			       struct ethtool_cmd *ecmd)
1816 {
1817 	struct sh_eth_private *mdp = netdev_priv(ndev);
1818 	unsigned long flags;
1819 	int ret;
1820 
1821 	spin_lock_irqsave(&mdp->lock, flags);
1822 	ret = phy_ethtool_gset(mdp->phydev, ecmd);
1823 	spin_unlock_irqrestore(&mdp->lock, flags);
1824 
1825 	return ret;
1826 }
1827 
1828 static int sh_eth_set_settings(struct net_device *ndev,
1829 			       struct ethtool_cmd *ecmd)
1830 {
1831 	struct sh_eth_private *mdp = netdev_priv(ndev);
1832 	unsigned long flags;
1833 	int ret;
1834 
1835 	spin_lock_irqsave(&mdp->lock, flags);
1836 
1837 	/* disable tx and rx */
1838 	sh_eth_rcv_snd_disable(ndev);
1839 
1840 	ret = phy_ethtool_sset(mdp->phydev, ecmd);
1841 	if (ret)
1842 		goto error_exit;
1843 
1844 	if (ecmd->duplex == DUPLEX_FULL)
1845 		mdp->duplex = 1;
1846 	else
1847 		mdp->duplex = 0;
1848 
1849 	if (mdp->cd->set_duplex)
1850 		mdp->cd->set_duplex(ndev);
1851 
1852 error_exit:
1853 	mdelay(1);
1854 
1855 	/* enable tx and rx */
1856 	sh_eth_rcv_snd_enable(ndev);
1857 
1858 	spin_unlock_irqrestore(&mdp->lock, flags);
1859 
1860 	return ret;
1861 }
1862 
1863 static int sh_eth_nway_reset(struct net_device *ndev)
1864 {
1865 	struct sh_eth_private *mdp = netdev_priv(ndev);
1866 	unsigned long flags;
1867 	int ret;
1868 
1869 	spin_lock_irqsave(&mdp->lock, flags);
1870 	ret = phy_start_aneg(mdp->phydev);
1871 	spin_unlock_irqrestore(&mdp->lock, flags);
1872 
1873 	return ret;
1874 }
1875 
1876 static u32 sh_eth_get_msglevel(struct net_device *ndev)
1877 {
1878 	struct sh_eth_private *mdp = netdev_priv(ndev);
1879 	return mdp->msg_enable;
1880 }
1881 
1882 static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
1883 {
1884 	struct sh_eth_private *mdp = netdev_priv(ndev);
1885 	mdp->msg_enable = value;
1886 }
1887 
1888 static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
1889 	"rx_current", "tx_current",
1890 	"rx_dirty", "tx_dirty",
1891 };
1892 #define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)
1893 
1894 static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
1895 {
1896 	switch (sset) {
1897 	case ETH_SS_STATS:
1898 		return SH_ETH_STATS_LEN;
1899 	default:
1900 		return -EOPNOTSUPP;
1901 	}
1902 }
1903 
1904 static void sh_eth_get_ethtool_stats(struct net_device *ndev,
1905 				     struct ethtool_stats *stats, u64 *data)
1906 {
1907 	struct sh_eth_private *mdp = netdev_priv(ndev);
1908 	int i = 0;
1909 
1910 	/* device-specific stats */
1911 	data[i++] = mdp->cur_rx;
1912 	data[i++] = mdp->cur_tx;
1913 	data[i++] = mdp->dirty_rx;
1914 	data[i++] = mdp->dirty_tx;
1915 }
1916 
1917 static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
1918 {
1919 	switch (stringset) {
1920 	case ETH_SS_STATS:
1921 		memcpy(data, *sh_eth_gstrings_stats,
1922 		       sizeof(sh_eth_gstrings_stats));
1923 		break;
1924 	}
1925 }
1926 
1927 static void sh_eth_get_ringparam(struct net_device *ndev,
1928 				 struct ethtool_ringparam *ring)
1929 {
1930 	struct sh_eth_private *mdp = netdev_priv(ndev);
1931 
1932 	ring->rx_max_pending = RX_RING_MAX;
1933 	ring->tx_max_pending = TX_RING_MAX;
1934 	ring->rx_pending = mdp->num_rx_ring;
1935 	ring->tx_pending = mdp->num_tx_ring;
1936 }
1937 
1938 static int sh_eth_set_ringparam(struct net_device *ndev,
1939 				struct ethtool_ringparam *ring)
1940 {
1941 	struct sh_eth_private *mdp = netdev_priv(ndev);
1942 	int ret;
1943 
1944 	if (ring->tx_pending > TX_RING_MAX ||
1945 	    ring->rx_pending > RX_RING_MAX ||
1946 	    ring->tx_pending < TX_RING_MIN ||
1947 	    ring->rx_pending < RX_RING_MIN)
1948 		return -EINVAL;
1949 	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
1950 		return -EINVAL;
1951 
1952 	if (netif_running(ndev)) {
1953 		netif_tx_disable(ndev);
1954 		/* Disable interrupts by clearing the interrupt mask. */
1955 		sh_eth_write(ndev, 0x0000, EESIPR);
1956 		/* Stop the chip's Tx and Rx processes. */
1957 		sh_eth_write(ndev, 0, EDTRR);
1958 		sh_eth_write(ndev, 0, EDRRR);
1959 		synchronize_irq(ndev->irq);
1960 	}
1961 
1962 	/* Free all the skbuffs in the Rx queue. */
1963 	sh_eth_ring_free(ndev);
1964 	/* Free DMA buffer */
1965 	sh_eth_free_dma_buffer(mdp);
1966 
1967 	/* Set new parameters */
1968 	mdp->num_rx_ring = ring->rx_pending;
1969 	mdp->num_tx_ring = ring->tx_pending;
1970 
1971 	ret = sh_eth_ring_init(ndev);
1972 	if (ret < 0) {
1973 		netdev_err(ndev, "%s: sh_eth_ring_init failed.\n", __func__);
1974 		return ret;
1975 	}
1976 	ret = sh_eth_dev_init(ndev, false);
1977 	if (ret < 0) {
1978 		netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", __func__);
1979 		return ret;
1980 	}
1981 
1982 	if (netif_running(ndev)) {
1983 		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1984 		/* Setting the Rx mode will start the Rx process. */
1985 		sh_eth_write(ndev, EDRRR_R, EDRRR);
1986 		netif_wake_queue(ndev);
1987 	}
1988 
1989 	return 0;
1990 }
1991 
1992 static const struct ethtool_ops sh_eth_ethtool_ops = {
1993 	.get_settings	= sh_eth_get_settings,
1994 	.set_settings	= sh_eth_set_settings,
1995 	.nway_reset	= sh_eth_nway_reset,
1996 	.get_msglevel	= sh_eth_get_msglevel,
1997 	.set_msglevel	= sh_eth_set_msglevel,
1998 	.get_link	= ethtool_op_get_link,
1999 	.get_strings	= sh_eth_get_strings,
2000 	.get_ethtool_stats  = sh_eth_get_ethtool_stats,
2001 	.get_sset_count     = sh_eth_get_sset_count,
2002 	.get_ringparam	= sh_eth_get_ringparam,
2003 	.set_ringparam	= sh_eth_set_ringparam,
2004 };
2005 
2006 /* network device open function */
2007 static int sh_eth_open(struct net_device *ndev)
2008 {
2009 	int ret = 0;
2010 	struct sh_eth_private *mdp = netdev_priv(ndev);
2011 
2012 	pm_runtime_get_sync(&mdp->pdev->dev);
2013 
2014 	napi_enable(&mdp->napi);
2015 
2016 	ret = request_irq(ndev->irq, sh_eth_interrupt,
2017 			  mdp->cd->irq_flags, ndev->name, ndev);
2018 	if (ret) {
2019 		netdev_err(ndev, "Can not assign IRQ number\n");
2020 		goto out_napi_off;
2021 	}
2022 
2023 	/* Descriptor set */
2024 	ret = sh_eth_ring_init(ndev);
2025 	if (ret)
2026 		goto out_free_irq;
2027 
2028 	/* device init */
2029 	ret = sh_eth_dev_init(ndev, true);
2030 	if (ret)
2031 		goto out_free_irq;
2032 
2033 	/* PHY control start */
2034 	ret = sh_eth_phy_start(ndev);
2035 	if (ret)
2036 		goto out_free_irq;
2037 
2038 	return ret;
2039 
2040 out_free_irq:
2041 	free_irq(ndev->irq, ndev);
2042 out_napi_off:
2043 	napi_disable(&mdp->napi);
2044 	pm_runtime_put_sync(&mdp->pdev->dev);
2045 	return ret;
2046 }
2047 
2048 /* Timeout function */
2049 static void sh_eth_tx_timeout(struct net_device *ndev)
2050 {
2051 	struct sh_eth_private *mdp = netdev_priv(ndev);
2052 	struct sh_eth_rxdesc *rxdesc;
2053 	int i;
2054 
2055 	netif_stop_queue(ndev);
2056 
2057 	netif_err(mdp, timer, ndev,
2058 		  "transmit timed out, status %8.8x, resetting...\n",
2059 		  (int)sh_eth_read(ndev, EESR));
2060 
2061 	/* Record the timeout as a transmit error */
2062 	ndev->stats.tx_errors++;
2063 
2064 	/* Free all the skbuffs in the Rx queue. */
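	/* The descriptors themselves are also invalidated (status cleared,
	 * address set to a poison value) so stale DMA is easy to spot; the
	 * rings are rebuilt when the device is re-initialized below.
	 */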
2065 	for (i = 0; i < mdp->num_rx_ring; i++) {
2066 		rxdesc = &mdp->rx_ring[i];
2067 		rxdesc->status = 0;
2068 		rxdesc->addr = 0xBADF00D0;
2069 		if (mdp->rx_skbuff[i])
2070 			dev_kfree_skb(mdp->rx_skbuff[i]);
2071 		mdp->rx_skbuff[i] = NULL;
2072 	}
2073 	for (i = 0; i < mdp->num_tx_ring; i++) {
2074 		if (mdp->tx_skbuff[i])
2075 			dev_kfree_skb(mdp->tx_skbuff[i]);
2076 		mdp->tx_skbuff[i] = NULL;
2077 	}
2078 
2079 	/* device init */
2080 	sh_eth_dev_init(ndev, true);
2081 }
2082 
2083 /* Packet transmit function */
2084 static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2085 {
2086 	struct sh_eth_private *mdp = netdev_priv(ndev);
2087 	struct sh_eth_txdesc *txdesc;
2088 	u32 entry;
2089 	unsigned long flags;
2090 
2091 	spin_lock_irqsave(&mdp->lock, flags);
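	/* Keep a small margin of free Tx descriptors.  If the ring looks
	 * full, try to reclaim completed descriptors before giving up and
	 * stopping the queue.
	 */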
2092 	if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
2093 		if (!sh_eth_txfree(ndev)) {
2094 			netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
2095 			netif_stop_queue(ndev);
2096 			spin_unlock_irqrestore(&mdp->lock, flags);
2097 			return NETDEV_TX_BUSY;
2098 		}
2099 	}
2100 	spin_unlock_irqrestore(&mdp->lock, flags);
2101 
2102 	entry = mdp->cur_tx % mdp->num_tx_ring;
2103 	mdp->tx_skbuff[entry] = skb;
2104 	txdesc = &mdp->tx_ring[entry];
2105 	/* soft swap. */
2106 	if (!mdp->cd->hw_swap)
2107 		sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
2108 				 skb->len + 2);
2109 	txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
2110 				      DMA_TO_DEVICE);
2111 	if (skb->len < ETH_ZLEN)
2112 		txdesc->buffer_length = ETH_ZLEN;
2113 	else
2114 		txdesc->buffer_length = skb->len;
2115 
2116 	if (entry >= mdp->num_tx_ring - 1)
2117 		txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
2118 	else
2119 		txdesc->status |= cpu_to_edmac(mdp, TD_TACT);
2120 
2121 	mdp->cur_tx++;
2122 
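	/* Kick the transmitter only if it is not already running; the
	 * transmit-request bit does not need to be rewritten while the DMAC
	 * is active.
	 */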
2123 	if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
2124 		sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
2125 
2126 	return NETDEV_TX_OK;
2127 }
2128 
2129 /* device close function */
2130 static int sh_eth_close(struct net_device *ndev)
2131 {
2132 	struct sh_eth_private *mdp = netdev_priv(ndev);
2133 
2134 	netif_stop_queue(ndev);
2135 
2136 	/* Disable interrupts by clearing the interrupt mask. */
2137 	sh_eth_write(ndev, 0x0000, EESIPR);
2138 
2139 	/* Stop the chip's Tx and Rx processes. */
2140 	sh_eth_write(ndev, 0, EDTRR);
2141 	sh_eth_write(ndev, 0, EDRRR);
2142 
2143 	/* PHY Disconnect */
2144 	if (mdp->phydev) {
2145 		phy_stop(mdp->phydev);
2146 		phy_disconnect(mdp->phydev);
2147 	}
2148 
2149 	free_irq(ndev->irq, ndev);
2150 
2151 	napi_disable(&mdp->napi);
2152 
2153 	/* Free all the skbuffs in the Rx queue. */
2154 	sh_eth_ring_free(ndev);
2155 
2156 	/* free DMA buffer */
2157 	sh_eth_free_dma_buffer(mdp);
2158 
2159 	pm_runtime_put_sync(&mdp->pdev->dev);
2160 
2161 	return 0;
2162 }
2163 
2164 static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
2165 {
2166 	struct sh_eth_private *mdp = netdev_priv(ndev);
2167 
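	/* The RZ fast-Ethernet variant (R7S72100) lacks the MAC error
	 * counters read below, so return the software-maintained stats
	 * as-is.
	 */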
2168 	if (sh_eth_is_rz_fast_ether(mdp))
2169 		return &ndev->stats;
2170 
2171 	pm_runtime_get_sync(&mdp->pdev->dev);
2172 
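	/* The MAC error counters below are cleared by writing them, so fold
	 * their current values into ndev->stats and write them back to zero.
	 */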
2173 	ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
2174 	sh_eth_write(ndev, 0, TROCR);	/* (write clear) */
2175 	ndev->stats.collisions += sh_eth_read(ndev, CDCR);
2176 	sh_eth_write(ndev, 0, CDCR);	/* (write clear) */
2177 	ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
2178 	sh_eth_write(ndev, 0, LCCR);	/* (write clear) */
2179 	if (sh_eth_is_gether(mdp)) {
2180 		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
2181 		sh_eth_write(ndev, 0, CERCR);	/* (write clear) */
2182 		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
2183 		sh_eth_write(ndev, 0, CEECR);	/* (write clear) */
2184 	} else {
2185 		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
2186 		sh_eth_write(ndev, 0, CNDCR);	/* (write clear) */
2187 	}
2188 	pm_runtime_put_sync(&mdp->pdev->dev);
2189 
2190 	return &ndev->stats;
2191 }
2192 
2193 /* ioctl to device function */
2194 static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2195 {
2196 	struct sh_eth_private *mdp = netdev_priv(ndev);
2197 	struct phy_device *phydev = mdp->phydev;
2198 
2199 	if (!netif_running(ndev))
2200 		return -EINVAL;
2201 
2202 	if (!phydev)
2203 		return -ENODEV;
2204 
2205 	return phy_mii_ioctl(phydev, rq, cmd);
2206 }
2207 
2208 /* For TSU_POSTn. Please refer to the manual about these (strange) bitfields */
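/* As used by the helpers below, each TSU_POSTn register holds eight 4-bit
 * fields, one per CAM entry: entry / 8 selects the register and entry % 8 the
 * field.  sh_eth_tsu_get_post_mask() returns the whole field for an entry,
 * while sh_eth_tsu_get_post_bit() returns this port's bit within it (bit 3 of
 * the field for port 0, bit 1 for port 1).
 */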
2209 static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
2210 					    int entry)
2211 {
2212 	return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4);
2213 }
2214 
2215 static u32 sh_eth_tsu_get_post_mask(int entry)
2216 {
2217 	return 0x0f << (28 - ((entry % 8) * 4));
2218 }
2219 
2220 static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
2221 {
2222 	return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
2223 }
2224 
2225 static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
2226 					     int entry)
2227 {
2228 	struct sh_eth_private *mdp = netdev_priv(ndev);
2229 	u32 tmp;
2230 	void *reg_offset;
2231 
2232 	reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
2233 	tmp = ioread32(reg_offset);
2234 	iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset);
2235 }
2236 
2237 static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
2238 					      int entry)
2239 {
2240 	struct sh_eth_private *mdp = netdev_priv(ndev);
2241 	u32 post_mask, ref_mask, tmp;
2242 	void *reg_offset;
2243 
2244 	reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
2245 	post_mask = sh_eth_tsu_get_post_mask(entry);
2246 	ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;
2247 
2248 	tmp = ioread32(reg_offset);
2249 	iowrite32(tmp & ~post_mask, reg_offset);
2250 
2251 	/* Return "true" if the other port still has this entry enabled */
2252 	return tmp & ref_mask;
2253 }
2254 
2255 static int sh_eth_tsu_busy(struct net_device *ndev)
2256 {
2257 	int timeout = SH_ETH_TSU_TIMEOUT_MS * 100;
2258 	struct sh_eth_private *mdp = netdev_priv(ndev);
2259 
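	/* Poll ADSBSY until the TSU has finished the previous address-table
	 * update; with udelay(10) per iteration the loop times out after
	 * roughly SH_ETH_TSU_TIMEOUT_MS milliseconds.
	 */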
2260 	while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
2261 		udelay(10);
2262 		timeout--;
2263 		if (timeout <= 0) {
2264 			netdev_err(ndev, "%s: timeout\n", __func__);
2265 			return -ETIMEDOUT;
2266 		}
2267 	}
2268 
2269 	return 0;
2270 }
2271 
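/* Each CAM entry occupies 8 bytes of TSU address-table space: the first word
 * holds the leading four octets of the MAC address, the word at offset 4
 * holds the remaining two.  Each half-write is followed by an ADSBSY poll
 * before the next access.
 */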
2272 static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg,
2273 				  const u8 *addr)
2274 {
2275 	u32 val;
2276 
2277 	val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
2278 	iowrite32(val, reg);
2279 	if (sh_eth_tsu_busy(ndev) < 0)
2280 		return -EBUSY;
2281 
2282 	val = addr[4] << 8 | addr[5];
2283 	iowrite32(val, reg + 4);
2284 	if (sh_eth_tsu_busy(ndev) < 0)
2285 		return -EBUSY;
2286 
2287 	return 0;
2288 }
2289 
2290 static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
2291 {
2292 	u32 val;
2293 
2294 	val = ioread32(reg);
2295 	addr[0] = (val >> 24) & 0xff;
2296 	addr[1] = (val >> 16) & 0xff;
2297 	addr[2] = (val >> 8) & 0xff;
2298 	addr[3] = val & 0xff;
2299 	val = ioread32(reg + 4);
2300 	addr[4] = (val >> 8) & 0xff;
2301 	addr[5] = val & 0xff;
2302 }
2303 
2305 static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
2306 {
2307 	struct sh_eth_private *mdp = netdev_priv(ndev);
2308 	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2309 	int i;
2310 	u8 c_addr[ETH_ALEN];
2311 
2312 	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2313 		sh_eth_tsu_read_entry(reg_offset, c_addr);
2314 		if (ether_addr_equal(addr, c_addr))
2315 			return i;
2316 	}
2317 
2318 	return -ENOENT;
2319 }
2320 
2321 static int sh_eth_tsu_find_empty(struct net_device *ndev)
2322 {
2323 	u8 blank[ETH_ALEN];
2324 	int entry;
2325 
2326 	memset(blank, 0, sizeof(blank));
2327 	entry = sh_eth_tsu_find_entry(ndev, blank);
2328 	return (entry < 0) ? -ENOMEM : entry;
2329 }
2330 
2331 static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
2332 					      int entry)
2333 {
2334 	struct sh_eth_private *mdp = netdev_priv(ndev);
2335 	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2336 	int ret;
2337 	u8 blank[ETH_ALEN];
2338 
2339 	sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
2340 			 ~(1 << (31 - entry)), TSU_TEN);
2341 
2342 	memset(blank, 0, sizeof(blank));
2343 	ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
2344 	if (ret < 0)
2345 		return ret;
2346 	return 0;
2347 }
2348 
2349 static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
2350 {
2351 	struct sh_eth_private *mdp = netdev_priv(ndev);
2352 	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2353 	int i, ret;
2354 
2355 	if (!mdp->cd->tsu)
2356 		return 0;
2357 
2358 	i = sh_eth_tsu_find_entry(ndev, addr);
2359 	if (i < 0) {
2360 		/* No entry found, create one */
2361 		i = sh_eth_tsu_find_empty(ndev);
2362 		if (i < 0)
2363 			return -ENOMEM;
2364 		ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);
2365 		if (ret < 0)
2366 			return ret;
2367 
2368 		/* Enable the entry */
2369 		sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
2370 				 (1 << (31 - i)), TSU_TEN);
2371 	}
2372 
2373 	/* Entry found or created, enable POST */
2374 	sh_eth_tsu_enable_cam_entry_post(ndev, i);
2375 
2376 	return 0;
2377 }
2378 
2379 static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr)
2380 {
2381 	struct sh_eth_private *mdp = netdev_priv(ndev);
2382 	int i, ret;
2383 
2384 	if (!mdp->cd->tsu)
2385 		return 0;
2386 
2387 	i = sh_eth_tsu_find_entry(ndev, addr);
2388 	if (i >= 0) {
2389 		/* Entry found */
2390 		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2391 			goto done;
2392 
2393 		/* Disable the entry once neither port uses it any longer */
2394 		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2395 		if (ret < 0)
2396 			return ret;
2397 	}
2398 done:
2399 	return 0;
2400 }
2401 
2402 static int sh_eth_tsu_purge_all(struct net_device *ndev)
2403 {
2404 	struct sh_eth_private *mdp = netdev_priv(ndev);
2405 	int i, ret;
2406 
2407 	if (unlikely(!mdp->cd->tsu))
2408 		return 0;
2409 
2410 	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
2411 		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2412 			continue;
2413 
2414 		/* Disable the entry once neither port uses it any longer */
2415 		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2416 		if (ret < 0)
2417 			return ret;
2418 	}
2419 
2420 	return 0;
2421 }
2422 
2423 static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
2424 {
2425 	struct sh_eth_private *mdp = netdev_priv(ndev);
2426 	u8 addr[ETH_ALEN];
2427 	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2428 	int i;
2429 
2430 	if (unlikely(!mdp->cd->tsu))
2431 		return;
2432 
2433 	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2434 		sh_eth_tsu_read_entry(reg_offset, addr);
2435 		if (is_multicast_ether_addr(addr))
2436 			sh_eth_tsu_del_entry(ndev, addr);
2437 	}
2438 }
2439 
2440 /* Set the multicast reception mode */
2441 static void sh_eth_set_multicast_list(struct net_device *ndev)
2442 {
2443 	struct sh_eth_private *mdp = netdev_priv(ndev);
2444 	u32 ecmr_bits;
2445 	int mcast_all = 0;
2446 	unsigned long flags;
2447 
2448 	spin_lock_irqsave(&mdp->lock, flags);
2449 	/* Initial condition is MCT = 1, PRM = 0.
2450 	 * Depending on ndev->flags, set PRM or clear MCT
2451 	 */
2452 	ecmr_bits = (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | ECMR_MCT;
2453 
2454 	if (!(ndev->flags & IFF_MULTICAST)) {
2455 		sh_eth_tsu_purge_mcast(ndev);
2456 		mcast_all = 1;
2457 	}
2458 	if (ndev->flags & IFF_ALLMULTI) {
2459 		sh_eth_tsu_purge_mcast(ndev);
2460 		ecmr_bits &= ~ECMR_MCT;
2461 		mcast_all = 1;
2462 	}
2463 
2464 	if (ndev->flags & IFF_PROMISC) {
2465 		sh_eth_tsu_purge_all(ndev);
2466 		ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
2467 	} else if (mdp->cd->tsu) {
2468 		struct netdev_hw_addr *ha;
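		/* Program each multicast address into the TSU CAM.  If the
		 * table overflows, fall back to accepting all multicast
		 * frames by clearing ECMR_MCT.
		 */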
2469 		netdev_for_each_mc_addr(ha, ndev) {
2470 			if (mcast_all && is_multicast_ether_addr(ha->addr))
2471 				continue;
2472 
2473 			if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
2474 				if (!mcast_all) {
2475 					sh_eth_tsu_purge_mcast(ndev);
2476 					ecmr_bits &= ~ECMR_MCT;
2477 					mcast_all = 1;
2478 				}
2479 			}
2480 		}
2481 	} else {
2482 		/* Normal, unicast/broadcast-only mode. */
2483 		ecmr_bits = (ecmr_bits & ~ECMR_PRM) | ECMR_MCT;
2484 	}
2485 
2486 	/* update the ethernet mode */
2487 	sh_eth_write(ndev, ecmr_bits, ECMR);
2488 
2489 	spin_unlock_irqrestore(&mdp->lock, flags);
2490 }
2491 
2492 static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
2493 {
2494 	if (!mdp->port)
2495 		return TSU_VTAG0;
2496 	else
2497 		return TSU_VTAG1;
2498 }
2499 
2500 static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
2501 				  __be16 proto, u16 vid)
2502 {
2503 	struct sh_eth_private *mdp = netdev_priv(ndev);
2504 	int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2505 
2506 	if (unlikely(!mdp->cd->tsu))
2507 		return -EPERM;
2508 
2509 	/* No filtering if vid = 0 */
2510 	if (!vid)
2511 		return 0;
2512 
2513 	mdp->vlan_num_ids++;
2514 
2515 	/* The controller has one VLAN tag HW filter. So, if the filter is
2516 	 * already enabled, the driver disables it and accepts all VLAN IDs instead.
2517 	 */
2518 	if (mdp->vlan_num_ids > 1) {
2519 		/* disable VLAN filter */
2520 		sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2521 		return 0;
2522 	}
2523 
2524 	sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
2525 			 vtag_reg_index);
2526 
2527 	return 0;
2528 }
2529 
2530 static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
2531 				   __be16 proto, u16 vid)
2532 {
2533 	struct sh_eth_private *mdp = netdev_priv(ndev);
2534 	int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2535 
2536 	if (unlikely(!mdp->cd->tsu))
2537 		return -EPERM;
2538 
2539 	/* No filtering if vid = 0 */
2540 	if (!vid)
2541 		return 0;
2542 
2543 	mdp->vlan_num_ids--;
2544 	sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2545 
2546 	return 0;
2547 }
2548 
2549 /* SuperH's TSU register init function */
2550 static void sh_eth_tsu_init(struct sh_eth_private *mdp)
2551 {
2552 	if (sh_eth_is_rz_fast_ether(mdp)) {
2553 		sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entries */
2554 		return;
2555 	}
2556 
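	/* Bring the TSU up in a known state: forwarding between the two
	 * ports off, all CAM entries and their POST bits cleared, and the
	 * TSU interrupts masked.
	 */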
2557 	sh_eth_tsu_write(mdp, 0, TSU_FWEN0);	/* Disable forward(0->1) */
2558 	sh_eth_tsu_write(mdp, 0, TSU_FWEN1);	/* Disable forward(1->0) */
2559 	sh_eth_tsu_write(mdp, 0, TSU_FCM);	/* forward fifo 3k-3k */
2560 	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
2561 	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
2562 	sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
2563 	sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
2564 	sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
2565 	sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
2566 	sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
2567 	if (sh_eth_is_gether(mdp)) {
2568 		sh_eth_tsu_write(mdp, 0, TSU_QTAG0);	/* Disable QTAG(0->1) */
2569 		sh_eth_tsu_write(mdp, 0, TSU_QTAG1);	/* Disable QTAG(1->0) */
2570 	} else {
2571 		sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);	/* Disable QTAG(0->1) */
2572 		sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);	/* Disable QTAG(1->0) */
2573 	}
2574 	sh_eth_tsu_write(mdp, 0, TSU_FWSR);	/* clear all interrupt status */
2575 	sh_eth_tsu_write(mdp, 0, TSU_FWINMK);	/* Disable all interrupts */
2576 	sh_eth_tsu_write(mdp, 0, TSU_TEN);	/* Disable all CAM entries */
2577 	sh_eth_tsu_write(mdp, 0, TSU_POST1);	/* Disable CAM entry [ 0- 7] */
2578 	sh_eth_tsu_write(mdp, 0, TSU_POST2);	/* Disable CAM entry [ 8-15] */
2579 	sh_eth_tsu_write(mdp, 0, TSU_POST3);	/* Disable CAM entry [16-23] */
2580 	sh_eth_tsu_write(mdp, 0, TSU_POST4);	/* Disable CAM entry [24-31] */
2581 }
2582 
2583 /* MDIO bus release function */
2584 static int sh_mdio_release(struct sh_eth_private *mdp)
2585 {
2586 	/* unregister mdio bus */
2587 	mdiobus_unregister(mdp->mii_bus);
2588 
2589 	/* free bitbang info */
2590 	free_mdio_bitbang(mdp->mii_bus);
2591 
2592 	return 0;
2593 }
2594 
2595 /* MDIO bus init function */
2596 static int sh_mdio_init(struct sh_eth_private *mdp,
2597 			struct sh_eth_plat_data *pd)
2598 {
2599 	int ret, i;
2600 	struct bb_info *bitbang;
2601 	struct platform_device *pdev = mdp->pdev;
2602 	struct device *dev = &mdp->pdev->dev;
2603 
2604 	/* create bit control struct for PHY */
2605 	bitbang = devm_kzalloc(dev, sizeof(struct bb_info), GFP_KERNEL);
2606 	if (!bitbang)
2607 		return -ENOMEM;
2608 
2609 	/* bitbang init */
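	/* MDIO is bit-banged through the PIR register: the masks below tell
	 * the mdio-bitbang library which PIR bits drive MDC, MDO, MDI and the
	 * MDIO direction bit (MMD), and bb_ops supplies the accessors.
	 */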
2610 	bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
2611 	bitbang->set_gate = pd->set_mdio_gate;
2612 	bitbang->mdi_msk = PIR_MDI;
2613 	bitbang->mdo_msk = PIR_MDO;
2614 	bitbang->mmd_msk = PIR_MMD;
2615 	bitbang->mdc_msk = PIR_MDC;
2616 	bitbang->ctrl.ops = &bb_ops;
2617 
2618 	/* MII controller setting */
2619 	mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
2620 	if (!mdp->mii_bus)
2621 		return -ENOMEM;
2622 
2623 	/* Hook up MII support for ethtool */
2624 	mdp->mii_bus->name = "sh_mii";
2625 	mdp->mii_bus->parent = dev;
2626 	snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
2627 		 pdev->name, pdev->id);
2628 
2629 	/* PHY IRQ */
2630 	mdp->mii_bus->irq = devm_kzalloc(dev, sizeof(int) * PHY_MAX_ADDR,
2631 					 GFP_KERNEL);
2632 	if (!mdp->mii_bus->irq) {
2633 		ret = -ENOMEM;
2634 		goto out_free_bus;
2635 	}
2636 
2637 	/* register MDIO bus */
2638 	if (dev->of_node) {
2639 		ret = of_mdiobus_register(mdp->mii_bus, dev->of_node);
2640 	} else {
2641 		for (i = 0; i < PHY_MAX_ADDR; i++)
2642 			mdp->mii_bus->irq[i] = PHY_POLL;
2643 		if (pd->phy_irq > 0)
2644 			mdp->mii_bus->irq[pd->phy] = pd->phy_irq;
2645 
2646 		ret = mdiobus_register(mdp->mii_bus);
2647 	}
2648 
2649 	if (ret)
2650 		goto out_free_bus;
2651 
2652 	return 0;
2653 
2654 out_free_bus:
2655 	free_mdio_bitbang(mdp->mii_bus);
2656 	return ret;
2657 }
2658 
2659 static const u16 *sh_eth_get_register_offset(int register_type)
2660 {
2661 	const u16 *reg_offset = NULL;
2662 
2663 	switch (register_type) {
2664 	case SH_ETH_REG_GIGABIT:
2665 		reg_offset = sh_eth_offset_gigabit;
2666 		break;
2667 	case SH_ETH_REG_FAST_RZ:
2668 		reg_offset = sh_eth_offset_fast_rz;
2669 		break;
2670 	case SH_ETH_REG_FAST_RCAR:
2671 		reg_offset = sh_eth_offset_fast_rcar;
2672 		break;
2673 	case SH_ETH_REG_FAST_SH4:
2674 		reg_offset = sh_eth_offset_fast_sh4;
2675 		break;
2676 	case SH_ETH_REG_FAST_SH3_SH2:
2677 		reg_offset = sh_eth_offset_fast_sh3_sh2;
2678 		break;
2679 	default:
2680 		break;
2681 	}
2682 
2683 	return reg_offset;
2684 }
2685 
2686 static const struct net_device_ops sh_eth_netdev_ops = {
2687 	.ndo_open		= sh_eth_open,
2688 	.ndo_stop		= sh_eth_close,
2689 	.ndo_start_xmit		= sh_eth_start_xmit,
2690 	.ndo_get_stats		= sh_eth_get_stats,
2691 	.ndo_tx_timeout		= sh_eth_tx_timeout,
2692 	.ndo_do_ioctl		= sh_eth_do_ioctl,
2693 	.ndo_validate_addr	= eth_validate_addr,
2694 	.ndo_set_mac_address	= eth_mac_addr,
2695 	.ndo_change_mtu		= eth_change_mtu,
2696 };
2697 
2698 static const struct net_device_ops sh_eth_netdev_ops_tsu = {
2699 	.ndo_open		= sh_eth_open,
2700 	.ndo_stop		= sh_eth_close,
2701 	.ndo_start_xmit		= sh_eth_start_xmit,
2702 	.ndo_get_stats		= sh_eth_get_stats,
2703 	.ndo_set_rx_mode	= sh_eth_set_multicast_list,
2704 	.ndo_vlan_rx_add_vid	= sh_eth_vlan_rx_add_vid,
2705 	.ndo_vlan_rx_kill_vid	= sh_eth_vlan_rx_kill_vid,
2706 	.ndo_tx_timeout		= sh_eth_tx_timeout,
2707 	.ndo_do_ioctl		= sh_eth_do_ioctl,
2708 	.ndo_validate_addr	= eth_validate_addr,
2709 	.ndo_set_mac_address	= eth_mac_addr,
2710 	.ndo_change_mtu		= eth_change_mtu,
2711 };
2712 
2713 #ifdef CONFIG_OF
2714 static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
2715 {
2716 	struct device_node *np = dev->of_node;
2717 	struct sh_eth_plat_data *pdata;
2718 	const char *mac_addr;
2719 
2720 	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2721 	if (!pdata)
2722 		return NULL;
2723 
2724 	pdata->phy_interface = of_get_phy_mode(np);
2725 
2726 	mac_addr = of_get_mac_address(np);
2727 	if (mac_addr)
2728 		memcpy(pdata->mac_addr, mac_addr, ETH_ALEN);
2729 
2730 	pdata->no_ether_link =
2731 		of_property_read_bool(np, "renesas,no-ether-link");
2732 	pdata->ether_link_active_low =
2733 		of_property_read_bool(np, "renesas,ether-link-active-low");
2734 
2735 	return pdata;
2736 }
2737 
2738 static const struct of_device_id sh_eth_match_table[] = {
2739 	{ .compatible = "renesas,gether-r8a7740", .data = &r8a7740_data },
2740 	{ .compatible = "renesas,ether-r8a7778", .data = &r8a777x_data },
2741 	{ .compatible = "renesas,ether-r8a7779", .data = &r8a777x_data },
2742 	{ .compatible = "renesas,ether-r8a7790", .data = &r8a779x_data },
2743 	{ .compatible = "renesas,ether-r8a7791", .data = &r8a779x_data },
2744 	{ .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
2745 	{ }
2746 };
2747 MODULE_DEVICE_TABLE(of, sh_eth_match_table);
2748 #else
2749 static inline struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
2750 {
2751 	return NULL;
2752 }
2753 #endif
2754 
2755 static int sh_eth_drv_probe(struct platform_device *pdev)
2756 {
2757 	int ret, devno = 0;
2758 	struct resource *res;
2759 	struct net_device *ndev = NULL;
2760 	struct sh_eth_private *mdp = NULL;
2761 	struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev);
2762 	const struct platform_device_id *id = platform_get_device_id(pdev);
2763 
2764 	/* get base addr */
2765 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2766 	if (unlikely(res == NULL)) {
2767 		dev_err(&pdev->dev, "invalid resource\n");
2768 		return -EINVAL;
2769 	}
2770 
2771 	ndev = alloc_etherdev(sizeof(struct sh_eth_private));
2772 	if (!ndev)
2773 		return -ENOMEM;
2774 
2775 	pm_runtime_enable(&pdev->dev);
2776 	pm_runtime_get_sync(&pdev->dev);
2777 
2778 	/* The sh Ether-specific entries in the device structure. */
2779 	ndev->base_addr = res->start;
2780 	devno = pdev->id;
2781 	if (devno < 0)
2782 		devno = 0;
2783 
2784 	ndev->dma = -1;
2785 	ret = platform_get_irq(pdev, 0);
2786 	if (ret < 0) {
2787 		ret = -ENODEV;
2788 		goto out_release;
2789 	}
2790 	ndev->irq = ret;
2791 
2792 	SET_NETDEV_DEV(ndev, &pdev->dev);
2793 
2794 	mdp = netdev_priv(ndev);
2795 	mdp->num_tx_ring = TX_RING_SIZE;
2796 	mdp->num_rx_ring = RX_RING_SIZE;
2797 	mdp->addr = devm_ioremap_resource(&pdev->dev, res);
2798 	if (IS_ERR(mdp->addr)) {
2799 		ret = PTR_ERR(mdp->addr);
2800 		goto out_release;
2801 	}
2802 
2803 	spin_lock_init(&mdp->lock);
2804 	mdp->pdev = pdev;
2805 
2806 	if (pdev->dev.of_node)
2807 		pd = sh_eth_parse_dt(&pdev->dev);
2808 	if (!pd) {
2809 		dev_err(&pdev->dev, "no platform data\n");
2810 		ret = -EINVAL;
2811 		goto out_release;
2812 	}
2813 
2814 	/* get PHY ID */
2815 	mdp->phy_id = pd->phy;
2816 	mdp->phy_interface = pd->phy_interface;
2817 	/* EDMAC endian */
2818 	mdp->edmac_endian = pd->edmac_endian;
2819 	mdp->no_ether_link = pd->no_ether_link;
2820 	mdp->ether_link_active_low = pd->ether_link_active_low;
2821 
2822 	/* set cpu data */
2823 	if (id) {
2824 		mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
2825 	} else	{
2826 		const struct of_device_id *match;
2827 
2828 		match = of_match_device(of_match_ptr(sh_eth_match_table),
2829 					&pdev->dev);
2830 		mdp->cd = (struct sh_eth_cpu_data *)match->data;
2831 	}
2832 	mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
2833 	if (!mdp->reg_offset) {
2834 		dev_err(&pdev->dev, "Unknown register type (%d)\n",
2835 			mdp->cd->register_type);
2836 		ret = -EINVAL;
2837 		goto out_release;
2838 	}
2839 	sh_eth_set_default_cpu_data(mdp->cd);
2840 
2841 	/* set function */
2842 	if (mdp->cd->tsu)
2843 		ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
2844 	else
2845 		ndev->netdev_ops = &sh_eth_netdev_ops;
2846 	SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
2847 	ndev->watchdog_timeo = TX_TIMEOUT;
2848 
2849 	/* debug message level */
2850 	mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
2851 
2852 	/* read and set MAC address */
2853 	read_mac_address(ndev, pd->mac_addr);
2854 	if (!is_valid_ether_addr(ndev->dev_addr)) {
2855 		dev_warn(&pdev->dev,
2856 			 "no valid MAC address supplied, using a random one.\n");
2857 		eth_hw_addr_random(ndev);
2858 	}
2859 
2860 	/* ioremap the TSU registers */
2861 	if (mdp->cd->tsu) {
2862 		struct resource *rtsu;
2863 		rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2864 		mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu);
2865 		if (IS_ERR(mdp->tsu_addr)) {
2866 			ret = PTR_ERR(mdp->tsu_addr);
2867 			goto out_release;
2868 		}
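		/* The TSU block is shared by a pair of Ether channels; the
		 * device number selects which port's half of the shared
		 * resources (POST bits, VTAG register) this instance uses.
		 */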
2869 		mdp->port = devno % 2;
2870 		ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
2871 	}
2872 
2873 	/* Initialize the first device, or one whose platform data requests it */
2874 	if (!devno || pd->needs_init) {
2875 		if (mdp->cd->chip_reset)
2876 			mdp->cd->chip_reset(ndev);
2877 
2878 		if (mdp->cd->tsu) {
2879 			/* TSU init (init only) */
2880 			sh_eth_tsu_init(mdp);
2881 		}
2882 	}
2883 
2884 	/* MDIO bus init */
2885 	ret = sh_mdio_init(mdp, pd);
2886 	if (ret) {
2887 		dev_err(&ndev->dev, "failed to initialise MDIO\n");
2888 		goto out_release;
2889 	}
2890 
2891 	netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64);
2892 
2893 	/* network device register */
2894 	ret = register_netdev(ndev);
2895 	if (ret)
2896 		goto out_napi_del;
2897 
2898 	/* print device information */
2899 	netdev_info(ndev, "Base address at 0x%x, %pM, IRQ %d.\n",
2900 		    (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
2901 
2902 	pm_runtime_put(&pdev->dev);
2903 	platform_set_drvdata(pdev, ndev);
2904 
2905 	return ret;
2906 
2907 out_napi_del:
2908 	netif_napi_del(&mdp->napi);
2909 	sh_mdio_release(mdp);
2910 
2911 out_release:
2912 	/* net_dev free */
2913 	if (ndev)
2914 		free_netdev(ndev);
2915 
2916 	pm_runtime_put(&pdev->dev);
2917 	pm_runtime_disable(&pdev->dev);
2918 	return ret;
2919 }
2920 
2921 static int sh_eth_drv_remove(struct platform_device *pdev)
2922 {
2923 	struct net_device *ndev = platform_get_drvdata(pdev);
2924 	struct sh_eth_private *mdp = netdev_priv(ndev);
2925 
2926 	unregister_netdev(ndev);
2927 	netif_napi_del(&mdp->napi);
2928 	sh_mdio_release(mdp);
2929 	pm_runtime_disable(&pdev->dev);
2930 	free_netdev(ndev);
2931 
2932 	return 0;
2933 }
2934 
2935 #ifdef CONFIG_PM
2936 static int sh_eth_runtime_nop(struct device *dev)
2937 {
2938 	/* Runtime PM callback shared between ->runtime_suspend()
2939 	 * and ->runtime_resume(). Simply returns success.
2940 	 *
2941 	 * This driver re-initializes all registers after
2942 	 * pm_runtime_get_sync() anyway so there is no need
2943 	 * to save and restore registers here.
2944 	 */
2945 	return 0;
2946 }
2947 
2948 static const struct dev_pm_ops sh_eth_dev_pm_ops = {
2949 	.runtime_suspend = sh_eth_runtime_nop,
2950 	.runtime_resume = sh_eth_runtime_nop,
2951 };
2952 #define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops)
2953 #else
2954 #define SH_ETH_PM_OPS NULL
2955 #endif
2956 
2957 static struct platform_device_id sh_eth_id_table[] = {
2958 	{ "sh7619-ether", (kernel_ulong_t)&sh7619_data },
2959 	{ "sh771x-ether", (kernel_ulong_t)&sh771x_data },
2960 	{ "sh7724-ether", (kernel_ulong_t)&sh7724_data },
2961 	{ "sh7734-gether", (kernel_ulong_t)&sh7734_data },
2962 	{ "sh7757-ether", (kernel_ulong_t)&sh7757_data },
2963 	{ "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga },
2964 	{ "sh7763-gether", (kernel_ulong_t)&sh7763_data },
2965 	{ "r7s72100-ether", (kernel_ulong_t)&r7s72100_data },
2966 	{ "r8a7740-gether", (kernel_ulong_t)&r8a7740_data },
2967 	{ "r8a777x-ether", (kernel_ulong_t)&r8a777x_data },
2968 	{ "r8a7790-ether", (kernel_ulong_t)&r8a779x_data },
2969 	{ "r8a7791-ether", (kernel_ulong_t)&r8a779x_data },
2970 	{ }
2971 };
2972 MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
2973 
2974 static struct platform_driver sh_eth_driver = {
2975 	.probe = sh_eth_drv_probe,
2976 	.remove = sh_eth_drv_remove,
2977 	.id_table = sh_eth_id_table,
2978 	.driver = {
2979 		   .name = CARDNAME,
2980 		   .pm = SH_ETH_PM_OPS,
2981 		   .of_match_table = of_match_ptr(sh_eth_match_table),
2982 	},
2983 };
2984 
2985 module_platform_driver(sh_eth_driver);
2986 
2987 MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
2988 MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
2989 MODULE_LICENSE("GPL v2");
2990